diff --git a/bolt/include/bolt/Core/MCPlusBuilder.h b/bolt/include/bolt/Core/MCPlusBuilder.h index fe7905c33080f..a318ef0b6bd68 100644 --- a/bolt/include/bolt/Core/MCPlusBuilder.h +++ b/bolt/include/bolt/Core/MCPlusBuilder.h @@ -1888,6 +1888,12 @@ class MCPlusBuilder { llvm_unreachable("not implemented"); } + /// Update operand of BTI instruction. + virtual void updateBTIVariant(MCInst &Inst, bool CallTarget, + bool JumpTarget) const { + llvm_unreachable("not implemented"); + } + /// Store \p Target absolute address to \p RegName virtual InstructionListType materializeAddress(const MCSymbol *Target, MCContext *Ctx, diff --git a/bolt/lib/Target/AArch64/AArch64MCPlusBuilder.cpp b/bolt/lib/Target/AArch64/AArch64MCPlusBuilder.cpp index f1291f676f1b5..af87d5c12b5ce 100644 --- a/bolt/lib/Target/AArch64/AArch64MCPlusBuilder.cpp +++ b/bolt/lib/Target/AArch64/AArch64MCPlusBuilder.cpp @@ -2800,6 +2800,14 @@ class AArch64MCPlusBuilder : public MCPlusBuilder { Inst.getOpcode() == AArch64::PACIBSP; } + void updateBTIVariant(MCInst &Inst, bool CallTarget, + bool JumpTarget) const override { + assert(Inst.getOpcode() == AArch64::HINT && "Not a BTI instruction."); + unsigned HintNum = getBTIHintNum(CallTarget, JumpTarget); + Inst.clear(); + Inst.addOperand(MCOperand::createImm(HintNum)); + } + InstructionListType materializeAddress(const MCSymbol *Target, MCContext *Ctx, MCPhysReg RegName, int64_t Addend = 0) const override { diff --git a/bolt/unittests/Core/MCPlusBuilder.cpp b/bolt/unittests/Core/MCPlusBuilder.cpp index 439d72a343ce8..7b6f1620a3f2c 100644 --- a/bolt/unittests/Core/MCPlusBuilder.cpp +++ b/bolt/unittests/Core/MCPlusBuilder.cpp @@ -156,6 +156,8 @@ TEST_P(MCPlusBuilderTester, AArch64_BTI) { ASSERT_EQ(II->getOpcode(), AArch64::HINT); ASSERT_EQ(II->getOperand(0).getImm(), 38); ASSERT_TRUE(BC->MIB->isBTILandingPad(*II, true, true)); + BC->MIB->updateBTIVariant(*II, true, false); + ASSERT_TRUE(BC->MIB->isBTILandingPad(*II, true, false)); MCInst BTIj; 
BC->MIB->createBTI(BTIj, false, true); @@ -163,6 +165,8 @@ TEST_P(MCPlusBuilderTester, AArch64_BTI) { ASSERT_EQ(II->getOpcode(), AArch64::HINT); ASSERT_EQ(II->getOperand(0).getImm(), 36); ASSERT_TRUE(BC->MIB->isBTILandingPad(*II, false, true)); + BC->MIB->updateBTIVariant(*II, true, true); + ASSERT_TRUE(BC->MIB->isBTILandingPad(*II, true, true)); MCInst BTIc; BC->MIB->createBTI(BTIc, true, false); @@ -170,10 +174,14 @@ TEST_P(MCPlusBuilderTester, AArch64_BTI) { ASSERT_EQ(II->getOpcode(), AArch64::HINT); ASSERT_EQ(II->getOperand(0).getImm(), 34); ASSERT_TRUE(BC->MIB->isBTILandingPad(*II, true, false)); + BC->MIB->updateBTIVariant(*II, false, true); + ASSERT_TRUE(BC->MIB->isBTILandingPad(*II, false, true)); +#ifndef NDEBUG MCInst BTIinvalid; ASSERT_DEATH(BC->MIB->createBTI(BTIinvalid, false, false), "No target kinds!"); +#endif MCInst Paciasp = MCInstBuilder(AArch64::PACIASP); II = BB->addInstruction(Paciasp); diff --git a/clang-tools-extra/clangd/CompileCommands.cpp b/clang-tools-extra/clangd/CompileCommands.cpp index 7990f2719e9a0..4eda330716f21 100644 --- a/clang-tools-extra/clangd/CompileCommands.cpp +++ b/clang-tools-extra/clangd/CompileCommands.cpp @@ -132,8 +132,7 @@ std::optional detectSysroot() { std::string detectStandardResourceDir() { static int StaticForMainAddr; // Just an address in this process. 
- return CompilerInvocation::GetResourcesPath("clangd", - (void *)&StaticForMainAddr); + return GetResourcesPath("clangd", (void *)&StaticForMainAddr); } // The path passed to argv[0] is important: diff --git a/clang-tools-extra/clangd/Compiler.cpp b/clang-tools-extra/clangd/Compiler.cpp index 6ebc2eac25745..9ea7df139382a 100644 --- a/clang-tools-extra/clangd/Compiler.cpp +++ b/clang-tools-extra/clangd/Compiler.cpp @@ -9,6 +9,7 @@ #include "Compiler.h" #include "support/Logger.h" #include "clang/Basic/TargetInfo.h" +#include "clang/Driver/CreateInvocationFromArgs.h" #include "clang/Frontend/CompilerInvocation.h" #include "clang/Lex/PreprocessorOptions.h" #include "clang/Serialization/PCHContainerOperations.h" diff --git a/clang/docs/ClangFormatStyleOptions.rst b/clang/docs/ClangFormatStyleOptions.rst index 94d6f0d27619f..4f81a084dd65b 100644 --- a/clang/docs/ClangFormatStyleOptions.rst +++ b/clang/docs/ClangFormatStyleOptions.rst @@ -4765,9 +4765,21 @@ the configuration (without a prefix: ``Auto``). Decimal: 3 Hex: -1 - You can also specify a minimum number of digits (``BinaryMinDigits``, - ``DecimalMinDigits``, and ``HexMinDigits``) the integer literal must - have in order for the separators to be inserted. + You can also specify a minimum number of digits + (``BinaryMinDigitsInsert``, ``DecimalMinDigitsInsert``, and + ``HexMinDigitsInsert``) the integer literal must have in order for the + separators to be inserted, and a maximum number of digits + (``BinaryMaxDigitsRemove``, ``DecimalMaxDigitsRemove``, and + ``HexMaxDigitsRemove``) until the separators are removed. This divides the + literals in 3 regions, always without separator (up until including + ``xxxMaxDigitsRemove``), maybe with, or without separators (up until + excluding ``xxxMinDigitsInsert``), and finally always with separators. + + .. 
note:: + + ``BinaryMinDigits``, ``DecimalMinDigits``, and ``HexMinDigits`` are + deprecated and renamed to ``BinaryMinDigitsInsert``, + ``DecimalMinDigitsInsert``, and ``HexMinDigitsInsert``, respectively. * ``int8_t Binary`` Format separators in binary literals. @@ -4778,15 +4790,28 @@ the configuration (without a prefix: ``Auto``). /* 3: */ b = 0b100'111'101'101; /* 4: */ b = 0b1001'1110'1101; - * ``int8_t BinaryMinDigits`` Format separators in binary literals with a minimum number of digits. + * ``int8_t BinaryMinDigitsInsert`` Format separators in binary literals with a minimum number of digits. .. code-block:: text // Binary: 3 - // BinaryMinDigits: 7 + // BinaryMinDigitsInsert: 7 b1 = 0b101101; b2 = 0b1'101'101; + * ``int8_t BinaryMaxDigitsRemove`` Remove separators in binary literals with a maximum number of digits. + + .. code-block:: text + + // Binary: 3 + // BinaryMinDigitsInsert: 7 + // BinaryMaxDigitsRemove: 4 + b0 = 0b1011; // Always removed. + b1 = 0b101101; // Not added. + b2 = 0b1'01'101; // Not removed, not corrected. + b3 = 0b1'101'101; // Always added. + b4 = 0b10'1101; // Corrected to 0b101'101. + * ``int8_t Decimal`` Format separators in decimal literals. .. code-block:: text @@ -4795,15 +4820,28 @@ the configuration (without a prefix: ``Auto``). /* 0: */ d = 184467'440737'0'95505'92ull; /* 3: */ d = 18'446'744'073'709'550'592ull; - * ``int8_t DecimalMinDigits`` Format separators in decimal literals with a minimum number of digits. + * ``int8_t DecimalMinDigitsInsert`` Format separators in decimal literals with a minimum number of digits. .. code-block:: text // Decimal: 3 - // DecimalMinDigits: 5 + // DecimalMinDigitsInsert: 5 d1 = 2023; d2 = 10'000; + * ``int8_t DecimalMaxDigitsRemove`` Remove separators in decimal literals with a maximum number of digits. + + .. code-block:: text + + // Decimal: 3 + // DecimalMinDigitsInsert: 7 + // DecimalMaxDigitsRemove: 4 + d0 = 2023; // Always removed. + d1 = 123456; // Not added. 
+ d2 = 1'23'456; // Not removed, not corrected. + d3 = 5'000'000; // Always added. + d4 = 1'23'45; // Corrected to 12'345. + * ``int8_t Hex`` Format separators in hexadecimal literals. .. code-block:: text @@ -4812,16 +4850,30 @@ the configuration (without a prefix: ``Auto``). /* 0: */ h = 0xDEAD'BEEF'DE'AD'BEE'Fuz; /* 2: */ h = 0xDE'AD'BE'EF'DE'AD'BE'EFuz; - * ``int8_t HexMinDigits`` Format separators in hexadecimal literals with a minimum number of + * ``int8_t HexMinDigitsInsert`` Format separators in hexadecimal literals with a minimum number of digits. .. code-block:: text // Hex: 2 - // HexMinDigits: 6 + // HexMinDigitsInsert: 6 h1 = 0xABCDE; h2 = 0xAB'CD'EF; + * ``int8_t HexMaxDigitsRemove`` Remove separators in hexadecimal literals with a maximum number of + digits. + + .. code-block:: text + + // Hex: 2 + // HexMinDigitsInsert: 6 + // HexMaxDigitsRemove: 4 + h0 = 0xAFFE; // Always removed. + h1 = 0xABCDE; // Not added. + h2 = 0xABC'DE; // Not removed, not corrected. + h3 = 0xAB'CD'EF; // Always added. + h4 = 0xABCD'E; // Corrected to 0xA'BC'DE. + .. _JavaImportGroups: diff --git a/clang/docs/OpenMPSupport.rst b/clang/docs/OpenMPSupport.rst index f7e6061044c6d..e7ca7b0bd0792 100644 --- a/clang/docs/OpenMPSupport.rst +++ b/clang/docs/OpenMPSupport.rst @@ -580,6 +580,8 @@ implementation. 
| need_device_addr modifier for adjust_args clause | :part:`partial` | :none:`unclaimed` | Parsing/Sema: https://github.com/llvm/llvm-project/pull/143442 | | | | | https://github.com/llvm/llvm-project/pull/149586 | +-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+ +| need_device_ptr modifier for adjust_args clause | :part:`unclaimed` | :none:`unclaimed` | | ++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+ | Prescriptive num_threads | :good:`done` | :none:`unclaimed` | https://github.com/llvm/llvm-project/pull/160659 | | | | | https://github.com/llvm/llvm-project/pull/146403 | | | | | https://github.com/llvm/llvm-project/pull/146404 | @@ -631,7 +633,9 @@ implementation. | | | | RT: @abhinavgaba (https://github.com/llvm/llvm-project/pull/149036, | | | | | https://github.com/llvm/llvm-project/pull/158370) | +-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+ - +| need_device_ptr modifier for adjust_args clause | :part:`partial` | :none:`unclaimed` | Clang Parsing/Sema: https://github.com/llvm/llvm-project/pull/168905 | +| | | | https://github.com/llvm/llvm-project/pull/169558 | ++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+ OpenMP Extensions ================= diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst index c6a79ed71ca2f..4cfcd37df1866 100644 --- a/clang/docs/ReleaseNotes.rst +++ b/clang/docs/ReleaseNotes.rst @@ -84,6 +84,8 @@ Potentially Breaking Changes - Downstream 
projects that previously linked only against ``clangDriver`` may now (also) need to link against the new ``clangOptions`` library, since options-related code has been moved out of the Driver into a separate library. +- The ``clangFrontend`` library no longer depends on ``clangDriver``, which may + break downstream projects that relied on this transitive dependency. C/C++ Language Potentially Breaking Changes ------------------------------------------- @@ -394,6 +396,7 @@ Improvements to Clang's diagnostics - Fixed false positives in ``-Waddress-of-packed-member`` diagnostics when potential misaligned members get processed before they can get discarded. (#GH144729) +- Fix a false positive warning in ``-Wignored-qualifiers`` when the return type is undeduced. (#GH43054) - Clang now emits a diagnostic with the correct message in case of assigning to const reference captured in lambda. (#GH105647) @@ -701,6 +704,9 @@ clang-format ``AlignAfterOpenBracket`` option, and make ``AlignAfterOpenBracket`` a ``bool`` type. - Add ``AlignPPAndNotPP`` suboption to ``AlignTrailingComments``. +- Rename ``(Binary|Decimal|Hex)MinDigits`` to ``...MinDigitsInsert`` and add + ``(Binary|Decimal|Hex)MaxDigitsRemove`` suboptions to + ``IntegerLiteralSeparator``. libclang -------- @@ -761,6 +767,9 @@ OpenMP Support - Updated parsing and semantic analysis support for ``nowait`` clause to accept optional argument in OpenMP >= 60. - Added support for ``default`` clause on ``target`` directive. +- Added parsing and semantic analysis support for ``need_device_ptr`` modifier + to accept an optional fallback argument (``fb_nullify`` or ``fb_preserve``) + with OpenMP >= 61.
Improvements ^^^^^^^^^^^^ diff --git a/clang/include/clang/Analysis/Analyses/LifetimeSafety/LifetimeAnnotations.h b/clang/include/clang/Analysis/Analyses/LifetimeSafety/LifetimeAnnotations.h index f02969e0a9563..1a16fb82f9a84 100644 --- a/clang/include/clang/Analysis/Analyses/LifetimeSafety/LifetimeAnnotations.h +++ b/clang/include/clang/Analysis/Analyses/LifetimeSafety/LifetimeAnnotations.h @@ -38,6 +38,11 @@ bool isAssignmentOperatorLifetimeBound(const CXXMethodDecl *CMD); /// method or because it's a normal assignment operator. bool implicitObjectParamIsLifetimeBound(const FunctionDecl *FD); +// Tells whether the type is annotated with [[gsl::Pointer]]. +bool isGslPointerType(QualType QT); +// Tells whether the type is annotated with [[gsl::Owner]]. +bool isGslOwnerType(QualType QT); + } // namespace clang::lifetimes #endif // LLVM_CLANG_ANALYSIS_ANALYSES_LIFETIMEANNOTATIONS_H diff --git a/clang/include/clang/Basic/BuiltinsX86.td b/clang/include/clang/Basic/BuiltinsX86.td index 4aa3d51931980..fcc3957f9b8ab 100644 --- a/clang/include/clang/Basic/BuiltinsX86.td +++ b/clang/include/clang/Basic/BuiltinsX86.td @@ -2371,7 +2371,8 @@ let Features = "avx512vl", def pternlogq256_maskz : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int>, _Vector<4, long long int>, _Vector<4, long long int>, _Constant int, unsigned char)">; } -let Features = "avx512f", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in { +let Features = "avx512f", + Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in { def shuf_f32x4 : X86Builtin<"_Vector<16, float>(_Vector<16, float>, _Vector<16, float>, _Constant int)">; def shuf_f64x2 : X86Builtin<"_Vector<8, double>(_Vector<8, double>, _Vector<8, double>, _Constant int)">; def shuf_i32x4 : X86Builtin<"_Vector<16, int>(_Vector<16, int>, _Vector<16, int>, _Constant int)">; @@ -2391,7 +2392,8 @@ let Features = "avx512f", Attributes = [NoThrow, Const, Constexpr, RequiredVecto : X86Builtin<"_Vector<16, 
float>(_Vector<16, float>, _Vector<16, int>)">; } -let Features = "avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in { +let Features = "avx512vl", + Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<256>] in { def shuf_f32x4_256 : X86Builtin<"_Vector<8, float>(_Vector<8, float>, _Vector<8, float>, _Constant int)">; def shuf_f64x2_256 : X86Builtin<"_Vector<4, double>(_Vector<4, double>, _Vector<4, double>, _Constant int)">; def shuf_i32x4_256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, _Vector<8, int>, _Constant int)">; diff --git a/clang/include/clang/Basic/DebugOptions.def b/clang/include/clang/Basic/DebugOptions.def index ea3636ffa1af1..34f5a313947a4 100644 --- a/clang/include/clang/Basic/DebugOptions.def +++ b/clang/include/clang/Basic/DebugOptions.def @@ -65,6 +65,9 @@ DEBUGOPT(DebugKeyInstructions, 1, 0, Benign) DEBUGOPT(DebugColumnInfo, 1, 0, Compatible) ///< Whether or not to use column information ///< in debug info. +/// Whether or not to include call site information in debug info. +DEBUGOPT(DebugCallSiteInfo, 1, 1, Benign) + DEBUGOPT(DebugTypeExtRefs, 1, 0, Compatible) ///< Whether or not debug info should contain ///< external references to a PCH or module. 
diff --git a/clang/include/clang/Basic/DiagnosticDriverKinds.td b/clang/include/clang/Basic/DiagnosticDriverKinds.td index f262db55a0d92..aeffe96e806bd 100644 --- a/clang/include/clang/Basic/DiagnosticDriverKinds.td +++ b/clang/include/clang/Basic/DiagnosticDriverKinds.td @@ -851,9 +851,6 @@ def warn_drv_sarif_format_unstable : Warning< "diagnostic formatting in SARIF mode is currently unstable">, InGroup>; -def err_drv_riscv_unsupported_with_linker_relaxation : Error< - "%0 is unsupported with RISC-V linker relaxation (-mrelax)">; - def warn_drv_loongarch_conflicting_implied_val : Warning< "ignoring '%0' as it conflicts with that implied by '%1' (%2)">, InGroup; diff --git a/clang/include/clang/Basic/DiagnosticSemaKinds.td b/clang/include/clang/Basic/DiagnosticSemaKinds.td index 53aa86a7dabde..4a145fd71eedd 100644 --- a/clang/include/clang/Basic/DiagnosticSemaKinds.td +++ b/clang/include/clang/Basic/DiagnosticSemaKinds.td @@ -8661,6 +8661,8 @@ def err_conditional_vector_size : Error< def err_conditional_vector_element_size : Error< "vector condition type %0 and result type %1 do not have elements of the " "same size">; +def err_conditional_vector_scalar_type_unsupported : Error< + "scalar type %0 not supported with vector condition type %1">; def err_conditional_vector_has_void : Error< "GNU vector conditional operand cannot be %select{void|a throw expression}0">; def err_conditional_vector_operand_type diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index a19c4f951fff9..777b49434f119 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -4612,6 +4612,16 @@ def CIR_ExpOp : CIR_UnaryFPToFPBuiltinOp<"exp", "ExpOp"> { }]; } +def CIR_Exp2Op : CIR_UnaryFPToFPBuiltinOp<"exp2", "Exp2Op"> { + let summary = "Computes the floating-point base-2 exponential value"; + let description = [{ + `cir.exp2` computes the base-2 exponential of a floating-point operand and 
+ returns a result of the same type. + + Floating-point exceptions are ignored, and it does not set `errno`. + }]; +} + def CIR_FAbsOp : CIR_UnaryFPToFPBuiltinOp<"fabs", "FAbsOp"> { let summary = "Computes the floating-point absolute value"; let description = [{ diff --git a/clang/include/clang/Driver/CommonArgs.h b/clang/include/clang/Driver/CommonArgs.h index ac17d6211d882..264bd4965f9ad 100644 --- a/clang/include/clang/Driver/CommonArgs.h +++ b/clang/include/clang/Driver/CommonArgs.h @@ -291,16 +291,6 @@ void handleVectorizeLoopsArgs(const llvm::opt::ArgList &Args, void handleVectorizeSLPArgs(const llvm::opt::ArgList &Args, llvm::opt::ArgStringList &CmdArgs); -// Parse -mprefer-vector-width=. Return the Value string if well-formed. -// Otherwise, return an empty string and issue a diagnosic message if needed. -StringRef parseMPreferVectorWidthOption(clang::DiagnosticsEngine &Diags, - const llvm::opt::ArgList &Args); - -// Parse -mrecip. Return the Value string if well-formed. -// Otherwise, return an empty string and issue a diagnosic message if needed. -StringRef parseMRecipOption(clang::DiagnosticsEngine &Diags, - const llvm::opt::ArgList &Args); - // Convert ComplexRangeKind to a string that can be passed as a frontend option. std::string complexRangeKindToStr(LangOptions::ComplexRangeKind Range); diff --git a/clang/include/clang/Driver/CreateASTUnitFromArgs.h b/clang/include/clang/Driver/CreateASTUnitFromArgs.h new file mode 100644 index 0000000000000..30575cc04ca7c --- /dev/null +++ b/clang/include/clang/Driver/CreateASTUnitFromArgs.h @@ -0,0 +1,80 @@ +//===-- CreateASTUnitFromArgs.h - Create an ASTUnit from Args ---*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// Utility for creating an ASTUnit from a vector of command line arguments. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_DRIVER_CREATEASTUNITFROMARGS_H +#define LLVM_CLANG_DRIVER_CREATEASTUNITFROMARGS_H + +#include "clang/Frontend/ASTUnit.h" + +namespace clang { + +/// Create an ASTUnit from a vector of command line arguments, which must +/// specify exactly one source file. +/// +/// \param ArgBegin - The beginning of the argument vector. +/// +/// \param ArgEnd - The end of the argument vector. +/// +/// \param PCHContainerOps - The PCHContainerOperations to use for loading and +/// creating modules. +/// +/// \param Diags - The diagnostics engine to use for reporting errors; its +/// lifetime is expected to extend past that of the returned ASTUnit. +/// +/// \param ResourceFilesPath - The path to the compiler resource files. +/// +/// \param StorePreamblesInMemory - Whether to store PCH in memory. If false, +/// PCH are stored in temporary files. +/// +/// \param PreambleStoragePath - The path to a directory, in which to create +/// temporary PCH files. If empty, the default system temporary directory is +/// used. This parameter is ignored if \p StorePreamblesInMemory is true. +/// +/// \param ModuleFormat - If provided, uses the specific module format. +/// +/// \param ErrAST - If non-null and parsing failed without any AST to return +/// (e.g. because the PCH could not be loaded), this accepts the ASTUnit +/// mainly to allow the caller to see the diagnostics. +/// +/// \param VFS - A llvm::vfs::FileSystem to be used for all file accesses. +/// Note that preamble is saved to a temporary directory on a RealFileSystem, +/// so in order for it to be loaded correctly, VFS should have access to +/// it(i.e., be an overlay over RealFileSystem). 
RealFileSystem will be used +/// if \p VFS is nullptr. +/// +// FIXME: Move OnlyLocalDecls, UseBumpAllocator to setters on the ASTUnit, we +// shouldn't need to specify them at construction time. +std::unique_ptr CreateASTUnitFromCommandLine( + const char **ArgBegin, const char **ArgEnd, + std::shared_ptr PCHContainerOps, + std::shared_ptr DiagOpts, + IntrusiveRefCntPtr Diags, StringRef ResourceFilesPath, + bool StorePreamblesInMemory = false, + StringRef PreambleStoragePath = StringRef(), bool OnlyLocalDecls = false, + CaptureDiagsKind CaptureDiagnostics = CaptureDiagsKind::None, + ArrayRef RemappedFiles = {}, + bool RemappedFilesKeepOriginalName = true, + unsigned PrecompilePreambleAfterNParses = 0, + TranslationUnitKind TUKind = TU_Complete, + bool CacheCodeCompletionResults = false, + bool IncludeBriefCommentsInCodeCompletion = false, + bool AllowPCHWithCompilerErrors = false, + SkipFunctionBodiesScope SkipFunctionBodies = SkipFunctionBodiesScope::None, + bool SingleFileParse = false, bool UserFilesAreVolatile = false, + bool ForSerialization = false, bool RetainExcludedConditionalBlocks = false, + std::optional ModuleFormat = std::nullopt, + std::unique_ptr *ErrAST = nullptr, + IntrusiveRefCntPtr VFS = nullptr); + +} // namespace clang + +#endif // LLVM_CLANG_DRIVER_CREATEASTUNITFROMARGS_H diff --git a/clang/include/clang/Driver/CreateInvocationFromArgs.h b/clang/include/clang/Driver/CreateInvocationFromArgs.h new file mode 100644 index 0000000000000..0e0f67373ce87 --- /dev/null +++ b/clang/include/clang/Driver/CreateInvocationFromArgs.h @@ -0,0 +1,76 @@ +//===--- CreateInvocationFromArgs.h - CompilerInvocation from Args --------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// Utility for creating a CompilerInvocation from command-line arguments, for +// tools to use in preparation to parse a file. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_DRIVER_CREATEINVOCATIONFROMARGS_H +#define LLVM_CLANG_DRIVER_CREATEINVOCATIONFROMARGS_H + +#include "clang/Basic/Diagnostic.h" +#include "clang/Basic/LLVM.h" +#include "llvm/Support/VirtualFileSystem.h" +#include +#include +#include + +namespace clang { + +class CompilerInvocation; +class DiagnosticsEngine; + +/// Optional inputs to createInvocation. +struct CreateInvocationOptions { + /// Receives diagnostics encountered while parsing command-line flags. + /// If not provided, these are printed to stderr. + IntrusiveRefCntPtr Diags = nullptr; + /// Used e.g. to probe for system headers locations. + /// If not provided, the real filesystem is used. + /// FIXME: the driver does perform some non-virtualized IO. + IntrusiveRefCntPtr VFS = nullptr; + /// Whether to attempt to produce a non-null (possibly incorrect) invocation + /// if any errors were encountered. + /// By default, always return null on errors. + bool RecoverOnError = false; + /// Allow the driver to probe the filesystem for PCH files. + /// This is used to replace -include with -include-pch in the cc1 args. + /// FIXME: ProbePrecompiled=true is a poor, historical default. + /// It misbehaves if the PCH file is from GCC, has the wrong version, etc. + bool ProbePrecompiled = false; + /// If set, the target is populated with the cc1 args produced by the driver. + /// This may be populated even if createInvocation returns nullptr. + std::vector *CC1Args = nullptr; +}; + +/// Interpret clang arguments in preparation to parse a file. 
+/// +/// This simulates a number of steps Clang takes when its driver is invoked: +/// - choosing actions (e.g compile + link) to run +/// - probing the system for settings like standard library locations +/// - spawning a cc1 subprocess to compile code, with more explicit arguments +/// - in the cc1 process, assembling those arguments into a CompilerInvocation +/// which is used to configure the parser +/// +/// This simulation is lossy, e.g. in some situations one driver run would +/// result in multiple parses. (Multi-arch, CUDA, ...). +/// This function tries to select a reasonable invocation that tools should use. +/// +/// Args[0] should be the driver name, such as "clang" or "/usr/bin/g++". +/// Absolute path is preferred - this affects searching for system headers. +/// +/// May return nullptr if an invocation could not be determined. +/// See CreateInvocationOptions::RecoverOnError to try harder! +std::unique_ptr +createInvocation(ArrayRef Args, + CreateInvocationOptions Opts = {}); + +} // namespace clang + +#endif // LLVM_CLANG_DRIVER_CREATEINVOCATIONFROMARGS_H diff --git a/clang/include/clang/Driver/Driver.h b/clang/include/clang/Driver/Driver.h index 83bcb7cab550f..76a6c5a128efb 100644 --- a/clang/include/clang/Driver/Driver.h +++ b/clang/include/clang/Driver/Driver.h @@ -406,10 +406,6 @@ class Driver { SmallString<128> &CrashDiagDir); public: - /// Takes the path to a binary that's either in bin/ or lib/ and returns - /// the path to clang's resource directory. 
- static std::string GetResourcesPath(StringRef BinaryPath); - Driver(StringRef ClangExecutable, StringRef TargetTriple, DiagnosticsEngine &Diags, std::string Title = "clang LLVM compiler", IntrusiveRefCntPtr VFS = nullptr); diff --git a/clang/include/clang/Format/Format.h b/clang/include/clang/Format/Format.h index b6f124f948b59..c7e57d47f9ed1 100644 --- a/clang/include/clang/Format/Format.h +++ b/clang/include/clang/Format/Format.h @@ -3275,9 +3275,20 @@ struct FormatStyle { /// Hex: -1 /// \endcode /// - /// You can also specify a minimum number of digits (``BinaryMinDigits``, - /// ``DecimalMinDigits``, and ``HexMinDigits``) the integer literal must - /// have in order for the separators to be inserted. + /// You can also specify a minimum number of digits + /// (``BinaryMinDigitsInsert``, ``DecimalMinDigitsInsert``, and + /// ``HexMinDigitsInsert``) the integer literal must have in order for the + /// separators to be inserted, and a maximum number of digits + /// (``BinaryMaxDigitsRemove``, ``DecimalMaxDigitsRemove``, and + /// ``HexMaxDigitsRemove``) until the separators are removed. This divides the + /// literals in 3 regions, always without separator (up until including + /// ``xxxMaxDigitsRemove``), maybe with, or without separators (up until + /// excluding ``xxxMinDigitsInsert``), and finally always with separators. + /// \note + /// ``BinaryMinDigits``, ``DecimalMinDigits``, and ``HexMinDigits`` are + /// deprecated and renamed to ``BinaryMinDigitsInsert``, + /// ``DecimalMinDigitsInsert``, and ``HexMinDigitsInsert``, respectively. + /// \endnote struct IntegerLiteralSeparatorStyle { /// Format separators in binary literals. /// \code{.text} @@ -3290,11 +3301,23 @@ struct FormatStyle { /// Format separators in binary literals with a minimum number of digits. 
/// \code{.text} /// // Binary: 3 - /// // BinaryMinDigits: 7 + /// // BinaryMinDigitsInsert: 7 /// b1 = 0b101101; /// b2 = 0b1'101'101; /// \endcode - int8_t BinaryMinDigits; + int8_t BinaryMinDigitsInsert; + /// Remove separators in binary literals with a maximum number of digits. + /// \code{.text} + /// // Binary: 3 + /// // BinaryMinDigitsInsert: 7 + /// // BinaryMaxDigitsRemove: 4 + /// b0 = 0b1011; // Always removed. + /// b1 = 0b101101; // Not added. + /// b2 = 0b1'01'101; // Not removed, not corrected. + /// b3 = 0b1'101'101; // Always added. + /// b4 = 0b10'1101; // Corrected to 0b101'101. + /// \endcode + int8_t BinaryMaxDigitsRemove; /// Format separators in decimal literals. /// \code{.text} /// /* -1: */ d = 18446744073709550592ull; @@ -3305,11 +3328,23 @@ struct FormatStyle { /// Format separators in decimal literals with a minimum number of digits. /// \code{.text} /// // Decimal: 3 - /// // DecimalMinDigits: 5 + /// // DecimalMinDigitsInsert: 5 /// d1 = 2023; /// d2 = 10'000; /// \endcode - int8_t DecimalMinDigits; + int8_t DecimalMinDigitsInsert; + /// Remove separators in decimal literals with a maximum number of digits. + /// \code{.text} + /// // Decimal: 3 + /// // DecimalMinDigitsInsert: 7 + /// // DecimalMaxDigitsRemove: 4 + /// d0 = 2023; // Always removed. + /// d1 = 123456; // Not added. + /// d2 = 1'23'456; // Not removed, not corrected. + /// d3 = 5'000'000; // Always added. + /// d4 = 1'23'45; // Corrected to 12'345. + /// \endcode + int8_t DecimalMaxDigitsRemove; /// Format separators in hexadecimal literals. /// \code{.text} /// /* -1: */ h = 0xDEADBEEFDEADBEEFuz; @@ -3321,15 +3356,36 @@ struct FormatStyle { /// digits. /// \code{.text} /// // Hex: 2 - /// // HexMinDigits: 6 + /// // HexMinDigitsInsert: 6 /// h1 = 0xABCDE; /// h2 = 0xAB'CD'EF; /// \endcode - int8_t HexMinDigits; + int8_t HexMinDigitsInsert; + /// Remove separators in hexadecimal literals with a maximum number of + /// digits. 
+ /// \code{.text} + /// // Hex: 2 + /// // HexMinDigitsInsert: 6 + /// // HexMaxDigitsRemove: 4 + /// h0 = 0xAFFE; // Always removed. + /// h1 = 0xABCDE; // Not added. + /// h2 = 0xABC'DE; // Not removed, not corrected. + /// h3 = 0xAB'CD'EF; // Always added. + /// h4 = 0xABCD'E; // Corrected to 0xA'BC'DE. + /// \endcode + int8_t HexMaxDigitsRemove; bool operator==(const IntegerLiteralSeparatorStyle &R) const { - return Binary == R.Binary && BinaryMinDigits == R.BinaryMinDigits && - Decimal == R.Decimal && DecimalMinDigits == R.DecimalMinDigits && - Hex == R.Hex && HexMinDigits == R.HexMinDigits; + return Binary == R.Binary && + BinaryMinDigitsInsert == R.BinaryMinDigitsInsert && + BinaryMaxDigitsRemove == R.BinaryMaxDigitsRemove && + Decimal == R.Decimal && + DecimalMinDigitsInsert == R.DecimalMinDigitsInsert && + DecimalMaxDigitsRemove == R.DecimalMaxDigitsRemove && + Hex == R.Hex && HexMinDigitsInsert == R.HexMinDigitsInsert && + HexMaxDigitsRemove == R.HexMaxDigitsRemove; + } + bool operator!=(const IntegerLiteralSeparatorStyle &R) const { + return !operator==(R); } }; diff --git a/clang/include/clang/Frontend/ASTUnit.h b/clang/include/clang/Frontend/ASTUnit.h index e585933a5c8be..341460e1962cb 100644 --- a/clang/include/clang/Frontend/ASTUnit.h +++ b/clang/include/clang/Frontend/ASTUnit.h @@ -23,11 +23,13 @@ #include "clang/Basic/SourceManager.h" #include "clang/Basic/TargetOptions.h" #include "clang/Frontend/PrecompiledPreamble.h" +#include "clang/Frontend/StandaloneDiagnostic.h" #include "clang/Lex/HeaderSearchOptions.h" #include "clang/Lex/ModuleLoader.h" #include "clang/Lex/PreprocessingRecord.h" #include "clang/Sema/CodeCompleteConsumer.h" #include "clang/Serialization/ASTBitCodes.h" +#include "clang/Serialization/ASTWriter.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/IntrusiveRefCntPtr.h" @@ -36,6 +38,7 @@ #include "llvm/ADT/StringMap.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/iterator_range.h" +#include 
"llvm/Bitstream/BitstreamWriter.h" #include #include #include @@ -88,25 +91,6 @@ enum class CaptureDiagsKind { None, All, AllWithoutNonErrorsFromIncludes }; /// Utility class for loading a ASTContext from an AST file. class ASTUnit { -public: - struct StandaloneFixIt { - std::pair RemoveRange; - std::pair InsertFromRange; - std::string CodeToInsert; - bool BeforePreviousInsertions; - }; - - struct StandaloneDiagnostic { - unsigned ID; - DiagnosticsEngine::Level Level; - std::string Message; - std::string Filename; - unsigned LocOffset; - std::vector> Ranges; - std::vector FixIts; - }; - -private: std::unique_ptr LangOpts; std::unique_ptr CodeGenOpts; // FIXME: The documentation on \c LoadFrom* member functions states that the @@ -129,7 +113,15 @@ class ASTUnit { bool HadModuleLoaderFatalFailure = false; bool StorePreamblesInMemory = false; - struct ASTWriterData; + /// Utility struct for managing ASTWriter and its associated data streams. + struct ASTWriterData { + SmallString<128> Buffer; + llvm::BitstreamWriter Stream; + ASTWriter Writer; + + ASTWriterData(ModuleCache &ModCache, const CodeGenOptions &CGOpts) + : Stream(Buffer), Writer(Stream, Buffer, ModCache, CGOpts, {}) {} + }; std::unique_ptr WriterData; FileSystemOptions FileSystemOpts; @@ -271,11 +263,6 @@ class ASTUnit { static void ConfigureDiags(IntrusiveRefCntPtr Diags, ASTUnit &AST, CaptureDiagsKind CaptureDiagnostics); - void - TranslateStoredDiagnostics(FileManager &FileMgr, SourceManager &SrcMan, - const SmallVectorImpl &Diags, - SmallVectorImpl &Out); - void clearFileLevelDecls(); public: @@ -834,65 +821,24 @@ class ASTUnit { bool IncludeBriefCommentsInCodeCompletion = false, bool UserFilesAreVolatile = false); - /// LoadFromCommandLine - Create an ASTUnit from a vector of command line - /// arguments, which must specify exactly one source file. - /// - /// \param ArgBegin - The beginning of the argument vector. - /// - /// \param ArgEnd - The end of the argument vector. 
- /// - /// \param PCHContainerOps - The PCHContainerOperations to use for loading and - /// creating modules. - /// - /// \param Diags - The diagnostics engine to use for reporting errors; its - /// lifetime is expected to extend past that of the returned ASTUnit. - /// - /// \param ResourceFilesPath - The path to the compiler resource files. - /// - /// \param StorePreamblesInMemory - Whether to store PCH in memory. If false, - /// PCH are stored in temporary files. - /// - /// \param PreambleStoragePath - The path to a directory, in which to create - /// temporary PCH files. If empty, the default system temporary directory is - /// used. This parameter is ignored if \p StorePreamblesInMemory is true. - /// - /// \param ModuleFormat - If provided, uses the specific module format. - /// - /// \param ErrAST - If non-null and parsing failed without any AST to return - /// (e.g. because the PCH could not be loaded), this accepts the ASTUnit - /// mainly to allow the caller to see the diagnostics. - /// - /// \param VFS - A llvm::vfs::FileSystem to be used for all file accesses. - /// Note that preamble is saved to a temporary directory on a RealFileSystem, - /// so in order for it to be loaded correctly, VFS should have access to - /// it(i.e., be an overlay over RealFileSystem). RealFileSystem will be used - /// if \p VFS is nullptr. - /// - // FIXME: Move OnlyLocalDecls, UseBumpAllocator to setters on the ASTUnit, we - // shouldn't need to specify them at construction time. 
- static std::unique_ptr LoadFromCommandLine( + friend std::unique_ptr CreateASTUnitFromCommandLine( const char **ArgBegin, const char **ArgEnd, std::shared_ptr PCHContainerOps, std::shared_ptr DiagOpts, IntrusiveRefCntPtr Diags, StringRef ResourceFilesPath, - bool StorePreamblesInMemory = false, - StringRef PreambleStoragePath = StringRef(), bool OnlyLocalDecls = false, - CaptureDiagsKind CaptureDiagnostics = CaptureDiagsKind::None, - ArrayRef RemappedFiles = {}, - bool RemappedFilesKeepOriginalName = true, - unsigned PrecompilePreambleAfterNParses = 0, - TranslationUnitKind TUKind = TU_Complete, - bool CacheCodeCompletionResults = false, - bool IncludeBriefCommentsInCodeCompletion = false, - bool AllowPCHWithCompilerErrors = false, - SkipFunctionBodiesScope SkipFunctionBodies = - SkipFunctionBodiesScope::None, - bool SingleFileParse = false, bool UserFilesAreVolatile = false, - bool ForSerialization = false, - bool RetainExcludedConditionalBlocks = false, - std::optional ModuleFormat = std::nullopt, - std::unique_ptr *ErrAST = nullptr, - IntrusiveRefCntPtr VFS = nullptr); + bool StorePreamblesInMemory, StringRef PreambleStoragePath, + bool OnlyLocalDecls, CaptureDiagsKind CaptureDiagnostics, + ArrayRef RemappedFiles, + bool RemappedFilesKeepOriginalName, + unsigned PrecompilePreambleAfterNParses, TranslationUnitKind TUKind, + bool CacheCodeCompletionResults, + bool IncludeBriefCommentsInCodeCompletion, + bool AllowPCHWithCompilerErrors, + SkipFunctionBodiesScope SkipFunctionBodies, bool SingleFileParse, + bool UserFilesAreVolatile, bool ForSerialization, + bool RetainExcludedConditionalBlocks, + std::optional ModuleFormat, std::unique_ptr *ErrAST, + IntrusiveRefCntPtr VFS); /// Reparse the source files using the same command-line options that /// were originally used to produce this translation unit. @@ -963,6 +909,44 @@ class ASTUnit { bool serialize(raw_ostream &OS); }; +/// Diagnostic consumer that saves each diagnostic it is given. 
+class FilterAndStoreDiagnosticConsumer : public DiagnosticConsumer { + SmallVectorImpl *StoredDiags; + SmallVectorImpl *StandaloneDiags; + bool CaptureNonErrorsFromIncludes = true; + const LangOptions *LangOpts = nullptr; + SourceManager *SourceMgr = nullptr; + +public: + FilterAndStoreDiagnosticConsumer( + SmallVectorImpl *StoredDiags, + SmallVectorImpl *StandaloneDiags, + bool CaptureNonErrorsFromIncludes); + + void BeginSourceFile(const LangOptions &LangOpts, + const Preprocessor *PP = nullptr) override; + + void HandleDiagnostic(DiagnosticsEngine::Level Level, + const Diagnostic &Info) override; +}; + +/// RAII object that optionally captures and filters diagnostics, if +/// there is no diagnostic client to capture them already. +class CaptureDroppedDiagnostics { + DiagnosticsEngine &Diags; + FilterAndStoreDiagnosticConsumer Client; + DiagnosticConsumer *PreviousClient = nullptr; + std::unique_ptr OwningPreviousClient; + +public: + CaptureDroppedDiagnostics( + CaptureDiagsKind CaptureDiagnostics, DiagnosticsEngine &Diags, + SmallVectorImpl *StoredDiags, + SmallVectorImpl *StandaloneDiags); + + ~CaptureDroppedDiagnostics(); +}; + } // namespace clang #endif // LLVM_CLANG_FRONTEND_ASTUNIT_H diff --git a/clang/include/clang/Frontend/CompilerInvocation.h b/clang/include/clang/Frontend/CompilerInvocation.h index b19a6e1a8acc3..4977ddb307d21 100644 --- a/clang/include/clang/Frontend/CompilerInvocation.h +++ b/clang/include/clang/Frontend/CompilerInvocation.h @@ -299,16 +299,6 @@ class CompilerInvocation : public CompilerInvocationBase { DiagnosticsEngine &Diags, const char *Argv0 = nullptr); - /// Get the directory where the compiler headers - /// reside, relative to the compiler binary (found by the passed in - /// arguments). - /// - /// \param Argv0 - The program path (from argv[0]), for finding the builtin - /// compiler path. - /// \param MainAddr - The address of main (or some other function in the main - /// executable), for finding the builtin compiler path. 
- static std::string GetResourcesPath(const char *Argv0, void *MainAddr); - /// Populate \p Opts with the default set of pointer authentication-related /// options given \p LangOpts and \p Triple. /// diff --git a/clang/include/clang/Frontend/StandaloneDiagnostic.h b/clang/include/clang/Frontend/StandaloneDiagnostic.h new file mode 100644 index 0000000000000..c23d5f95e0c2f --- /dev/null +++ b/clang/include/clang/Frontend/StandaloneDiagnostic.h @@ -0,0 +1,82 @@ +//===--- StandaloneDiagnostic.h - Serializable Diagnostic -------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// A serializable diagnostic representation to retain diagnostics after their +// SourceManager has been destroyed. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_FRONTEND_STANDALONEDIAGNOSTICS_H +#define LLVM_CLANG_FRONTEND_STANDALONEDIAGNOSTICS_H + +#include "clang/Basic/DiagnosticIDs.h" +#include "clang/Basic/DiagnosticOptions.h" +#include "clang/Basic/SourceLocation.h" +#include "clang/Basic/SourceManager.h" +#include "clang/Basic/Specifiers.h" +#include "llvm/ADT/StringExtras.h" +#include +#include +#include + +namespace clang { + +/// Represents a StoredDiagnostic in a form that can be retained until after its +/// SourceManager has been destroyed. +/// +/// Source locations are stored as a combination of filename and offsets into +/// that file. +/// To report the diagnostic, it must first be translated back into a +/// StoredDiagnostic with a new associated SourceManager. +struct StandaloneDiagnostic { + /// Represents a CharSourceRange within a StandaloneDiagnostic. 
+ struct SourceOffsetRange { + SourceOffsetRange(CharSourceRange Range, const SourceManager &SrcMgr, + const LangOptions &LangOpts); + + unsigned Begin = 0; + unsigned End = 0; + }; + + /// Represents a FixItHint within a StandaloneDiagnostic. + struct StandaloneFixIt { + StandaloneFixIt(const SourceManager &SrcMgr, const LangOptions &LangOpts, + const FixItHint &FixIt); + + SourceOffsetRange RemoveRange; + SourceOffsetRange InsertFromRange; + std::string CodeToInsert; + bool BeforePreviousInsertions; + }; + + StandaloneDiagnostic(const LangOptions &LangOpts, + const StoredDiagnostic &InDiag); + + DiagnosticsEngine::Level Level; + SrcMgr::CharacteristicKind FileKind; + unsigned ID = 0; + unsigned FileOffset = 0; + std::string Message; + std::string Filename; + std::vector Ranges; + std::vector FixIts; +}; + +/// Translates \c StandaloneDiag into a StoredDiagnostic, associating it with +/// the provided FileManager and SourceManager. +/// +/// This allows the diagnostic to be emitted using the diagnostics engine, since +/// StandaloneDiagnostics themselfs cannot be emitted directly. +StoredDiagnostic +translateStandaloneDiag(FileManager &FileMgr, SourceManager &SrcMgr, + const StandaloneDiagnostic &StandaloneDiag, + llvm::StringMap &SrcLocCache); + +} // namespace clang + +#endif // STANDALONEDIAGNOSTICS diff --git a/clang/include/clang/Frontend/Utils.h b/clang/include/clang/Frontend/Utils.h index ed2703c76f18d..1c561b47b5c47 100644 --- a/clang/include/clang/Frontend/Utils.h +++ b/clang/include/clang/Frontend/Utils.h @@ -192,51 +192,6 @@ IntrusiveRefCntPtr createChainedIncludesSource(CompilerInstance &CI, IntrusiveRefCntPtr &OutReader); -/// Optional inputs to createInvocation. -struct CreateInvocationOptions { - /// Receives diagnostics encountered while parsing command-line flags. - /// If not provided, these are printed to stderr. - IntrusiveRefCntPtr Diags = nullptr; - /// Used e.g. to probe for system headers locations. 
- /// If not provided, the real filesystem is used. - /// FIXME: the driver does perform some non-virtualized IO. - IntrusiveRefCntPtr VFS = nullptr; - /// Whether to attempt to produce a non-null (possibly incorrect) invocation - /// if any errors were encountered. - /// By default, always return null on errors. - bool RecoverOnError = false; - /// Allow the driver to probe the filesystem for PCH files. - /// This is used to replace -include with -include-pch in the cc1 args. - /// FIXME: ProbePrecompiled=true is a poor, historical default. - /// It misbehaves if the PCH file is from GCC, has the wrong version, etc. - bool ProbePrecompiled = false; - /// If set, the target is populated with the cc1 args produced by the driver. - /// This may be populated even if createInvocation returns nullptr. - std::vector *CC1Args = nullptr; -}; - -/// Interpret clang arguments in preparation to parse a file. -/// -/// This simulates a number of steps Clang takes when its driver is invoked: -/// - choosing actions (e.g compile + link) to run -/// - probing the system for settings like standard library locations -/// - spawning a cc1 subprocess to compile code, with more explicit arguments -/// - in the cc1 process, assembling those arguments into a CompilerInvocation -/// which is used to configure the parser -/// -/// This simulation is lossy, e.g. in some situations one driver run would -/// result in multiple parses. (Multi-arch, CUDA, ...). -/// This function tries to select a reasonable invocation that tools should use. -/// -/// Args[0] should be the driver name, such as "clang" or "/usr/bin/g++". -/// Absolute path is preferred - this affects searching for system headers. -/// -/// May return nullptr if an invocation could not be determined. -/// See CreateInvocationOptions::ShouldRecoverOnErrors to try harder! 
-std::unique_ptr -createInvocation(ArrayRef Args, - CreateInvocationOptions Opts = {}); - } // namespace clang #endif // LLVM_CLANG_FRONTEND_UTILS_H diff --git a/clang/include/clang/Options/OptionUtils.h b/clang/include/clang/Options/OptionUtils.h index 83c48bd7d6843..02c9c27554db1 100644 --- a/clang/include/clang/Options/OptionUtils.h +++ b/clang/include/clang/Options/OptionUtils.h @@ -28,6 +28,7 @@ class ArgList; } // namespace llvm namespace clang { + /// Return the value of the last argument as an integer, or a default. If Diags /// is non-null, emits an error if the argument is given, but non-integral. int getLastArgIntValue(const llvm::opt::ArgList &Args, @@ -53,6 +54,29 @@ inline uint64_t getLastArgUInt64Value(const llvm::opt::ArgList &Args, return getLastArgUInt64Value(Args, Id, Default, &Diags, Base); } +// Parse -mprefer-vector-width=. Return the Value string if well-formed. +// Otherwise, return an empty string and issue a diagnosic message if needed. +StringRef parseMPreferVectorWidthOption(clang::DiagnosticsEngine &Diags, + const llvm::opt::ArgList &Args); + +// Parse -mrecip. Return the Value string if well-formed. +// Otherwise, return an empty string and issue a diagnosic message if needed. +StringRef parseMRecipOption(clang::DiagnosticsEngine &Diags, + const llvm::opt::ArgList &Args); + +/// Get the directory where the compiler headers reside, relative to the +/// compiler binary path \p BinaryPath. +std::string GetResourcesPath(StringRef BinaryPath); + +/// Get the directory where the compiler headers reside, relative to the +/// compiler binary path (found by the passed in arguments). +/// +/// \param Argv0 The program path (from argv[0]), for finding the builtin +/// compiler path. +/// \param MainAddr The address of main (or some other function in the main +/// executable), for finding the builtin compiler path. 
+std::string GetResourcesPath(const char *Argv0, void *MainAddr); + } // namespace clang #endif // LLVM_CLANG_OPTIONS_OPTIONUTILS_H diff --git a/clang/include/clang/Options/Options.td b/clang/include/clang/Options/Options.td index a8fc1c4326cc5..756d6deed7130 100644 --- a/clang/include/clang/Options/Options.td +++ b/clang/include/clang/Options/Options.td @@ -1427,6 +1427,16 @@ def fhip_emit_relocatable : Flag<["-"], "fhip-emit-relocatable">, HelpText<"Compile HIP source to relocatable">; def fno_hip_emit_relocatable : Flag<["-"], "fno-hip-emit-relocatable">, HelpText<"Do not override toolchain to compile HIP source to relocatable">; +def use_spirv_backend + : Flag<["-"], "use-spirv-backend">, + Group, + Flags<[HelpHidden]>, + HelpText<"Use the SPIRV backend for compilation ">; +def no_use_spirv_backend + : Flag<["-"], "no-use-spirv-backend">, + Group, + Flags<[HelpHidden]>, + HelpText<"Do not use the SPIRV backend for compilation ">; } // Clang specific/exclusive options for OpenACC. @@ -4832,6 +4842,14 @@ defm column_info : BoolOption<"g", "column-info", NegFlag, PosFlag, BothFlags<[], [ClangOption, CLOption, DXCOption]>>, Group; +defm call_site_info : BoolOption<"g", "call-site-info", + CodeGenOpts<"DebugCallSiteInfo">, + DefaultTrue, + PosFlag, + NegFlag, + BothFlags<[], [ClangOption, CC1Option], " call site debug info">>, + Group, + DocBrief<[{Call site debug info enables various debugger features including detecting tail calls for display in backtraces and displaying some source variable values that reference the call entry value.}]>; def gsplit_dwarf : Flag<["-"], "gsplit-dwarf">, Group, Visibility<[ClangOption, CLOption, DXCOption, FlangOption]>; def gsplit_dwarf_EQ : Joined<["-"], "gsplit-dwarf=">, Group, diff --git a/clang/include/clang/Sema/Sema.h b/clang/include/clang/Sema/Sema.h index ae500139ee6f7..78ecbccbe4efc 100644 --- a/clang/include/clang/Sema/Sema.h +++ b/clang/include/clang/Sema/Sema.h @@ -8721,10 +8721,6 @@ class Sema final : public SemaBase { 
ExprResult &RHS, SourceLocation QuestionLoc); - QualType CheckSizelessVectorConditionalTypes(ExprResult &Cond, - ExprResult &LHS, ExprResult &RHS, - SourceLocation QuestionLoc); - //// Determines if a type is trivially relocatable /// according to the C++26 rules. // FIXME: This is in Sema because it requires diff --git a/clang/lib/AST/ByteCode/Integral.h b/clang/lib/AST/ByteCode/Integral.h index 5bf6621874e69..e90f1a9a74e1c 100644 --- a/clang/lib/AST/ByteCode/Integral.h +++ b/clang/lib/AST/ByteCode/Integral.h @@ -217,10 +217,6 @@ template class Integral final { return Integral(Value.V); } - static bool inRange(int64_t Value, unsigned NumBits) { - return CheckRange(Value); - } - static bool increment(Integral A, Integral *R) { return add(A, Integral(ReprT(1)), A.bitWidth(), R); } @@ -323,13 +319,6 @@ template class Integral final { return false; } } - template static bool CheckRange(int64_t V) { - if constexpr (std::is_signed_v) { - return Min <= V && V <= Max; - } else { - return V >= 0 && static_cast(V) <= Max; - } - } }; template diff --git a/clang/lib/AST/ByteCode/Interp.cpp b/clang/lib/AST/ByteCode/Interp.cpp index 4222fd97a84fa..c3210d7119b40 100644 --- a/clang/lib/AST/ByteCode/Interp.cpp +++ b/clang/lib/AST/ByteCode/Interp.cpp @@ -2081,15 +2081,15 @@ bool InvalidShuffleVectorIndex(InterpState &S, CodePtr OpPC, uint32_t Index) { bool CheckPointerToIntegralCast(InterpState &S, CodePtr OpPC, const Pointer &Ptr, unsigned BitWidth) { + const SourceInfo &E = S.Current->getSource(OpPC); + S.CCEDiag(E, diag::note_constexpr_invalid_cast) + << 2 << S.getLangOpts().CPlusPlus << S.Current->getRange(OpPC); + if (Ptr.isDummy()) return false; if (Ptr.isFunctionPointer()) return true; - const SourceInfo &E = S.Current->getSource(OpPC); - S.CCEDiag(E, diag::note_constexpr_invalid_cast) - << 2 << S.getLangOpts().CPlusPlus << S.Current->getRange(OpPC); - if (Ptr.isBlockPointer() && !Ptr.isZero()) { // Only allow based lvalue casts if they are lossless. 
if (S.getASTContext().getTargetInfo().getPointerWidth(LangAS::Default) != diff --git a/clang/lib/AST/ByteCode/Interp.h b/clang/lib/AST/ByteCode/Interp.h index 86b1ba88ca9d4..d8b8b209fa927 100644 --- a/clang/lib/AST/ByteCode/Interp.h +++ b/clang/lib/AST/ByteCode/Interp.h @@ -2646,10 +2646,6 @@ template ::T> bool CastPointerIntegral(InterpState &S, CodePtr OpPC) { const Pointer &Ptr = S.Stk.pop(); - S.CCEDiag(S.Current->getSource(OpPC), diag::note_constexpr_invalid_cast) - << diag::ConstexprInvalidCastKind::ThisConversionOrReinterpret - << S.getLangOpts().CPlusPlus << S.Current->getRange(OpPC); - if (!CheckPointerToIntegralCast(S, OpPC, Ptr, T::bitWidth())) return Invalid(S, OpPC); diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp index 2ab40ac9cc89c..659b6985d3157 100644 --- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp +++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp @@ -4847,6 +4847,39 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call, return interp__builtin_elementwise_triop(S, OpPC, Call, llvm::APIntOps::fshr); + case X86::BI__builtin_ia32_shuf_f32x4_256: + case X86::BI__builtin_ia32_shuf_i32x4_256: + case X86::BI__builtin_ia32_shuf_f64x2_256: + case X86::BI__builtin_ia32_shuf_i64x2_256: + case X86::BI__builtin_ia32_shuf_f32x4: + case X86::BI__builtin_ia32_shuf_i32x4: + case X86::BI__builtin_ia32_shuf_f64x2: + case X86::BI__builtin_ia32_shuf_i64x2: { + // Destination and sources A, B all have the same type. 
+ QualType VecQT = Call->getArg(0)->getType(); + const auto *VecT = VecQT->castAs(); + unsigned NumElems = VecT->getNumElements(); + unsigned ElemBits = S.getASTContext().getTypeSize(VecT->getElementType()); + unsigned LaneBits = 128u; + unsigned NumLanes = (NumElems * ElemBits) / LaneBits; + unsigned NumElemsPerLane = LaneBits / ElemBits; + + return interp__builtin_ia32_shuffle_generic( + S, OpPC, Call, + [NumLanes, NumElemsPerLane](unsigned DstIdx, unsigned ShuffleMask) { + // DstIdx determines source. ShuffleMask selects lane in source. + unsigned BitsPerElem = NumLanes / 2; + unsigned IndexMask = (1u << BitsPerElem) - 1; + unsigned Lane = DstIdx / NumElemsPerLane; + unsigned SrcIdx = (Lane < NumLanes / 2) ? 0 : 1; + unsigned BitIdx = BitsPerElem * Lane; + unsigned SrcLaneIdx = (ShuffleMask >> BitIdx) & IndexMask; + unsigned ElemInLane = DstIdx % NumElemsPerLane; + unsigned IdxToPick = SrcLaneIdx * NumElemsPerLane + ElemInLane; + return std::pair{SrcIdx, IdxToPick}; + }); + } + case X86::BI__builtin_ia32_insertf32x4_256: case X86::BI__builtin_ia32_inserti32x4_256: case X86::BI__builtin_ia32_insertf64x2_256: diff --git a/clang/lib/AST/ByteCode/Program.cpp b/clang/lib/AST/ByteCode/Program.cpp index c468303efea7e..d96934071cb60 100644 --- a/clang/lib/AST/ByteCode/Program.cpp +++ b/clang/lib/AST/ByteCode/Program.cpp @@ -27,7 +27,7 @@ unsigned Program::getOrCreateNativePointer(const void *Ptr) { return It->second; } -const void *Program::getNativePointer(unsigned Idx) { +const void *Program::getNativePointer(unsigned Idx) const { return NativePointers[Idx]; } diff --git a/clang/lib/AST/ByteCode/Program.h b/clang/lib/AST/ByteCode/Program.h index cc9127dc77860..c8795504391fa 100644 --- a/clang/lib/AST/ByteCode/Program.h +++ b/clang/lib/AST/ByteCode/Program.h @@ -58,7 +58,7 @@ class Program final { unsigned getOrCreateNativePointer(const void *Ptr); /// Returns the value of a marshalled native pointer. 
- const void *getNativePointer(unsigned Idx); + const void *getNativePointer(unsigned Idx) const; /// Emits a string literal among global data. unsigned createGlobalString(const StringLiteral *S, diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp index 3b91678f7d400..88d7c79d3b99e 100644 --- a/clang/lib/AST/ExprConstant.cpp +++ b/clang/lib/AST/ExprConstant.cpp @@ -13517,6 +13517,56 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) { return Success(APValue(ResultElements.data(), ResultElements.size()), E); } + case X86::BI__builtin_ia32_shuf_f32x4_256: + case X86::BI__builtin_ia32_shuf_i32x4_256: + case X86::BI__builtin_ia32_shuf_f64x2_256: + case X86::BI__builtin_ia32_shuf_i64x2_256: + case X86::BI__builtin_ia32_shuf_f32x4: + case X86::BI__builtin_ia32_shuf_i32x4: + case X86::BI__builtin_ia32_shuf_f64x2: + case X86::BI__builtin_ia32_shuf_i64x2: { + APValue SourceA, SourceB; + if (!EvaluateAsRValue(Info, E->getArg(0), SourceA) || + !EvaluateAsRValue(Info, E->getArg(1), SourceB)) + return false; + + APSInt Imm; + if (!EvaluateInteger(E->getArg(2), Imm, Info)) + return false; + + // Destination and sources A, B all have the same type. + unsigned NumElems = SourceA.getVectorLength(); + const VectorType *VT = E->getArg(0)->getType()->castAs(); + QualType ElemQT = VT->getElementType(); + unsigned ElemBits = Info.Ctx.getTypeSize(ElemQT); + unsigned LaneBits = 128u; + unsigned NumLanes = (NumElems * ElemBits) / LaneBits; + unsigned NumElemsPerLane = LaneBits / ElemBits; + + unsigned DstLen = SourceA.getVectorLength(); + SmallVector ResultElements; + ResultElements.reserve(DstLen); + + APValue R; + if (!evalShuffleGeneric( + Info, E, R, + [NumLanes, NumElemsPerLane](unsigned DstIdx, unsigned ShuffleMask) + -> std::pair { + // DstIdx determines source. ShuffleMask selects lane in source. 
+ unsigned BitsPerElem = NumLanes / 2; + unsigned IndexMask = (1u << BitsPerElem) - 1; + unsigned Lane = DstIdx / NumElemsPerLane; + unsigned SrcIdx = (Lane < NumLanes / 2) ? 0 : 1; + unsigned BitIdx = BitsPerElem * Lane; + unsigned SrcLaneIdx = (ShuffleMask >> BitIdx) & IndexMask; + unsigned ElemInLane = DstIdx % NumElemsPerLane; + unsigned IdxToPick = SrcLaneIdx * NumElemsPerLane + ElemInLane; + return {SrcIdx, IdxToPick}; + })) + return false; + return Success(R, E); + } + case X86::BI__builtin_ia32_insertf32x4_256: case X86::BI__builtin_ia32_inserti32x4_256: case X86::BI__builtin_ia32_insertf64x2_256: diff --git a/clang/lib/Analysis/LifetimeSafety/FactsGenerator.cpp b/clang/lib/Analysis/LifetimeSafety/FactsGenerator.cpp index f7be472ed15b5..00870c3fd4086 100644 --- a/clang/lib/Analysis/LifetimeSafety/FactsGenerator.cpp +++ b/clang/lib/Analysis/LifetimeSafety/FactsGenerator.cpp @@ -15,18 +15,6 @@ namespace clang::lifetimes::internal { using llvm::isa_and_present; -static bool isGslPointerType(QualType QT) { - if (const auto *RD = QT->getAsCXXRecordDecl()) { - // We need to check the template definition for specializations. 
- if (auto *CTSD = dyn_cast(RD)) - return CTSD->getSpecializedTemplate() - ->getTemplatedDecl() - ->hasAttr(); - return RD->hasAttr(); - } - return false; -} - static bool isPointerType(QualType QT) { return QT->isPointerOrReferenceType() || isGslPointerType(QT); } diff --git a/clang/lib/Analysis/LifetimeSafety/LifetimeAnnotations.cpp b/clang/lib/Analysis/LifetimeSafety/LifetimeAnnotations.cpp index ad61a42c0eaeb..54e343fc2ee5e 100644 --- a/clang/lib/Analysis/LifetimeSafety/LifetimeAnnotations.cpp +++ b/clang/lib/Analysis/LifetimeSafety/LifetimeAnnotations.cpp @@ -10,6 +10,7 @@ #include "clang/AST/Attr.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclCXX.h" +#include "clang/AST/DeclTemplate.h" #include "clang/AST/Type.h" #include "clang/AST/TypeLoc.h" @@ -70,4 +71,34 @@ bool implicitObjectParamIsLifetimeBound(const FunctionDecl *FD) { return isNormalAssignmentOperator(FD); } +template static bool isRecordWithAttr(QualType Type) { + auto *RD = Type->getAsCXXRecordDecl(); + if (!RD) + return false; + // Generally, if a primary template class declaration is annotated with an + // attribute, all its specializations generated from template instantiations + // should inherit the attribute. + // + // However, since lifetime analysis occurs during parsing, we may encounter + // cases where a full definition of the specialization is not required. In + // such cases, the specialization declaration remains incomplete and lacks the + // attribute. Therefore, we fall back to checking the primary template class. + // + // Note: it is possible for a specialization declaration to have an attribute + // even if the primary template does not. + // + // FIXME: What if the primary template and explicit specialization + // declarations have conflicting attributes? We should consider diagnosing + // this scenario. 
+ bool Result = RD->hasAttr(); + + if (auto *CTSD = dyn_cast(RD)) + Result |= CTSD->getSpecializedTemplate()->getTemplatedDecl()->hasAttr(); + + return Result; +} + +bool isGslPointerType(QualType QT) { return isRecordWithAttr(QT); } +bool isGslOwnerType(QualType QT) { return isRecordWithAttr(QT); } + } // namespace clang::lifetimes diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp index 48c082d89de18..4c94db5ddd457 100644 --- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp @@ -644,6 +644,9 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest, case AtomicExpr::AO__scoped_atomic_nand_fetch: case AtomicExpr::AO__scoped_atomic_fetch_nand: + + case AtomicExpr::AO__scoped_atomic_uinc_wrap: + case AtomicExpr::AO__scoped_atomic_udec_wrap: cgf.cgm.errorNYI(expr->getSourceRange(), "emitAtomicOp: expr op NYI"); return; } diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 4f0fe918e778f..7d4d13121d5e5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -300,6 +300,17 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID, assert(!cir::MissingFeatures::fastMathFlags()); return emitUnaryMaybeConstrainedFPBuiltin(*this, *e); + case Builtin::BIexp2: + case Builtin::BIexp2f: + case Builtin::BIexp2l: + case Builtin::BI__builtin_exp2: + case Builtin::BI__builtin_exp2f: + case Builtin::BI__builtin_exp2f16: + case Builtin::BI__builtin_exp2l: + case Builtin::BI__builtin_exp2f128: + assert(!cir::MissingFeatures::fastMathFlags()); + return emitUnaryMaybeConstrainedFPBuiltin(*this, *e); + case Builtin::BIfabs: case Builtin::BIfabsf: case Builtin::BIfabsl: diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp index e7aa8a234efd9..0ebfd51b82b56 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp +++ 
b/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp @@ -21,13 +21,11 @@ using namespace clang; using namespace clang::CIRGen; template <typename... Operands> -static mlir::Value emitIntrinsicCallOp(CIRGenFunction &cgf, const CallExpr *e, - const std::string &str, +static mlir::Value emitIntrinsicCallOp(CIRGenBuilderTy &builder, + mlir::Location loc, const StringRef str, const mlir::Type &resTy, Operands &&...op) { - CIRGenBuilderTy &builder = cgf.getBuilder(); - mlir::Location location = cgf.getLoc(e->getExprLoc()); - return cir::LLVMIntrinsicCallOp::create(builder, location, + return cir::LLVMIntrinsicCallOp::create(builder, loc, builder.getStringAttr(str), resTy, std::forward<Operands>(op)...) .getResult(); @@ -68,10 +66,8 @@ static mlir::Value emitVectorFCmp(CIRGenBuilderTy &builder, return bitCast; } -static mlir::Value getMaskVecValue(CIRGenFunction &cgf, const CallExpr *expr, +static mlir::Value getMaskVecValue(CIRGenBuilderTy &builder, mlir::Location loc, mlir::Value mask, unsigned numElems) { - - CIRGenBuilderTy &builder = cgf.getBuilder(); auto maskTy = cir::VectorType::get( builder.getUIntNTy(1), cast<cir::IntType>(mask.getType()).getWidth()); mlir::Value maskVec = builder.createBitcast(mask, maskTy); @@ -84,12 +80,42 @@ static mlir::Value getMaskVecValue(CIRGenFunction &cgf, const CallExpr *expr, for (auto i : llvm::seq(0, numElems)) indices.push_back(cir::IntAttr::get(i32Ty, i)); - maskVec = builder.createVecShuffle(cgf.getLoc(expr->getExprLoc()), maskVec, - maskVec, indices); + maskVec = builder.createVecShuffle(loc, maskVec, maskVec, indices); } return maskVec; } +static mlir::Value emitX86MaskAddLogic(CIRGenFunction &cgf, + mlir::Location loc, + const std::string &intrinsicName, + SmallVectorImpl<mlir::Value> &ops) { + CIRGenBuilderTy &builder = cgf.getBuilder(); + auto intTy = cast<cir::IntType>(ops[0].getType()); + unsigned numElts = intTy.getWidth(); + mlir::Value lhsVec = getMaskVecValue(builder, loc, ops[0], numElts); + mlir::Value rhsVec = getMaskVecValue(builder, loc, ops[1], numElts); + mlir::Type vecTy = lhsVec.getType(); + mlir::Value resVec = emitIntrinsicCallOp(builder, loc, intrinsicName, vecTy, + mlir::ValueRange{lhsVec, rhsVec}); + return builder.createBitcast(resVec, ops[0].getType()); +} + +static mlir::Value emitX86MaskLogic(CIRGenFunction &cgf, mlir::Location loc, + cir::BinOpKind binOpKind, + SmallVectorImpl<mlir::Value> &ops, + bool invertLHS = false) { + CIRGenBuilderTy &builder = cgf.getBuilder(); + unsigned numElts = cast<cir::IntType>(ops[0].getType()).getWidth(); + mlir::Value lhs = getMaskVecValue(builder, loc, ops[0], numElts); + mlir::Value rhs = getMaskVecValue(builder, loc, ops[1], numElts); + + if (invertLHS) + lhs = builder.createNot(lhs); + return builder.createBitcast( + builder.createBinop(loc, lhs, binOpKind, rhs), + ops[0].getType()); +} + mlir::Value CIRGenFunction::emitX86BuiltinExpr(unsigned builtinID, const CallExpr *expr) { if (builtinID == Builtin::BI__builtin_cpu_is) { @@ -132,15 +158,20 @@ mlir::Value CIRGenFunction::emitX86BuiltinExpr(unsigned builtinID, default: return {}; case X86::BI_mm_clflush: - return emitIntrinsicCallOp(*this, expr, "x86.sse2.clflush", voidTy, ops[0]); + return emitIntrinsicCallOp(builder, getLoc(expr->getExprLoc()), + "x86.sse2.clflush", voidTy, ops[0]); case X86::BI_mm_lfence: - return emitIntrinsicCallOp(*this, expr, "x86.sse2.lfence", voidTy); + return emitIntrinsicCallOp(builder, getLoc(expr->getExprLoc()), + "x86.sse2.lfence", voidTy); case X86::BI_mm_pause: - return emitIntrinsicCallOp(*this, expr, "x86.sse2.pause", voidTy); + return emitIntrinsicCallOp(builder, getLoc(expr->getExprLoc()), + "x86.sse2.pause", voidTy); case X86::BI_mm_mfence: - return emitIntrinsicCallOp(*this, expr, "x86.sse2.mfence", voidTy); + return emitIntrinsicCallOp(builder, getLoc(expr->getExprLoc()), + "x86.sse2.mfence", voidTy); case X86::BI_mm_sfence: - return emitIntrinsicCallOp(*this, expr, "x86.sse.sfence", voidTy); + return emitIntrinsicCallOp(builder, getLoc(expr->getExprLoc()), + "x86.sse.sfence", voidTy); case X86::BI_mm_prefetch: case X86::BI__rdtsc: case X86::BI__builtin_ia32_rdtscp: { @@ 
-152,15 +183,17 @@ mlir::Value CIRGenFunction::emitX86BuiltinExpr(unsigned builtinID, case X86::BI__builtin_ia32_lzcnt_u16: case X86::BI__builtin_ia32_lzcnt_u32: case X86::BI__builtin_ia32_lzcnt_u64: { - mlir::Value isZeroPoison = builder.getFalse(getLoc(expr->getExprLoc())); - return emitIntrinsicCallOp(*this, expr, "ctlz", ops[0].getType(), + mlir::Location loc = getLoc(expr->getExprLoc()); + mlir::Value isZeroPoison = builder.getFalse(loc); + return emitIntrinsicCallOp(builder, loc, "ctlz", ops[0].getType(), mlir::ValueRange{ops[0], isZeroPoison}); } case X86::BI__builtin_ia32_tzcnt_u16: case X86::BI__builtin_ia32_tzcnt_u32: case X86::BI__builtin_ia32_tzcnt_u64: { - mlir::Value isZeroPoison = builder.getFalse(getLoc(expr->getExprLoc())); - return emitIntrinsicCallOp(*this, expr, "cttz", ops[0].getType(), + mlir::Location loc = getLoc(expr->getExprLoc()); + mlir::Value isZeroPoison = builder.getFalse(loc); + return emitIntrinsicCallOp(builder, loc, "cttz", ops[0].getType(), mlir::ValueRange{ops[0], isZeroPoison}); } case X86::BI__builtin_ia32_undef128: @@ -216,14 +249,14 @@ mlir::Value CIRGenFunction::emitX86BuiltinExpr(unsigned builtinID, mlir::Location loc = getLoc(expr->getExprLoc()); Address tmp = createMemTemp(expr->getArg(0)->getType(), loc); builder.createStore(loc, ops[0], tmp); - return emitIntrinsicCallOp(*this, expr, "x86.sse.ldmxcsr", + return emitIntrinsicCallOp(builder, loc, "x86.sse.ldmxcsr", builder.getVoidTy(), tmp.getPointer()); } case X86::BI_mm_getcsr: case X86::BI__builtin_ia32_stmxcsr: { mlir::Location loc = getLoc(expr->getExprLoc()); Address tmp = createMemTemp(expr->getType(), loc); - emitIntrinsicCallOp(*this, expr, "x86.sse.stmxcsr", builder.getVoidTy(), + emitIntrinsicCallOp(builder, loc, "x86.sse.stmxcsr", builder.getVoidTy(), tmp.getPointer()); return builder.createLoad(loc, tmp); } @@ -605,50 +638,48 @@ mlir::Value CIRGenFunction::emitX86BuiltinExpr(unsigned builtinID, case X86::BI__builtin_ia32_kshiftlihi: case 
X86::BI__builtin_ia32_kshiftlisi: case X86::BI__builtin_ia32_kshiftlidi: { + mlir::Location loc = getLoc(expr->getExprLoc()); unsigned shiftVal = ops[1].getDefiningOp().getIntValue().getZExtValue() & 0xff; unsigned numElems = cast(ops[0].getType()).getWidth(); if (shiftVal >= numElems) - return builder.getNullValue(ops[0].getType(), getLoc(expr->getExprLoc())); + return builder.getNullValue(ops[0].getType(), loc); - mlir::Value in = getMaskVecValue(*this, expr, ops[0], numElems); + mlir::Value in = getMaskVecValue(builder, loc, ops[0], numElems); SmallVector indices; mlir::Type i32Ty = builder.getSInt32Ty(); for (auto i : llvm::seq(0, numElems)) indices.push_back(cir::IntAttr::get(i32Ty, numElems + i - shiftVal)); - mlir::Value zero = - builder.getNullValue(in.getType(), getLoc(expr->getExprLoc())); - mlir::Value sv = - builder.createVecShuffle(getLoc(expr->getExprLoc()), zero, in, indices); + mlir::Value zero = builder.getNullValue(in.getType(), loc); + mlir::Value sv = builder.createVecShuffle(loc, zero, in, indices); return builder.createBitcast(sv, ops[0].getType()); } case X86::BI__builtin_ia32_kshiftriqi: case X86::BI__builtin_ia32_kshiftrihi: case X86::BI__builtin_ia32_kshiftrisi: case X86::BI__builtin_ia32_kshiftridi: { + mlir::Location loc = getLoc(expr->getExprLoc()); unsigned shiftVal = ops[1].getDefiningOp().getIntValue().getZExtValue() & 0xff; unsigned numElems = cast(ops[0].getType()).getWidth(); if (shiftVal >= numElems) - return builder.getNullValue(ops[0].getType(), getLoc(expr->getExprLoc())); + return builder.getNullValue(ops[0].getType(), loc); - mlir::Value in = getMaskVecValue(*this, expr, ops[0], numElems); + mlir::Value in = getMaskVecValue(builder, loc, ops[0], numElems); SmallVector indices; mlir::Type i32Ty = builder.getSInt32Ty(); for (auto i : llvm::seq(0, numElems)) indices.push_back(cir::IntAttr::get(i32Ty, i + shiftVal)); - mlir::Value zero = - builder.getNullValue(in.getType(), getLoc(expr->getExprLoc())); - mlir::Value sv = - 
builder.createVecShuffle(getLoc(expr->getExprLoc()), in, zero, indices); + mlir::Value zero = builder.getNullValue(in.getType(), loc); + mlir::Value sv = builder.createVecShuffle(loc, in, zero, indices); return builder.createBitcast(sv, ops[0].getType()); } case X86::BI__builtin_ia32_vprotbi: @@ -743,38 +774,75 @@ mlir::Value CIRGenFunction::emitX86BuiltinExpr(unsigned builtinID, case X86::BI__builtin_ia32_ktestzsi: case X86::BI__builtin_ia32_ktestcdi: case X86::BI__builtin_ia32_ktestzdi: + cgm.errorNYI(expr->getSourceRange(), + std::string("unimplemented X86 builtin call: ") + + getContext().BuiltinInfo.getName(builtinID)); + return {}; case X86::BI__builtin_ia32_kaddqi: + return emitX86MaskAddLogic(*this, getLoc(expr->getExprLoc()), + "x86.avx512.kadd.b", ops); case X86::BI__builtin_ia32_kaddhi: + return emitX86MaskAddLogic(*this, getLoc(expr->getExprLoc()), + "x86.avx512.kadd.w", ops); case X86::BI__builtin_ia32_kaddsi: + return emitX86MaskAddLogic(*this, getLoc(expr->getExprLoc()), + "x86.avx512.kadd.d", ops); case X86::BI__builtin_ia32_kadddi: + return emitX86MaskAddLogic(*this, getLoc(expr->getExprLoc()), + "x86.avx512.kadd.q", ops); case X86::BI__builtin_ia32_kandqi: case X86::BI__builtin_ia32_kandhi: case X86::BI__builtin_ia32_kandsi: case X86::BI__builtin_ia32_kanddi: + return emitX86MaskLogic(*this, getLoc(expr->getExprLoc()), + cir::BinOpKind::And, ops); case X86::BI__builtin_ia32_kandnqi: case X86::BI__builtin_ia32_kandnhi: case X86::BI__builtin_ia32_kandnsi: case X86::BI__builtin_ia32_kandndi: + return emitX86MaskLogic(*this, getLoc(expr->getExprLoc()), + cir::BinOpKind::And, ops, true); case X86::BI__builtin_ia32_korqi: case X86::BI__builtin_ia32_korhi: case X86::BI__builtin_ia32_korsi: case X86::BI__builtin_ia32_kordi: + return emitX86MaskLogic(*this, getLoc(expr->getExprLoc()), + cir::BinOpKind::Or, ops); case X86::BI__builtin_ia32_kxnorqi: case X86::BI__builtin_ia32_kxnorhi: case X86::BI__builtin_ia32_kxnorsi: case X86::BI__builtin_ia32_kxnordi: + 
return emitX86MaskLogic(*this, getLoc(expr->getExprLoc()), + cir::BinOpKind::Xor, ops, true); case X86::BI__builtin_ia32_kxorqi: case X86::BI__builtin_ia32_kxorhi: case X86::BI__builtin_ia32_kxorsi: case X86::BI__builtin_ia32_kxordi: + return emitX86MaskLogic(*this, getLoc(expr->getExprLoc()), + cir::BinOpKind::Xor, ops); case X86::BI__builtin_ia32_knotqi: case X86::BI__builtin_ia32_knothi: case X86::BI__builtin_ia32_knotsi: - case X86::BI__builtin_ia32_knotdi: + case X86::BI__builtin_ia32_knotdi: { + cir::IntType intTy = cast<cir::IntType>(ops[0].getType()); + unsigned numElts = intTy.getWidth(); + mlir::Value resVec = + getMaskVecValue(builder, getLoc(expr->getExprLoc()), ops[0], numElts); + return builder.createBitcast(builder.createNot(resVec), ops[0].getType()); + } case X86::BI__builtin_ia32_kmovb: case X86::BI__builtin_ia32_kmovw: case X86::BI__builtin_ia32_kmovd: - case X86::BI__builtin_ia32_kmovq: + case X86::BI__builtin_ia32_kmovq: { + // Bitcast to vXi1 type and then back to integer. This gets the mask + // register type into the IR, but might be optimized out depending on + // what's around it. 
+ cir::IntType intTy = cast<cir::IntType>(ops[0].getType()); + unsigned numElts = intTy.getWidth(); + mlir::Value resVec = + getMaskVecValue(builder, getLoc(expr->getExprLoc()), ops[0], numElts); + return builder.createBitcast(resVec, ops[0].getType()); + } case X86::BI__builtin_ia32_kunpckdi: case X86::BI__builtin_ia32_kunpcksi: case X86::BI__builtin_ia32_kunpckhi: diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 7c94743d5ffc6..a8c2061ddbd6c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -2325,14 +2325,45 @@ mlir::Value ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr( const QualType typeToSize = e->getTypeOfArgument(); const mlir::Location loc = cgf.getLoc(e->getSourceRange()); if (auto kind = e->getKind(); - kind == UETT_SizeOf || kind == UETT_DataSizeOf) { - if (cgf.getContext().getAsVariableArrayType(typeToSize)) { - cgf.getCIRGenModule().errorNYI(e->getSourceRange(), - "sizeof operator for VariableArrayType", - e->getStmtClassName()); - return builder.getConstant( - loc, cir::IntAttr::get(cgf.cgm.uInt64Ty, - llvm::APSInt(llvm::APInt(64, 1), true))); + kind == UETT_SizeOf || kind == UETT_DataSizeOf || kind == UETT_CountOf) { + if (const VariableArrayType *vat = + cgf.getContext().getAsVariableArrayType(typeToSize)) { + // For _Countof, we only want to evaluate if the extent is actually + // variable as opposed to a multi-dimensional array whose extent is + // constant but whose element type is variable. + bool evaluateExtent = true; + if (kind == UETT_CountOf && vat->getElementType()->isArrayType()) { + evaluateExtent = + !vat->getSizeExpr()->isIntegerConstantExpr(cgf.getContext()); + } + + if (evaluateExtent) { + if (e->isArgumentType()) { + // sizeof(type) - make sure to emit the VLA size. + cgf.emitVariablyModifiedType(typeToSize); + } else { + // C99 6.5.3.4p2: If the argument is an expression of type + // VLA, it is evaluated. 
+ cgf.getCIRGenModule().errorNYI( + e->getSourceRange(), + "sizeof operator for VariableArrayType & evaluateExtent " + "ignoredExpr", + e->getStmtClassName()); + return {}; + } + + // For _Countof, we just want to return the size of a single dimension. + if (kind == UETT_CountOf) + return cgf.getVLAElements1D(vat).numElts; + + cgf.getCIRGenModule().errorNYI( + e->getSourceRange(), + "sizeof operator for VariableArrayType & evaluateExtent", + e->getStmtClassName()); + return builder.getConstant( + loc, cir::IntAttr::get(cgf.cgm.uInt64Ty, + llvm::APSInt(llvm::APInt(64, 1), true))); + } } } else if (e->getKind() == UETT_OpenMPRequiredSimdAlign) { cgf.getCIRGenModule().errorNYI( diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 33bdfa315a9ea..22128ed3521f8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -1138,6 +1138,14 @@ CIRGenFunction::getVLASize(const VariableArrayType *type) { return {numElements, elementType}; } +CIRGenFunction::VlaSizePair +CIRGenFunction::getVLAElements1D(const VariableArrayType *vla) { + mlir::Value vlaSize = vlaSizeMap[vla->getSizeExpr()]; + assert(vlaSize && "no size for VLA!"); + assert(vlaSize.getType() == sizeTy); + return {vlaSize, vla->getElementType()}; +} + // TODO(cir): Most of this function can be shared between CIRGen // and traditional LLVM codegen void CIRGenFunction::emitVariablyModifiedType(QualType type) { diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 9adac089ea28b..b6926bb88ac85 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -498,6 +498,10 @@ class CIRGenFunction : public CIRGenTypeCache { VlaSizePair(mlir::Value num, QualType ty) : numElts(num), type(ty) {} }; + /// Return the number of elements for a single dimension + /// for the given array type. 
+ VlaSizePair getVLAElements1D(const VariableArrayType *vla); + /// Returns an MLIR::Value+QualType pair that corresponds to the size, /// in non-variably-sized elements, of a variable length array type, /// plus that largest non-variably-sized element type. Assumes that diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 6bf543cf794b7..f1bacff7fc691 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -330,6 +330,12 @@ static LogicalResult checkConstantTypes(mlir::Operation *op, mlir::Type opType, "zero expects struct, array, vector, or complex type"); } + if (mlir::isa(attrType)) { + if (!mlir::isa(opType)) + return success(); + return op->emitOpError("undef expects non-void type"); + } + if (mlir::isa(attrType)) { if (!mlir::isa(opType)) return op->emitOpError("result type (") diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 6136d48204e0c..0c34d87734c3e 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -202,6 +202,14 @@ mlir::LogicalResult CIRToLLVMExpOpLowering::matchAndRewrite( return mlir::success(); } +mlir::LogicalResult CIRToLLVMExp2OpLowering::matchAndRewrite( + cir::Exp2Op op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + mlir::Type resTy = typeConverter->convertType(op.getType()); + rewriter.replaceOpWithNewOp(op, resTy, adaptor.getSrc()); + return mlir::success(); +} + static mlir::Value getLLVMIntCast(mlir::ConversionPatternRewriter &rewriter, mlir::Value llvmSrc, mlir::Type llvmDstIntTy, bool isUnsigned, uint64_t cirSrcWidth, @@ -232,7 +240,7 @@ class CIRAttrToValue { .Case( + cir::UndefAttr, cir::VTableAttr, cir::ZeroAttr>( [&](auto attrT) { return visitCirAttr(attrT); }) .Default([&](auto attrT) { return mlir::Value(); }); } @@ -246,6 +254,7 @@ class CIRAttrToValue { 
mlir::Value visitCirAttr(cir::ConstVectorAttr attr); mlir::Value visitCirAttr(cir::GlobalViewAttr attr); mlir::Value visitCirAttr(cir::TypeInfoAttr attr); + mlir::Value visitCirAttr(cir::UndefAttr attr); mlir::Value visitCirAttr(cir::VTableAttr attr); mlir::Value visitCirAttr(cir::ZeroAttr attr); @@ -583,6 +592,13 @@ mlir::Value CIRAttrToValue::visitCirAttr(cir::TypeInfoAttr typeInfoAttr) { return result; } +/// UndefAttr visitor. +mlir::Value CIRAttrToValue::visitCirAttr(cir::UndefAttr undefAttr) { + mlir::Location loc = parentOp->getLoc(); + return mlir::LLVM::UndefOp::create( + rewriter, loc, converter->convertType(undefAttr.getType())); +} + // VTableAttr visitor. mlir::Value CIRAttrToValue::visitCirAttr(cir::VTableAttr vtableArr) { mlir::Type llvmTy = converter->convertType(vtableArr.getType()); @@ -2038,9 +2054,11 @@ CIRToLLVMGlobalOpLowering::matchAndRewriteRegionInitializedGlobal( cir::GlobalOp op, mlir::Attribute init, mlir::ConversionPatternRewriter &rewriter) const { // TODO: Generalize this handling when more types are needed here. - assert((isa(init))); + assert( + (isa( + init))); // TODO(cir): once LLVM's dialect has proper equivalent attributes this // should be updated. For now, we use a custom op to initialize globals @@ -2098,8 +2116,8 @@ mlir::LogicalResult CIRToLLVMGlobalOpLowering::matchAndRewrite( } else if (mlir::isa( - init.value())) { + cir::TypeInfoAttr, cir::UndefAttr, cir::VTableAttr, + cir::ZeroAttr>(init.value())) { // TODO(cir): once LLVM's dialect has proper equivalent attributes this // should be updated. For now, we use a custom op to initialize globals // to the appropriate value. 
diff --git a/clang/lib/CodeGen/BackendUtil.cpp b/clang/lib/CodeGen/BackendUtil.cpp index 5590d217e96ff..82ca831f35da2 100644 --- a/clang/lib/CodeGen/BackendUtil.cpp +++ b/clang/lib/CodeGen/BackendUtil.cpp @@ -1134,6 +1134,8 @@ void EmitAssemblyHelper::RunOptimizationPipeline( CodeGenOpts.SanitizeMinimalRuntime), /*MayReturn=*/ CodeGenOpts.SanitizeRecover.has(SanitizerKind::LocalBounds), + /*HandlerPreserveAllRegs=*/ + static_cast(CodeGenOpts.SanitizeHandlerPreserveAllRegs), }; } FPM.addPass(BoundsCheckingPass(Options)); diff --git a/clang/lib/CodeGen/CGDebugInfo.cpp b/clang/lib/CodeGen/CGDebugInfo.cpp index 4eb99cc342275..c50f372c1f331 100644 --- a/clang/lib/CodeGen/CGDebugInfo.cpp +++ b/clang/lib/CodeGen/CGDebugInfo.cpp @@ -6519,7 +6519,8 @@ llvm::DINode::DIFlags CGDebugInfo::getCallSiteRelatedAttrs() const { // when there's a possibility of debugging backtraces. if (CGM.getCodeGenOpts().OptimizationLevel == 0 || DebugKind == llvm::codegenoptions::NoDebugInfo || - DebugKind == llvm::codegenoptions::LocTrackingOnly) + DebugKind == llvm::codegenoptions::LocTrackingOnly || + !CGM.getCodeGenOpts().DebugCallSiteInfo) return llvm::DINode::FlagZero; // Call site-related attributes are available in DWARF v5. 
Some debuggers, diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp index 221d0d6016a9c..c8f669b69d991 100644 --- a/clang/lib/CodeGen/CGExpr.cpp +++ b/clang/lib/CodeGen/CGExpr.cpp @@ -3819,6 +3819,8 @@ static void emitCheckHandlerCall(CodeGenFunction &CGF, bool NeedsAbortSuffix = IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable; bool MinimalRuntime = CGF.CGM.getCodeGenOpts().SanitizeMinimalRuntime; + bool HandlerPreserveAllRegs = + CGF.CGM.getCodeGenOpts().SanitizeHandlerPreserveAllRegs; const SanitizerHandlerInfo &CheckInfo = SanitizerHandlers[CheckHandler]; const StringRef CheckName = CheckInfo.Name; std::string FnName = "__ubsan_handle_" + CheckName.str(); @@ -3828,6 +3830,8 @@ static void emitCheckHandlerCall(CodeGenFunction &CGF, FnName += "_minimal"; if (NeedsAbortSuffix) FnName += "_abort"; + if (HandlerPreserveAllRegs && !NeedsAbortSuffix) + FnName += "_preserve"; bool MayReturn = !IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable; @@ -3848,6 +3852,10 @@ static void emitCheckHandlerCall(CodeGenFunction &CGF, (CGF.CurCodeDecl && CGF.CurCodeDecl->hasAttr()); if (NoMerge) HandlerCall->addFnAttr(llvm::Attribute::NoMerge); + if (HandlerPreserveAllRegs && !NeedsAbortSuffix) { + // N.B. there is also a clang::CallingConv which is not what we want here. 
+ HandlerCall->setCallingConv(llvm::CallingConv::PreserveAll); + } if (!MayReturn) { HandlerCall->setDoesNotReturn(); CGF.Builder.CreateUnreachable(); diff --git a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp index 2f69a53787f0c..572d59edb99b2 100644 --- a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp +++ b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp @@ -1727,7 +1727,7 @@ void CGOpenMPRuntimeGPU::emitReduction( CGF.Builder.GetInsertPoint()); llvm::OpenMPIRBuilder::LocationDescription OmpLoc( CodeGenIP, CGF.SourceLocToDebugLoc(Loc)); - llvm::SmallVector ReductionInfos; + llvm::SmallVector ReductionInfos; CodeGenFunction::OMPPrivateScope Scope(CGF); unsigned Idx = 0; @@ -1780,14 +1780,15 @@ void CGOpenMPRuntimeGPU::emitReduction( }; ReductionInfos.emplace_back(llvm::OpenMPIRBuilder::ReductionInfo( ElementType, Variable, PrivateVariable, EvalKind, - /*ReductionGen=*/nullptr, ReductionGen, AtomicReductionGen)); + /*ReductionGen=*/nullptr, ReductionGen, AtomicReductionGen, + /*DataPtrPtrGen=*/nullptr)); Idx++; } llvm::OpenMPIRBuilder::InsertPointTy AfterIP = cantFail(OMPBuilder.createReductionsGPU( - OmpLoc, AllocaIP, CodeGenIP, ReductionInfos, false, TeamsReduction, - llvm::OpenMPIRBuilder::ReductionGenCBKind::Clang, + OmpLoc, AllocaIP, CodeGenIP, ReductionInfos, /*IsByRef=*/{}, false, + TeamsReduction, llvm::OpenMPIRBuilder::ReductionGenCBKind::Clang, CGF.getTarget().getGridValue(), C.getLangOpts().OpenMPCUDAReductionBufNum, RTLoc)); CGF.Builder.restoreIP(AfterIP); diff --git a/clang/lib/CodeGen/CGPointerAuth.cpp b/clang/lib/CodeGen/CGPointerAuth.cpp index dbb7bc99ac638..a49a0c91681fe 100644 --- a/clang/lib/CodeGen/CGPointerAuth.cpp +++ b/clang/lib/CodeGen/CGPointerAuth.cpp @@ -440,9 +440,10 @@ CodeGenModule::getConstantSignedPointer(llvm::Constant *Pointer, unsigned Key, IntegerDiscriminator = llvm::ConstantInt::get(Int64Ty, 0); } - return llvm::ConstantPtrAuth::get(Pointer, - llvm::ConstantInt::get(Int32Ty, Key), - 
IntegerDiscriminator, AddressDiscriminator); + return llvm::ConstantPtrAuth::get( + Pointer, llvm::ConstantInt::get(Int32Ty, Key), IntegerDiscriminator, + AddressDiscriminator, + /*DeactivationSymbol=*/llvm::Constant::getNullValue(DefaultPtrTy)); } /// Does a given PointerAuthScheme require us to sign a value diff --git a/clang/lib/CrossTU/CMakeLists.txt b/clang/lib/CrossTU/CMakeLists.txt index 3349fc283925d..eef7a892701fb 100644 --- a/clang/lib/CrossTU/CMakeLists.txt +++ b/clang/lib/CrossTU/CMakeLists.txt @@ -9,6 +9,7 @@ add_clang_library(clangCrossTU LINK_LIBS clangAST clangBasic + clangDriver clangFrontend clangIndex ) diff --git a/clang/lib/CrossTU/CrossTranslationUnit.cpp b/clang/lib/CrossTU/CrossTranslationUnit.cpp index 0287845a741ed..a3fc2cf6bfb3c 100644 --- a/clang/lib/CrossTU/CrossTranslationUnit.cpp +++ b/clang/lib/CrossTU/CrossTranslationUnit.cpp @@ -16,6 +16,7 @@ #include "clang/Basic/DiagnosticDriver.h" #include "clang/Basic/TargetInfo.h" #include "clang/CrossTU/CrossTUDiagnostic.h" +#include "clang/Driver/CreateASTUnitFromArgs.h" #include "clang/Frontend/ASTUnit.h" #include "clang/Frontend/CompilerInstance.h" #include "clang/Frontend/TextDiagnosticPrinter.h" @@ -619,7 +620,7 @@ CrossTranslationUnitContext::ASTLoader::loadFromSource( auto Diags = llvm::makeIntrusiveRefCnt(DiagID, *DiagOpts, DiagClient); - return ASTUnit::LoadFromCommandLine( + return CreateASTUnitFromCommandLine( CommandLineArgs.begin(), (CommandLineArgs.end()), CI.getPCHContainerOperations(), DiagOpts, Diags, CI.getHeaderSearchOpts().ResourceDir); diff --git a/clang/lib/Driver/CMakeLists.txt b/clang/lib/Driver/CMakeLists.txt index 8052659e9836b..d987111827597 100644 --- a/clang/lib/Driver/CMakeLists.txt +++ b/clang/lib/Driver/CMakeLists.txt @@ -17,6 +17,8 @@ endif() add_clang_library(clangDriver Action.cpp Compilation.cpp + CreateASTUnitFromArgs.cpp + CreateInvocationFromArgs.cpp Distro.cpp Driver.cpp Job.cpp @@ -96,6 +98,8 @@ add_clang_library(clangDriver LINK_LIBS clangBasic + 
clangFrontend + clangSerialization clangLex clangOptions ${system_libs} diff --git a/clang/lib/Driver/CreateASTUnitFromArgs.cpp b/clang/lib/Driver/CreateASTUnitFromArgs.cpp new file mode 100644 index 0000000000000..ea31a8ed07c5f --- /dev/null +++ b/clang/lib/Driver/CreateASTUnitFromArgs.cpp @@ -0,0 +1,166 @@ +//===--- CreateASTUnitFromArgs.h - Create an ASTUnit from Args ------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// Utility for creating an ASTUnit from a vector of command line arguments. +// +//===----------------------------------------------------------------------===// + +#include "clang/Driver/CreateASTUnitFromArgs.h" +#include "clang/Driver/CreateInvocationFromArgs.h" +#include "clang/Frontend/CompilerInvocation.h" +#include "clang/Lex/PreprocessorOptions.h" +#include "clang/Serialization/ModuleCache.h" +#include "llvm/Support/CrashRecoveryContext.h" + +using namespace clang; + +/// Create an ASTUnit from a vector of command line arguments, which must +/// specify exactly one source file. +/// +/// \param ArgBegin - The beginning of the argument vector. +/// +/// \param ArgEnd - The end of the argument vector. +/// +/// \param PCHContainerOps - The PCHContainerOperations to use for loading and +/// creating modules. +/// +/// \param Diags - The diagnostics engine to use for reporting errors; its +/// lifetime is expected to extend past that of the returned ASTUnit. +/// +/// \param ResourceFilesPath - The path to the compiler resource files. +/// +/// \param StorePreamblesInMemory - Whether to store PCH in memory. If false, +/// PCH are stored in temporary files. +/// +/// \param PreambleStoragePath - The path to a directory, in which to create +/// temporary PCH files. 
If empty, the default system temporary directory is +/// used. This parameter is ignored if \p StorePreamblesInMemory is true. +/// +/// \param ModuleFormat - If provided, uses the specific module format. +/// +/// \param ErrAST - If non-null and parsing failed without any AST to return +/// (e.g. because the PCH could not be loaded), this accepts the ASTUnit +/// mainly to allow the caller to see the diagnostics. +/// +/// \param VFS - A llvm::vfs::FileSystem to be used for all file accesses. +/// Note that preamble is saved to a temporary directory on a RealFileSystem, +/// so in order for it to be loaded correctly, VFS should have access to +/// it(i.e., be an overlay over RealFileSystem). RealFileSystem will be used +/// if \p VFS is nullptr. +/// +// FIXME: Move OnlyLocalDecls, UseBumpAllocator to setters on the ASTUnit, we +// shouldn't need to specify them at construction time. +std::unique_ptr clang::CreateASTUnitFromCommandLine( + const char **ArgBegin, const char **ArgEnd, + std::shared_ptr PCHContainerOps, + std::shared_ptr DiagOpts, + IntrusiveRefCntPtr Diags, StringRef ResourceFilesPath, + bool StorePreamblesInMemory, StringRef PreambleStoragePath, + bool OnlyLocalDecls, CaptureDiagsKind CaptureDiagnostics, + ArrayRef RemappedFiles, + bool RemappedFilesKeepOriginalName, unsigned PrecompilePreambleAfterNParses, + TranslationUnitKind TUKind, bool CacheCodeCompletionResults, + bool IncludeBriefCommentsInCodeCompletion, bool AllowPCHWithCompilerErrors, + SkipFunctionBodiesScope SkipFunctionBodies, bool SingleFileParse, + bool UserFilesAreVolatile, bool ForSerialization, + bool RetainExcludedConditionalBlocks, std::optional ModuleFormat, + std::unique_ptr *ErrAST, + IntrusiveRefCntPtr VFS) { + assert(Diags.get() && "no DiagnosticsEngine was provided"); + + // If no VFS was provided, create one that tracks the physical file system. 
+ // If '-working-directory' was passed as an argument, 'createInvocation' will + // set this as the current working directory of the VFS. + if (!VFS) + VFS = llvm::vfs::createPhysicalFileSystem(); + + SmallVector StoredDiagnostics; + + std::shared_ptr CI; + + { + CaptureDroppedDiagnostics Capture(CaptureDiagnostics, *Diags, + &StoredDiagnostics, nullptr); + + CreateInvocationOptions CIOpts; + CIOpts.VFS = VFS; + CIOpts.Diags = Diags; + CIOpts.ProbePrecompiled = true; // FIXME: historical default. Needed? + CI = createInvocation(llvm::ArrayRef(ArgBegin, ArgEnd), std::move(CIOpts)); + if (!CI) + return nullptr; + } + + // Override any files that need remapping + for (const auto &RemappedFile : RemappedFiles) { + CI->getPreprocessorOpts().addRemappedFile(RemappedFile.first, + RemappedFile.second); + } + PreprocessorOptions &PPOpts = CI->getPreprocessorOpts(); + PPOpts.RemappedFilesKeepOriginalName = RemappedFilesKeepOriginalName; + PPOpts.AllowPCHWithCompilerErrors = AllowPCHWithCompilerErrors; + PPOpts.SingleFileParseMode = SingleFileParse; + PPOpts.RetainExcludedConditionalBlocks = RetainExcludedConditionalBlocks; + + // Override the resources path. + CI->getHeaderSearchOpts().ResourceDir = std::string(ResourceFilesPath); + + CI->getFrontendOpts().SkipFunctionBodies = + SkipFunctionBodies == SkipFunctionBodiesScope::PreambleAndMainFile; + + if (ModuleFormat) + CI->getHeaderSearchOpts().ModuleFormat = std::string(*ModuleFormat); + + // Create the AST unit. 
+ std::unique_ptr AST; + AST.reset(new ASTUnit(false)); + AST->NumStoredDiagnosticsFromDriver = StoredDiagnostics.size(); + AST->StoredDiagnostics.swap(StoredDiagnostics); + ASTUnit::ConfigureDiags(Diags, *AST, CaptureDiagnostics); + AST->DiagOpts = DiagOpts; + AST->Diagnostics = Diags; + AST->FileSystemOpts = CI->getFileSystemOpts(); + AST->CodeGenOpts = std::make_unique(CI->getCodeGenOpts()); + VFS = createVFSFromCompilerInvocation(*CI, *Diags, VFS); + AST->FileMgr = + llvm::makeIntrusiveRefCnt(AST->FileSystemOpts, VFS); + AST->StorePreamblesInMemory = StorePreamblesInMemory; + AST->PreambleStoragePath = PreambleStoragePath; + AST->ModCache = createCrossProcessModuleCache(); + AST->OnlyLocalDecls = OnlyLocalDecls; + AST->CaptureDiagnostics = CaptureDiagnostics; + AST->TUKind = TUKind; + AST->ShouldCacheCodeCompletionResults = CacheCodeCompletionResults; + AST->IncludeBriefCommentsInCodeCompletion = + IncludeBriefCommentsInCodeCompletion; + AST->UserFilesAreVolatile = UserFilesAreVolatile; + AST->Invocation = CI; + AST->SkipFunctionBodies = SkipFunctionBodies; + if (ForSerialization) + AST->WriterData.reset( + new ASTUnit::ASTWriterData(*AST->ModCache, *AST->CodeGenOpts)); + // Zero out now to ease cleanup during crash recovery. + CI = nullptr; + Diags = nullptr; + + // Recover resources if we crash before exiting this method. + llvm::CrashRecoveryContextCleanupRegistrar ASTUnitCleanup(AST.get()); + + if (AST->LoadFromCompilerInvocation(std::move(PCHContainerOps), + PrecompilePreambleAfterNParses, VFS)) { + // Some error occurred, if caller wants to examine diagnostics, pass it the + // ASTUnit. 
+ if (ErrAST) { + AST->StoredDiagnostics.swap(AST->FailedParseDiagnostics); + ErrAST->swap(AST); + } + return nullptr; + } + + return AST; +} diff --git a/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp b/clang/lib/Driver/CreateInvocationFromArgs.cpp similarity index 93% rename from clang/lib/Frontend/CreateInvocationFromCommandLine.cpp rename to clang/lib/Driver/CreateInvocationFromArgs.cpp index e54e83151ad1e..516d61f1a1159 100644 --- a/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp +++ b/clang/lib/Driver/CreateInvocationFromArgs.cpp @@ -1,4 +1,4 @@ -//===--- CreateInvocationFromCommandLine.cpp - CompilerInvocation from Args ==// +//===--- CreateInvocationFromArgs.h - CompilerInvocation from Args --------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. @@ -10,9 +10,9 @@ // //===----------------------------------------------------------------------===// +#include "clang/Driver/CreateInvocationFromArgs.h" #include "clang/Basic/DiagnosticFrontend.h" #include "clang/Basic/DiagnosticOptions.h" -#include "clang/Driver/Action.h" #include "clang/Driver/Compilation.h" #include "clang/Driver/Driver.h" #include "clang/Driver/Tool.h" @@ -24,12 +24,13 @@ #include "llvm/Option/ArgList.h" #include "llvm/Support/VirtualFileSystem.h" #include "llvm/TargetParser/Host.h" -using namespace clang; + using namespace llvm::opt; +namespace clang { + std::unique_ptr -clang::createInvocation(ArrayRef ArgList, - CreateInvocationOptions Opts) { +createInvocation(ArrayRef ArgList, CreateInvocationOptions Opts) { assert(!ArgList.empty()); std::optional LocalDiagOpts; IntrusiveRefCntPtr Diags; @@ -114,3 +115,5 @@ clang::createInvocation(ArrayRef ArgList, return nullptr; return CI; } + +} // namespace clang diff --git a/clang/lib/Driver/Driver.cpp b/clang/lib/Driver/Driver.cpp index de8d4601210ae..8644a271a04b5 100644 --- a/clang/lib/Driver/Driver.cpp +++ 
b/clang/lib/Driver/Driver.cpp @@ -66,6 +66,7 @@ #include "clang/Driver/ToolChain.h" #include "clang/Driver/Types.h" #include "clang/Lex/DependencyDirectivesScanner.h" +#include "clang/Options/OptionUtils.h" #include "clang/Options/Options.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/STLExtras.h" @@ -125,40 +126,6 @@ template static bool usesInput(const ArgList &Args, F &&Fn) { }); } -// static -std::string Driver::GetResourcesPath(StringRef BinaryPath) { - // Since the resource directory is embedded in the module hash, it's important - // that all places that need it call this function, so that they get the - // exact same string ("a/../b/" and "b/" get different hashes, for example). - - // Dir is bin/ or lib/, depending on where BinaryPath is. - StringRef Dir = llvm::sys::path::parent_path(BinaryPath); - SmallString<128> P(Dir); - - StringRef ConfiguredResourceDir(CLANG_RESOURCE_DIR); - if (!ConfiguredResourceDir.empty()) { - // FIXME: We should fix the behavior of llvm::sys::path::append so we don't - // need to check for absolute paths here. - if (llvm::sys::path::is_absolute(ConfiguredResourceDir)) - P = ConfiguredResourceDir; - else - llvm::sys::path::append(P, ConfiguredResourceDir); - } else { - // On Windows, libclang.dll is in bin/. - // On non-Windows, libclang.so/.dylib is in lib/. - // With a static-library build of libclang, LibClangPath will contain the - // path of the embedding binary, which for LLVM binaries will be in bin/. - // ../lib gets us to lib/ in both cases. 
- P = llvm::sys::path::parent_path(Dir); - // This search path is also created in the COFF driver of lld, so any - // changes here also needs to happen in lld/COFF/Driver.cpp - llvm::sys::path::append(P, CLANG_INSTALL_LIBDIR_BASENAME, "clang", - CLANG_VERSION_MAJOR_STRING); - } - - return std::string(P); -} - CUIDOptions::CUIDOptions(llvm::opt::DerivedArgList &Args, const Driver &D) : UseCUID(Kind::Hash) { if (Arg *A = Args.getLastArg(options::OPT_fuse_cuid_EQ)) { @@ -5024,15 +4991,24 @@ Action *Driver::BuildOffloadingActions(Compilation &C, // Compiling HIP in device-only non-RDC mode requires linking each action // individually. for (Action *&A : DeviceActions) { - // Special handling for the HIP SPIR-V toolchain because it doesn't use - // the SPIR-V backend yet doesn't report the output as an object. bool IsAMDGCNSPIRV = A->getOffloadingToolChain() && A->getOffloadingToolChain()->getTriple().getOS() == llvm::Triple::OSType::AMDHSA && A->getOffloadingToolChain()->getTriple().isSPIRV(); + bool UseSPIRVBackend = Args.hasFlag(options::OPT_use_spirv_backend, + options::OPT_no_use_spirv_backend, + /*Default=*/false); + + // Special handling for the HIP SPIR-V toolchain in device-only. + // The translator path has a linking step, whereas the SPIR-V backend path + // does not to avoid any external dependency such as spirv-link. The + // linking step is skipped for the SPIR-V backend path. + bool IsAMDGCNSPIRVWithBackend = IsAMDGCNSPIRV && UseSPIRVBackend; + if ((A->getType() != types::TY_Object && !IsAMDGCNSPIRV && A->getType() != types::TY_LTO_BC) || - HIPRelocatableObj || !HIPNoRDC || !offloadDeviceOnly()) + HIPRelocatableObj || !HIPNoRDC || !offloadDeviceOnly() || + (IsAMDGCNSPIRVWithBackend && offloadDeviceOnly())) continue; ActionList LinkerInput = {A}; A = C.MakeAction(LinkerInput, types::TY_Image); @@ -5258,12 +5234,28 @@ Action *Driver::ConstructPhaseAction( Args.hasArg(options::OPT_S) ? 
types::TY_LTO_IR : types::TY_LTO_BC; return C.MakeAction(Input, Output); } + bool UseSPIRVBackend = Args.hasFlag(options::OPT_use_spirv_backend, + options::OPT_no_use_spirv_backend, + /*Default=*/false); + + auto OffloadingToolChain = Input->getOffloadingToolChain(); + // For AMD SPIRV, if offloadDeviceOnly(), we call the SPIRV backend unless + // LLVM bitcode was requested explicitly or RDC is set. If + // !offloadDeviceOnly, we emit LLVM bitcode, and clang-linker-wrapper will + // compile it to SPIRV. + bool UseSPIRVBackendForHipDeviceOnlyNoRDC = + TargetDeviceOffloadKind == Action::OFK_HIP && OffloadingToolChain && + OffloadingToolChain->getTriple().isSPIRV() && UseSPIRVBackend && + offloadDeviceOnly() && + !Args.hasFlag(options::OPT_fgpu_rdc, options::OPT_fno_gpu_rdc, false); + if (Args.hasArg(options::OPT_emit_llvm) || TargetDeviceOffloadKind == Action::OFK_SYCL || (((Input->getOffloadingToolChain() && Input->getOffloadingToolChain()->getTriple().isAMDGPU() && TargetDeviceOffloadKind != Action::OFK_None) || TargetDeviceOffloadKind == Action::OFK_HIP) && + !UseSPIRVBackendForHipDeviceOnlyNoRDC && ((Args.hasFlag(options::OPT_fgpu_rdc, options::OPT_fno_gpu_rdc, false) || (Args.hasFlag(options::OPT_offload_new_driver, @@ -5285,6 +5277,19 @@ Action *Driver::ConstructPhaseAction( : types::TY_LLVM_BC; return C.MakeAction(Input, Output); } + + // The SPIRV backend compilation path for HIP must avoid external + // dependencies. The default compilation path assembles and links its + // output, but the SPIRV assembler and linker are external tools. This code + // ensures the backend emits binary SPIRV directly to bypass those steps and + // avoid failures. Without -save-temps, the compiler may already skip + // assembling and linking. With -save-temps, these steps must be explicitly + // disabled, as done here. We also force skipping these steps regardless of + // -save-temps to avoid relying on optimizations (unless -S is set). 
+ // The current HIP bundling expects the type to be types::TY_Image + if (UseSPIRVBackendForHipDeviceOnlyNoRDC && !Args.hasArg(options::OPT_S)) + return C.MakeAction(Input, types::TY_Image); + return C.MakeAction(Input, types::TY_PP_Asm); } case phases::Assemble: diff --git a/clang/lib/Driver/SanitizerArgs.cpp b/clang/lib/Driver/SanitizerArgs.cpp index 9902cbbf99436..d5a0b65ab758f 100644 --- a/clang/lib/Driver/SanitizerArgs.cpp +++ b/clang/lib/Driver/SanitizerArgs.cpp @@ -419,6 +419,7 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC, const Driver &D = TC.getDriver(); SanitizerMask TrappingKinds = parseSanitizeTrapArgs(D, Args, DiagnoseErrors); SanitizerMask InvalidTrappingKinds = TrappingKinds & NotAllowedWithTrap; + const llvm::Triple &Triple = TC.getTriple(); MinimalRuntime = Args.hasFlag(options::OPT_fsanitize_minimal_runtime, @@ -426,7 +427,8 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC, HandlerPreserveAllRegs = Args.hasFlag(options::OPT_fsanitize_handler_preserve_all_regs, options::OPT_fno_sanitize_handler_preserve_all_regs, - HandlerPreserveAllRegs); + HandlerPreserveAllRegs) && + MinimalRuntime && (Triple.isAArch64() || Triple.isX86_64()); // The object size sanitizer should not be enabled at -O0. Arg *OptLevel = Args.getLastArg(options::OPT_O_Group); @@ -494,7 +496,6 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC, // -fsanitize=function and -fsanitize=kcfi instrument indirect function // calls to load a type hash before the function label. Therefore, an // execute-only target doesn't support the function and kcfi sanitizers. 
- const llvm::Triple &Triple = TC.getTriple(); if (isExecuteOnlyTarget(Triple, Args)) { if (SanitizerMask KindsToDiagnose = Add & NotAllowedWithExecuteOnly & ~DiagnosedKinds) { diff --git a/clang/lib/Driver/ToolChains/Arch/RISCV.cpp b/clang/lib/Driver/ToolChains/Arch/RISCV.cpp index 1dcce6d053a39..7fda8ea50223d 100644 --- a/clang/lib/Driver/ToolChains/Arch/RISCV.cpp +++ b/clang/lib/Driver/ToolChains/Arch/RISCV.cpp @@ -130,17 +130,10 @@ void riscv::getRISCVTargetFeatures(const Driver &D, const llvm::Triple &Triple, #undef RESERVE_REG // -mrelax is default, unless -mno-relax is specified. - if (Args.hasFlag(options::OPT_mrelax, options::OPT_mno_relax, true)) { + if (Args.hasFlag(options::OPT_mrelax, options::OPT_mno_relax, true)) Features.push_back("+relax"); - // -gsplit-dwarf -mrelax requires DW_AT_high_pc/DW_AT_ranges/... indexing - // into .debug_addr, which is currently not implemented. - Arg *A; - if (getDebugFissionKind(D, Args, A) != DwarfFissionKind::None) - D.Diag(clang::diag::err_drv_riscv_unsupported_with_linker_relaxation) - << A->getAsString(Args); - } else { + else Features.push_back("-relax"); - } // If -mstrict-align, -mno-strict-align, -mscalar-strict-align, or // -mno-scalar-strict-align is passed, use it. 
Otherwise, the diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp index c5d40c9825fab..0380568412e62 100644 --- a/clang/lib/Driver/ToolChains/Clang.cpp +++ b/clang/lib/Driver/ToolChains/Clang.cpp @@ -32,6 +32,7 @@ #include "clang/Driver/SanitizerArgs.h" #include "clang/Driver/Types.h" #include "clang/Driver/XRayArgs.h" +#include "clang/Options/OptionUtils.h" #include "clang/Options/Options.h" #include "llvm/ADT/ScopeExit.h" #include "llvm/ADT/SmallSet.h" @@ -4442,6 +4443,10 @@ renderDebugOptions(const ToolChain &TC, const Driver &D, const llvm::Triple &T, DebuggerTuning != llvm::DebuggerKind::DBX))) CmdArgs.push_back("-gno-column-info"); + if (!Args.hasFlag(options::OPT_gcall_site_info, + options::OPT_gno_call_site_info, true)) + CmdArgs.push_back("-gno-call-site-info"); + // FIXME: Move backend command line options to the module. if (Args.hasFlag(options::OPT_gmodules, options::OPT_gno_modules, false)) { // If -gline-tables-only or -gline-directives-only is the last option it @@ -5057,6 +5062,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, Args.ClaimAllArgs(options::OPT_femit_dwarf_unwind_EQ); } + bool IsAMDSPIRVForHIPDevice = + IsHIPDevice && getToolChain().getTriple().isSPIRV() && + getToolChain().getTriple().getVendor() == llvm::Triple::AMD; + if (isa(JA)) { assert(JA.getType() == types::TY_Plist && "Invalid output type."); CmdArgs.push_back("-analyze"); @@ -5154,6 +5163,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, rewriteKind = RK_Fragile; } else if (JA.getType() == types::TY_CIR) { CmdArgs.push_back("-emit-cir"); + } else if (JA.getType() == types::TY_Image && IsAMDSPIRVForHIPDevice) { + CmdArgs.push_back("-emit-obj"); } else { assert(JA.getType() == types::TY_PP_Asm && "Unexpected output type!"); } @@ -9084,7 +9095,9 @@ void LinkerWrapper::ConstructJob(Compilation &C, const JobAction &JA, OPT_fno_lto, OPT_flto, OPT_flto_partitions_EQ, - OPT_flto_EQ}; + OPT_flto_EQ, + 
OPT_use_spirv_backend}; + const llvm::DenseSet LinkerOptions{OPT_mllvm, OPT_Zlinker_input}; auto ShouldForwardForToolChain = [&](Arg *A, const ToolChain &TC) { // Don't forward -mllvm to toolchains that don't support LLVM. diff --git a/clang/lib/Driver/ToolChains/CommonArgs.cpp b/clang/lib/Driver/ToolChains/CommonArgs.cpp index 4c036f0f8dee3..d3539a594df11 100644 --- a/clang/lib/Driver/ToolChains/CommonArgs.cpp +++ b/clang/lib/Driver/ToolChains/CommonArgs.cpp @@ -3398,169 +3398,6 @@ void tools::handleInterchangeLoopsArgs(const ArgList &Args, CmdArgs.push_back("-floop-interchange"); } -// Parse -mprefer-vector-width=. Return the Value string if well-formed. -// Otherwise, return an empty string and issue a diagnosic message if needed. -StringRef tools::parseMPreferVectorWidthOption(clang::DiagnosticsEngine &Diags, - const llvm::opt::ArgList &Args) { - Arg *A = Args.getLastArg(options::OPT_mprefer_vector_width_EQ); - if (!A) - return ""; - - StringRef Value = A->getValue(); - unsigned Width LLVM_ATTRIBUTE_UNINITIALIZED; - - // Only "none" and Integer values are accepted by - // -mprefer-vector-width=. - if (Value != "none" && Value.getAsInteger(10, Width)) { - Diags.Report(clang::diag::err_drv_invalid_value) - << A->getOption().getName() << Value; - return ""; - } - - return Value; -} - -// This is a helper function for validating the optional refinement step -// parameter in reciprocal argument strings. Return false if there is an error -// parsing the refinement step. Otherwise, return true and set the Position -// of the refinement step in the input string. 
-static bool getRefinementStep(StringRef In, clang::DiagnosticsEngine &Diags, - const Arg &A, size_t &Position) { - const char RefinementStepToken = ':'; - Position = In.find(RefinementStepToken); - if (Position != StringRef::npos) { - StringRef Option = A.getOption().getName(); - StringRef RefStep = In.substr(Position + 1); - // Allow exactly one numeric character for the additional refinement - // step parameter. This is reasonable for all currently-supported - // operations and architectures because we would expect that a larger value - // of refinement steps would cause the estimate "optimization" to - // under-perform the native operation. Also, if the estimate does not - // converge quickly, it probably will not ever converge, so further - // refinement steps will not produce a better answer. - if (RefStep.size() != 1) { - Diags.Report(diag::err_drv_invalid_value) << Option << RefStep; - return false; - } - char RefStepChar = RefStep[0]; - if (RefStepChar < '0' || RefStepChar > '9') { - Diags.Report(diag::err_drv_invalid_value) << Option << RefStep; - return false; - } - } - return true; -} - -// Parse -mrecip. Return the Value string if well-formed. -// Otherwise, return an empty string and issue a diagnosic message if needed. -StringRef tools::parseMRecipOption(clang::DiagnosticsEngine &Diags, - const ArgList &Args) { - StringRef DisabledPrefixIn = "!"; - StringRef DisabledPrefixOut = "!"; - StringRef EnabledPrefixOut = ""; - StringRef Out = ""; - - Arg *A = Args.getLastArg(options::OPT_mrecip, options::OPT_mrecip_EQ); - if (!A) - return ""; - - unsigned NumOptions = A->getNumValues(); - if (NumOptions == 0) { - // No option is the same as "all". - return "all"; - } - - // Pass through "all", "none", or "default" with an optional refinement step. 
- if (NumOptions == 1) { - StringRef Val = A->getValue(0); - size_t RefStepLoc; - if (!getRefinementStep(Val, Diags, *A, RefStepLoc)) - return ""; - StringRef ValBase = Val.slice(0, RefStepLoc); - if (ValBase == "all" || ValBase == "none" || ValBase == "default") { - return Val; - } - } - - // Each reciprocal type may be enabled or disabled individually. - // Check each input value for validity, concatenate them all back together, - // and pass through. - - llvm::StringMap OptionStrings; - OptionStrings.insert(std::make_pair("divd", false)); - OptionStrings.insert(std::make_pair("divf", false)); - OptionStrings.insert(std::make_pair("divh", false)); - OptionStrings.insert(std::make_pair("vec-divd", false)); - OptionStrings.insert(std::make_pair("vec-divf", false)); - OptionStrings.insert(std::make_pair("vec-divh", false)); - OptionStrings.insert(std::make_pair("sqrtd", false)); - OptionStrings.insert(std::make_pair("sqrtf", false)); - OptionStrings.insert(std::make_pair("sqrth", false)); - OptionStrings.insert(std::make_pair("vec-sqrtd", false)); - OptionStrings.insert(std::make_pair("vec-sqrtf", false)); - OptionStrings.insert(std::make_pair("vec-sqrth", false)); - - for (unsigned i = 0; i != NumOptions; ++i) { - StringRef Val = A->getValue(i); - - bool IsDisabled = Val.starts_with(DisabledPrefixIn); - // Ignore the disablement token for string matching. - if (IsDisabled) - Val = Val.substr(1); - - size_t RefStep; - if (!getRefinementStep(Val, Diags, *A, RefStep)) - return ""; - - StringRef ValBase = Val.slice(0, RefStep); - llvm::StringMap::iterator OptionIter = OptionStrings.find(ValBase); - if (OptionIter == OptionStrings.end()) { - // Try again specifying float suffix. - OptionIter = OptionStrings.find(ValBase.str() + 'f'); - if (OptionIter == OptionStrings.end()) { - // The input name did not match any known option string. 
- Diags.Report(diag::err_drv_unknown_argument) << Val; - return ""; - } - // The option was specified without a half or float or double suffix. - // Make sure that the double or half entry was not already specified. - // The float entry will be checked below. - if (OptionStrings[ValBase.str() + 'd'] || - OptionStrings[ValBase.str() + 'h']) { - Diags.Report(diag::err_drv_invalid_value) - << A->getOption().getName() << Val; - return ""; - } - } - - if (OptionIter->second == true) { - // Duplicate option specified. - Diags.Report(diag::err_drv_invalid_value) - << A->getOption().getName() << Val; - return ""; - } - - // Mark the matched option as found. Do not allow duplicate specifiers. - OptionIter->second = true; - - // If the precision was not specified, also mark the double and half entry - // as found. - if (ValBase.back() != 'f' && ValBase.back() != 'd' && - ValBase.back() != 'h') { - OptionStrings[ValBase.str() + 'd'] = true; - OptionStrings[ValBase.str() + 'h'] = true; - } - - // Build the output string. - StringRef Prefix = IsDisabled ? 
DisabledPrefixOut : EnabledPrefixOut; - Out = Args.MakeArgString(Out + Prefix + Val); - if (i != NumOptions - 1) - Out = Args.MakeArgString(Out + ","); - } - - return Out; -} - std::string tools::complexRangeKindToStr(LangOptions::ComplexRangeKind Range) { switch (Range) { case LangOptions::ComplexRangeKind::CX_Full: diff --git a/clang/lib/Driver/ToolChains/Flang.cpp b/clang/lib/Driver/ToolChains/Flang.cpp index cc4755cd6a9b0..438de23be0103 100644 --- a/clang/lib/Driver/ToolChains/Flang.cpp +++ b/clang/lib/Driver/ToolChains/Flang.cpp @@ -11,6 +11,7 @@ #include "clang/Basic/CodeGenOptions.h" #include "clang/Driver/CommonArgs.h" +#include "clang/Options/OptionUtils.h" #include "clang/Options/Options.h" #include "llvm/Frontend/Debug/Options.h" #include "llvm/Support/Path.h" diff --git a/clang/lib/Driver/ToolChains/HIPAMD.cpp b/clang/lib/Driver/ToolChains/HIPAMD.cpp index 231a38c2d3717..f2f64922cb404 100644 --- a/clang/lib/Driver/ToolChains/HIPAMD.cpp +++ b/clang/lib/Driver/ToolChains/HIPAMD.cpp @@ -159,10 +159,9 @@ void AMDGCN::Linker::constructLldCommand(Compilation &C, const JobAction &JA, // For SPIR-V the inputs for the job are device AMDGCN SPIR-V flavoured bitcode // and the output is either a compiled SPIR-V binary or bitcode (-emit-llvm). It -// calls llvm-link and then the llvm-spirv translator. Once the SPIR-V BE will -// be promoted from experimental, we will switch to using that. TODO: consider -// if we want to run any targeted optimisations over IR here, over generic -// SPIR-V. +// calls llvm-link and then the llvm-spirv translator or the SPIR-V BE. +// TODO: consider if we want to run any targeted optimisations over IR here, +// over generic SPIR-V. 
void AMDGCN::Linker::constructLinkAndEmitSpirvCommand( Compilation &C, const JobAction &JA, const InputInfoList &Inputs, const InputInfo &Output, const llvm::opt::ArgList &Args) const { @@ -173,17 +172,41 @@ void AMDGCN::Linker::constructLinkAndEmitSpirvCommand( const char *LinkedBCFilePath = HIP::getTempFile(C, LinkedBCFilePrefix, "bc"); InputInfo LinkedBCFile(&JA, LinkedBCFilePath, Output.getBaseInput()); + bool UseSPIRVBackend = + Args.hasFlag(options::OPT_use_spirv_backend, + options::OPT_no_use_spirv_backend, /*Default=*/false); + constructLlvmLinkCommand(C, JA, Inputs, LinkedBCFile, Args); - // Emit SPIR-V binary. - llvm::opt::ArgStringList TrArgs{ - "--spirv-max-version=1.6", - "--spirv-ext=+all", - "--spirv-allow-unknown-intrinsics", - "--spirv-lower-const-expr", - "--spirv-preserve-auxdata", - "--spirv-debug-info-version=nonsemantic-shader-200"}; - SPIRV::constructTranslateCommand(C, *this, JA, Output, LinkedBCFile, TrArgs); + if (UseSPIRVBackend) { + // This code handles the case in the new driver when --offload-device-only + // is unset and clang-linker-wrapper forwards the bitcode that must be + // compiled to SPIR-V. 
+ + llvm::opt::ArgStringList CmdArgs; + const char *Triple = + C.getArgs().MakeArgString("-triple=spirv64-amd-amdhsa"); + + CmdArgs.append({"-cc1", Triple, "-emit-obj", "-disable-llvm-optzns", + LinkedBCFile.getFilename(), "-o", Output.getFilename()}); + + const Driver &Driver = getToolChain().getDriver(); + const char *Exec = Driver.getClangProgramPath(); + C.addCommand(std::make_unique( + JA, *this, ResponseFileSupport::None(), Exec, CmdArgs, LinkedBCFile, + Output, Driver.getPrependArg())); + } else { + // Emit SPIR-V binary using the translator + llvm::opt::ArgStringList TrArgs{ + "--spirv-max-version=1.6", + "--spirv-ext=+all", + "--spirv-allow-unknown-intrinsics", + "--spirv-lower-const-expr", + "--spirv-preserve-auxdata", + "--spirv-debug-info-version=nonsemantic-shader-200"}; + SPIRV::constructTranslateCommand(C, *this, JA, Output, LinkedBCFile, + TrArgs); + } } // For amdgcn the inputs of the linker job are device bitcode and output is diff --git a/clang/lib/Driver/ToolChains/Linux.cpp b/clang/lib/Driver/ToolChains/Linux.cpp index 020e7465548fe..2c741a38fce1a 100644 --- a/clang/lib/Driver/ToolChains/Linux.cpp +++ b/clang/lib/Driver/ToolChains/Linux.cpp @@ -927,7 +927,7 @@ SanitizerMask Linux::getSupportedSanitizers() const { if (IsX86_64 || IsSystemZ || IsPowerPC64) Res |= SanitizerKind::KernelMemory; if (IsX86_64 || IsMIPS64 || IsAArch64 || IsX86 || IsMIPS || IsArmArch || - IsPowerPC64 || IsHexagon || IsLoongArch64 || IsRISCV64) + IsPowerPC64 || IsHexagon || IsLoongArch64 || IsRISCV64 || IsSystemZ) Res |= SanitizerKind::Scudo; if (IsX86_64 || IsAArch64 || IsRISCV64) { Res |= SanitizerKind::HWAddress; diff --git a/clang/lib/Format/Format.cpp b/clang/lib/Format/Format.cpp index 9bbb33cb14502..f0e9aff2fd21a 100644 --- a/clang/lib/Format/Format.cpp +++ b/clang/lib/Format/Format.cpp @@ -405,11 +405,19 @@ struct ScalarEnumerationTraits { template <> struct MappingTraits { static void mapping(IO &IO, FormatStyle::IntegerLiteralSeparatorStyle &Base) { 
IO.mapOptional("Binary", Base.Binary); - IO.mapOptional("BinaryMinDigits", Base.BinaryMinDigits); + IO.mapOptional("BinaryMinDigitsInsert", Base.BinaryMinDigitsInsert); + IO.mapOptional("BinaryMaxDigitsRemove", Base.BinaryMaxDigitsRemove); IO.mapOptional("Decimal", Base.Decimal); - IO.mapOptional("DecimalMinDigits", Base.DecimalMinDigits); + IO.mapOptional("DecimalMinDigitsInsert", Base.DecimalMinDigitsInsert); + IO.mapOptional("DecimalMaxDigitsRemove", Base.DecimalMaxDigitsRemove); IO.mapOptional("Hex", Base.Hex); - IO.mapOptional("HexMinDigits", Base.HexMinDigits); + IO.mapOptional("HexMinDigitsInsert", Base.HexMinDigitsInsert); + IO.mapOptional("HexMaxDigitsRemove", Base.HexMaxDigitsRemove); + + // For backward compatibility. + IO.mapOptional("BinaryMinDigits", Base.BinaryMinDigitsInsert); + IO.mapOptional("DecimalMinDigits", Base.DecimalMinDigitsInsert); + IO.mapOptional("HexMinDigits", Base.HexMinDigitsInsert); } }; @@ -1758,10 +1766,7 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) { LLVMStyle.InsertBraces = false; LLVMStyle.InsertNewlineAtEOF = false; LLVMStyle.InsertTrailingCommas = FormatStyle::TCS_None; - LLVMStyle.IntegerLiteralSeparator = { - /*Binary=*/0, /*BinaryMinDigits=*/0, - /*Decimal=*/0, /*DecimalMinDigits=*/0, - /*Hex=*/0, /*HexMinDigits=*/0}; + LLVMStyle.IntegerLiteralSeparator = {}; LLVMStyle.JavaScriptQuotes = FormatStyle::JSQS_Leave; LLVMStyle.JavaScriptWrapImports = true; LLVMStyle.KeepEmptyLines = { @@ -2183,7 +2188,7 @@ FormatStyle getClangFormatStyle() { Style.InsertBraces = true; Style.InsertNewlineAtEOF = true; Style.IntegerLiteralSeparator.Decimal = 3; - Style.IntegerLiteralSeparator.DecimalMinDigits = 5; + Style.IntegerLiteralSeparator.DecimalMinDigitsInsert = 5; Style.LineEnding = FormatStyle::LE_LF; Style.RemoveBracesLLVM = true; Style.RemoveEmptyLinesInUnwrappedLines = true; diff --git a/clang/lib/Format/IntegerLiteralSeparatorFixer.cpp b/clang/lib/Format/IntegerLiteralSeparatorFixer.cpp index 
b51991bfeff4b..a283884b6c341 100644 --- a/clang/lib/Format/IntegerLiteralSeparatorFixer.cpp +++ b/clang/lib/Format/IntegerLiteralSeparatorFixer.cpp @@ -72,11 +72,22 @@ IntegerLiteralSeparatorFixer::process(const Environment &Env, if (SkipBinary && SkipDecimal && SkipHex) return {}; - const auto BinaryMinDigits = - std::max((int)Option.BinaryMinDigits, Binary + 1); - const auto DecimalMinDigits = - std::max((int)Option.DecimalMinDigits, Decimal + 1); - const auto HexMinDigits = std::max((int)Option.HexMinDigits, Hex + 1); + auto CalcMinAndMax = [](int DigitsPerGroup, int MinDigitsInsert, + int MaxDigitsRemove) { + MinDigitsInsert = std::max(MinDigitsInsert, DigitsPerGroup + 1); + if (MinDigitsInsert < 1) + MaxDigitsRemove = 0; + else if (MaxDigitsRemove < 1 || MaxDigitsRemove >= MinDigitsInsert) + MaxDigitsRemove = MinDigitsInsert - 1; + return std::pair(MinDigitsInsert, MaxDigitsRemove); + }; + + const auto [BinaryMinDigitsInsert, BinaryMaxDigitsRemove] = CalcMinAndMax( + Binary, Option.BinaryMinDigitsInsert, Option.BinaryMaxDigitsRemove); + const auto [DecimalMinDigitsInsert, DecimalMaxDigitsRemove] = CalcMinAndMax( + Decimal, Option.DecimalMinDigitsInsert, Option.DecimalMaxDigitsRemove); + const auto [HexMinDigitsInsert, HexMaxDigitsRemove] = + CalcMinAndMax(Hex, Option.HexMinDigitsInsert, Option.HexMaxDigitsRemove); const auto &SourceMgr = Env.getSourceManager(); AffectedRangeManager AffectedRangeMgr(SourceMgr, Env.getCharRanges()); @@ -138,17 +149,23 @@ IntegerLiteralSeparatorFixer::process(const Environment &Env, Text = Text.substr(Start, Length); } auto DigitsPerGroup = Decimal; - auto MinDigits = DecimalMinDigits; + auto MinDigitsInsert = DecimalMinDigitsInsert; + auto MaxDigitsRemove = DecimalMaxDigitsRemove; if (IsBase2) { DigitsPerGroup = Binary; - MinDigits = BinaryMinDigits; + MinDigitsInsert = BinaryMinDigitsInsert; + MaxDigitsRemove = BinaryMaxDigitsRemove; } else if (IsBase16) { DigitsPerGroup = Hex; - MinDigits = HexMinDigits; + MinDigitsInsert = 
HexMinDigitsInsert; + MaxDigitsRemove = HexMaxDigitsRemove; } const auto SeparatorCount = Text.count(Separator); const int DigitCount = Length - SeparatorCount; - const bool RemoveSeparator = DigitsPerGroup < 0 || DigitCount < MinDigits; + if (DigitCount > MaxDigitsRemove && DigitCount < MinDigitsInsert) + continue; + const bool RemoveSeparator = + DigitsPerGroup < 0 || DigitCount <= MaxDigitsRemove; if (RemoveSeparator && SeparatorCount == 0) continue; if (!RemoveSeparator && SeparatorCount > 0 && diff --git a/clang/lib/Frontend/ASTUnit.cpp b/clang/lib/Frontend/ASTUnit.cpp index 1de779ccbf141..e72317da64596 100644 --- a/clang/lib/Frontend/ASTUnit.cpp +++ b/clang/lib/Frontend/ASTUnit.cpp @@ -44,6 +44,7 @@ #include "clang/Frontend/FrontendOptions.h" #include "clang/Frontend/MultiplexConsumer.h" #include "clang/Frontend/PrecompiledPreamble.h" +#include "clang/Frontend/StandaloneDiagnostic.h" #include "clang/Frontend/Utils.h" #include "clang/Lex/HeaderSearch.h" #include "clang/Lex/HeaderSearchOptions.h" @@ -210,15 +211,6 @@ getBufferForFileHandlingRemapping(const CompilerInvocation &Invocation, return llvm::MemoryBuffer::getMemBufferCopy(Buffer->getBuffer(), FilePath); } -struct ASTUnit::ASTWriterData { - SmallString<128> Buffer; - llvm::BitstreamWriter Stream; - ASTWriter Writer; - - ASTWriterData(ModuleCache &ModCache, const CodeGenOptions &CGOpts) - : Stream(Buffer), Writer(Stream, Buffer, ModCache, CGOpts, {}) {} -}; - void ASTUnit::clearFileLevelDecls() { FileDecls.clear(); } @@ -581,73 +573,24 @@ class ASTInfoCollector : public ASTReaderListener { Counter = NewCounter; } }; +} // anonymous namespace -/// Diagnostic consumer that saves each diagnostic it is given. 
-class FilterAndStoreDiagnosticConsumer : public DiagnosticConsumer { - SmallVectorImpl *StoredDiags; - SmallVectorImpl *StandaloneDiags; - bool CaptureNonErrorsFromIncludes = true; - const LangOptions *LangOpts = nullptr; - SourceManager *SourceMgr = nullptr; - -public: - FilterAndStoreDiagnosticConsumer( - SmallVectorImpl *StoredDiags, - SmallVectorImpl *StandaloneDiags, - bool CaptureNonErrorsFromIncludes) - : StoredDiags(StoredDiags), StandaloneDiags(StandaloneDiags), - CaptureNonErrorsFromIncludes(CaptureNonErrorsFromIncludes) { - assert((StoredDiags || StandaloneDiags) && - "No output collections were passed to StoredDiagnosticConsumer."); - } - - void BeginSourceFile(const LangOptions &LangOpts, - const Preprocessor *PP = nullptr) override { - this->LangOpts = &LangOpts; - if (PP) - SourceMgr = &PP->getSourceManager(); - } - - void HandleDiagnostic(DiagnosticsEngine::Level Level, - const Diagnostic &Info) override; -}; - -/// RAII object that optionally captures and filters diagnostics, if -/// there is no diagnostic client to capture them already. 
-class CaptureDroppedDiagnostics { - DiagnosticsEngine &Diags; - FilterAndStoreDiagnosticConsumer Client; - DiagnosticConsumer *PreviousClient = nullptr; - std::unique_ptr OwningPreviousClient; - -public: - CaptureDroppedDiagnostics( - CaptureDiagsKind CaptureDiagnostics, DiagnosticsEngine &Diags, - SmallVectorImpl *StoredDiags, - SmallVectorImpl *StandaloneDiags) - : Diags(Diags), - Client(StoredDiags, StandaloneDiags, - CaptureDiagnostics != - CaptureDiagsKind::AllWithoutNonErrorsFromIncludes) { - if (CaptureDiagnostics != CaptureDiagsKind::None || - Diags.getClient() == nullptr) { - OwningPreviousClient = Diags.takeClient(); - PreviousClient = Diags.getClient(); - Diags.setClient(&Client, false); - } - } - - ~CaptureDroppedDiagnostics() { - if (Diags.getClient() == &Client) - Diags.setClient(PreviousClient, !!OwningPreviousClient.release()); - } -}; - -} // namespace +FilterAndStoreDiagnosticConsumer::FilterAndStoreDiagnosticConsumer( + SmallVectorImpl *StoredDiags, + SmallVectorImpl *StandaloneDiags, + bool CaptureNonErrorsFromIncludes) + : StoredDiags(StoredDiags), StandaloneDiags(StandaloneDiags), + CaptureNonErrorsFromIncludes(CaptureNonErrorsFromIncludes) { + assert((StoredDiags || StandaloneDiags) && + "No output collections were passed to StoredDiagnosticConsumer."); +} -static ASTUnit::StandaloneDiagnostic -makeStandaloneDiagnostic(const LangOptions &LangOpts, - const StoredDiagnostic &InDiag); +void FilterAndStoreDiagnosticConsumer::BeginSourceFile( + const LangOptions &LangOpts, const Preprocessor *PP) { + this->LangOpts = &LangOpts; + if (PP) + SourceMgr = &PP->getSourceManager(); +} static bool isInMainFile(const clang::Diagnostic &D) { if (!D.hasSourceManager() || !D.getLocation().isValid()) @@ -683,12 +626,32 @@ void FilterAndStoreDiagnosticConsumer::HandleDiagnostic( StoredDiag.emplace(Level, Info); ResultDiag = &*StoredDiag; } - StandaloneDiags->push_back( - makeStandaloneDiagnostic(*LangOpts, *ResultDiag)); + 
StandaloneDiags->emplace_back(*LangOpts, *ResultDiag); } } } +CaptureDroppedDiagnostics::CaptureDroppedDiagnostics( + CaptureDiagsKind CaptureDiagnostics, DiagnosticsEngine &Diags, + SmallVectorImpl *StoredDiags, + SmallVectorImpl *StandaloneDiags) + : Diags(Diags), + Client(StoredDiags, StandaloneDiags, + CaptureDiagnostics != + CaptureDiagsKind::AllWithoutNonErrorsFromIncludes) { + if (CaptureDiagnostics != CaptureDiagsKind::None || + Diags.getClient() == nullptr) { + OwningPreviousClient = Diags.takeClient(); + PreviousClient = Diags.getClient(); + Diags.setClient(&Client, false); + } +} + +CaptureDroppedDiagnostics::~CaptureDroppedDiagnostics() { + if (Diags.getClient() == &Client) + Diags.setClient(PreviousClient, !!OwningPreviousClient.release()); +} + IntrusiveRefCntPtr ASTUnit::getASTReader() const { return Reader; } @@ -1110,7 +1073,7 @@ class ASTUnitPreambleCallbacks : public PreambleCallbacks { unsigned Hash = 0; std::vector TopLevelDecls; std::vector TopLevelDeclIDs; - llvm::SmallVector PreambleDiags; + llvm::SmallVector PreambleDiags; }; } // namespace @@ -1259,10 +1222,17 @@ bool ASTUnit::Parse(std::shared_ptr PCHContainerOps, if (!Act->BeginSourceFile(*Clang, Clang->getFrontendOpts().Inputs[0])) return true; - if (SavedMainFileBuffer) - TranslateStoredDiagnostics(getFileManager(), getSourceManager(), - PreambleDiagnostics, StoredDiagnostics); - else + if (SavedMainFileBuffer) { + StoredDiagnostics.clear(); + StoredDiagnostics.reserve(PreambleDiagnostics.size()); + llvm::transform(std::move(PreambleDiagnostics), + std::back_inserter(StoredDiagnostics), + [&](auto &&StandaloneDiag) { + return translateStandaloneDiag( + getFileManager(), getSourceManager(), + std::move(StandaloneDiag), PreambleSrcLocCache); + }); + } else PreambleSrcLocCache.clear(); if (llvm::Error Err = Act->Execute()) { @@ -1281,51 +1251,6 @@ bool ASTUnit::Parse(std::shared_ptr PCHContainerOps, return false; } -static std::pair -makeStandaloneRange(CharSourceRange Range, const 
SourceManager &SM, - const LangOptions &LangOpts) { - CharSourceRange FileRange = Lexer::makeFileCharRange(Range, SM, LangOpts); - unsigned Offset = SM.getFileOffset(FileRange.getBegin()); - unsigned EndOffset = SM.getFileOffset(FileRange.getEnd()); - return std::make_pair(Offset, EndOffset); -} - -static ASTUnit::StandaloneFixIt makeStandaloneFixIt(const SourceManager &SM, - const LangOptions &LangOpts, - const FixItHint &InFix) { - ASTUnit::StandaloneFixIt OutFix; - OutFix.RemoveRange = makeStandaloneRange(InFix.RemoveRange, SM, LangOpts); - OutFix.InsertFromRange = - makeStandaloneRange(InFix.InsertFromRange, SM, LangOpts); - OutFix.CodeToInsert = InFix.CodeToInsert; - OutFix.BeforePreviousInsertions = InFix.BeforePreviousInsertions; - return OutFix; -} - -static ASTUnit::StandaloneDiagnostic -makeStandaloneDiagnostic(const LangOptions &LangOpts, - const StoredDiagnostic &InDiag) { - ASTUnit::StandaloneDiagnostic OutDiag; - OutDiag.ID = InDiag.getID(); - OutDiag.Level = InDiag.getLevel(); - OutDiag.Message = std::string(InDiag.getMessage()); - OutDiag.LocOffset = 0; - if (InDiag.getLocation().isInvalid()) - return OutDiag; - const SourceManager &SM = InDiag.getLocation().getManager(); - SourceLocation FileLoc = SM.getFileLoc(InDiag.getLocation()); - OutDiag.Filename = std::string(SM.getFilename(FileLoc)); - if (OutDiag.Filename.empty()) - return OutDiag; - OutDiag.LocOffset = SM.getFileOffset(FileLoc); - for (const auto &Range : InDiag.getRanges()) - OutDiag.Ranges.push_back(makeStandaloneRange(Range, SM, LangOpts)); - for (const auto &FixIt : InDiag.getFixIts()) - OutDiag.FixIts.push_back(makeStandaloneFixIt(SM, LangOpts, FixIt)); - - return OutDiag; -} - /// Attempt to build or re-use a precompiled preamble when (re-)parsing /// the source file. 
/// @@ -1780,114 +1705,6 @@ std::unique_ptr ASTUnit::LoadFromCompilerInvocation( return AST; } -std::unique_ptr ASTUnit::LoadFromCommandLine( - const char **ArgBegin, const char **ArgEnd, - std::shared_ptr PCHContainerOps, - std::shared_ptr DiagOpts, - IntrusiveRefCntPtr Diags, StringRef ResourceFilesPath, - bool StorePreamblesInMemory, StringRef PreambleStoragePath, - bool OnlyLocalDecls, CaptureDiagsKind CaptureDiagnostics, - ArrayRef RemappedFiles, bool RemappedFilesKeepOriginalName, - unsigned PrecompilePreambleAfterNParses, TranslationUnitKind TUKind, - bool CacheCodeCompletionResults, bool IncludeBriefCommentsInCodeCompletion, - bool AllowPCHWithCompilerErrors, SkipFunctionBodiesScope SkipFunctionBodies, - bool SingleFileParse, bool UserFilesAreVolatile, bool ForSerialization, - bool RetainExcludedConditionalBlocks, std::optional ModuleFormat, - std::unique_ptr *ErrAST, - IntrusiveRefCntPtr VFS) { - assert(Diags.get() && "no DiagnosticsEngine was provided"); - - // If no VFS was provided, create one that tracks the physical file system. - // If '-working-directory' was passed as an argument, 'createInvocation' will - // set this as the current working directory of the VFS. - if (!VFS) - VFS = llvm::vfs::createPhysicalFileSystem(); - - SmallVector StoredDiagnostics; - - std::shared_ptr CI; - - { - CaptureDroppedDiagnostics Capture(CaptureDiagnostics, *Diags, - &StoredDiagnostics, nullptr); - - CreateInvocationOptions CIOpts; - CIOpts.VFS = VFS; - CIOpts.Diags = Diags; - CIOpts.ProbePrecompiled = true; // FIXME: historical default. Needed? 
- CI = createInvocation(llvm::ArrayRef(ArgBegin, ArgEnd), std::move(CIOpts)); - if (!CI) - return nullptr; - } - - // Override any files that need remapping - for (const auto &RemappedFile : RemappedFiles) { - CI->getPreprocessorOpts().addRemappedFile(RemappedFile.first, - RemappedFile.second); - } - PreprocessorOptions &PPOpts = CI->getPreprocessorOpts(); - PPOpts.RemappedFilesKeepOriginalName = RemappedFilesKeepOriginalName; - PPOpts.AllowPCHWithCompilerErrors = AllowPCHWithCompilerErrors; - PPOpts.SingleFileParseMode = SingleFileParse; - PPOpts.RetainExcludedConditionalBlocks = RetainExcludedConditionalBlocks; - - // Override the resources path. - CI->getHeaderSearchOpts().ResourceDir = std::string(ResourceFilesPath); - - CI->getFrontendOpts().SkipFunctionBodies = - SkipFunctionBodies == SkipFunctionBodiesScope::PreambleAndMainFile; - - if (ModuleFormat) - CI->getHeaderSearchOpts().ModuleFormat = std::string(*ModuleFormat); - - // Create the AST unit. - std::unique_ptr AST; - AST.reset(new ASTUnit(false)); - AST->NumStoredDiagnosticsFromDriver = StoredDiagnostics.size(); - AST->StoredDiagnostics.swap(StoredDiagnostics); - ConfigureDiags(Diags, *AST, CaptureDiagnostics); - AST->DiagOpts = DiagOpts; - AST->Diagnostics = Diags; - AST->FileSystemOpts = CI->getFileSystemOpts(); - AST->CodeGenOpts = std::make_unique(CI->getCodeGenOpts()); - VFS = createVFSFromCompilerInvocation(*CI, *Diags, VFS); - AST->FileMgr = - llvm::makeIntrusiveRefCnt(AST->FileSystemOpts, VFS); - AST->StorePreamblesInMemory = StorePreamblesInMemory; - AST->PreambleStoragePath = PreambleStoragePath; - AST->ModCache = createCrossProcessModuleCache(); - AST->OnlyLocalDecls = OnlyLocalDecls; - AST->CaptureDiagnostics = CaptureDiagnostics; - AST->TUKind = TUKind; - AST->ShouldCacheCodeCompletionResults = CacheCodeCompletionResults; - AST->IncludeBriefCommentsInCodeCompletion = - IncludeBriefCommentsInCodeCompletion; - AST->UserFilesAreVolatile = UserFilesAreVolatile; - AST->Invocation = CI; - 
AST->SkipFunctionBodies = SkipFunctionBodies; - if (ForSerialization) - AST->WriterData.reset(new ASTWriterData(*AST->ModCache, *AST->CodeGenOpts)); - // Zero out now to ease cleanup during crash recovery. - CI = nullptr; - Diags = nullptr; - - // Recover resources if we crash before exiting this method. - llvm::CrashRecoveryContextCleanupRegistrar ASTUnitCleanup(AST.get()); - - if (AST->LoadFromCompilerInvocation(std::move(PCHContainerOps), - PrecompilePreambleAfterNParses, VFS)) { - // Some error occurred, if caller wants to examine diagnostics, pass it the - // ASTUnit. - if (ErrAST) { - AST->StoredDiagnostics.swap(AST->FailedParseDiagnostics); - ErrAST->swap(AST); - } - return nullptr; - } - - return AST; -} - bool ASTUnit::Reparse(std::shared_ptr PCHContainerOps, ArrayRef RemappedFiles, IntrusiveRefCntPtr VFS) { @@ -2406,64 +2223,6 @@ bool ASTUnit::serialize(raw_ostream &OS) { return serializeUnit(Writer, Buffer, getSema(), OS); } -void ASTUnit::TranslateStoredDiagnostics( - FileManager &FileMgr, SourceManager &SrcMgr, - const SmallVectorImpl &Diags, - SmallVectorImpl &Out) { - // Map the standalone diagnostic into the new source manager. We also need to - // remap all the locations to the new view. This includes the diag location, - // any associated source ranges, and the source ranges of associated fix-its. - // FIXME: There should be a cleaner way to do this. - SmallVector Result; - Result.reserve(Diags.size()); - - for (const auto &SD : Diags) { - // Rebuild the StoredDiagnostic. 
- if (SD.Filename.empty()) - continue; - auto FE = FileMgr.getOptionalFileRef(SD.Filename); - if (!FE) - continue; - SourceLocation FileLoc; - auto ItFileID = PreambleSrcLocCache.find(SD.Filename); - if (ItFileID == PreambleSrcLocCache.end()) { - FileID FID = SrcMgr.translateFile(*FE); - FileLoc = SrcMgr.getLocForStartOfFile(FID); - PreambleSrcLocCache[SD.Filename] = FileLoc; - } else { - FileLoc = ItFileID->getValue(); - } - - if (FileLoc.isInvalid()) - continue; - SourceLocation L = FileLoc.getLocWithOffset(SD.LocOffset); - FullSourceLoc Loc(L, SrcMgr); - - SmallVector Ranges; - Ranges.reserve(SD.Ranges.size()); - for (const auto &Range : SD.Ranges) { - SourceLocation BL = FileLoc.getLocWithOffset(Range.first); - SourceLocation EL = FileLoc.getLocWithOffset(Range.second); - Ranges.push_back(CharSourceRange::getCharRange(BL, EL)); - } - - SmallVector FixIts; - FixIts.reserve(SD.FixIts.size()); - for (const auto &FixIt : SD.FixIts) { - FixIts.push_back(FixItHint()); - FixItHint &FH = FixIts.back(); - FH.CodeToInsert = FixIt.CodeToInsert; - SourceLocation BL = FileLoc.getLocWithOffset(FixIt.RemoveRange.first); - SourceLocation EL = FileLoc.getLocWithOffset(FixIt.RemoveRange.second); - FH.RemoveRange = CharSourceRange::getCharRange(BL, EL); - } - - Result.push_back( - StoredDiagnostic(SD.Level, SD.ID, SD.Message, Loc, Ranges, FixIts)); - } - Result.swap(Out); -} - void ASTUnit::addFileLevelDecl(Decl *D) { assert(D); diff --git a/clang/lib/Frontend/CMakeLists.txt b/clang/lib/Frontend/CMakeLists.txt index dac9e0d26f393..634f239933605 100644 --- a/clang/lib/Frontend/CMakeLists.txt +++ b/clang/lib/Frontend/CMakeLists.txt @@ -17,7 +17,6 @@ add_clang_library(clangFrontend ChainedIncludesSource.cpp CompilerInstance.cpp CompilerInvocation.cpp - CreateInvocationFromCommandLine.cpp DependencyFile.cpp DependencyGraph.cpp DiagnosticRenderer.cpp @@ -36,6 +35,7 @@ add_clang_library(clangFrontend SARIFDiagnosticPrinter.cpp SerializedDiagnosticPrinter.cpp 
SerializedDiagnosticReader.cpp + StandaloneDiagnostic.cpp TestModuleFileExtension.cpp TextDiagnostic.cpp TextDiagnosticBuffer.cpp @@ -51,7 +51,6 @@ add_clang_library(clangFrontend clangAPINotes clangAST clangBasic - clangDriver clangOptions clangEdit clangLex diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp index c7c29a91721c0..d2a5ed6262de1 100644 --- a/clang/lib/Frontend/CompilerInvocation.cpp +++ b/clang/lib/Frontend/CompilerInvocation.cpp @@ -27,7 +27,6 @@ #include "clang/Basic/Version.h" #include "clang/Basic/XRayInstr.h" #include "clang/Config/config.h" -#include "clang/Driver/Driver.h" #include "clang/Frontend/CommandLineSourceLoc.h" #include "clang/Frontend/DependencyOutputOptions.h" #include "clang/Frontend/FrontendOptions.h" @@ -3273,13 +3272,6 @@ static bool ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args, return Diags.getNumErrors() == NumErrorsBefore; } -std::string CompilerInvocation::GetResourcesPath(const char *Argv0, - void *MainAddr) { - std::string ClangExecutable = - llvm::sys::fs::getMainExecutable(Argv0, MainAddr); - return driver::Driver::GetResourcesPath(ClangExecutable); -} - static void GenerateHeaderSearchArgs(const HeaderSearchOptions &Opts, ArgumentConsumer Consumer) { const HeaderSearchOptions *HeaderSearchOpts = &Opts; @@ -3956,21 +3948,7 @@ void CompilerInvocationBase::GenerateLangArgs(const LangOptions &Opts, std::to_string(*Opts.AllocTokenMax)); if (Opts.AllocTokenMode) { - StringRef S; - switch (*Opts.AllocTokenMode) { - case llvm::AllocTokenMode::Increment: - S = "increment"; - break; - case llvm::AllocTokenMode::Random: - S = "random"; - break; - case llvm::AllocTokenMode::TypeHash: - S = "typehash"; - break; - case llvm::AllocTokenMode::TypeHashPointerSplit: - S = "typehashpointersplit"; - break; - } + StringRef S = llvm::getAllocTokenModeAsString(*Opts.AllocTokenMode); GenerateArg(Consumer, OPT_falloc_token_mode_EQ, S); } } diff --git 
a/clang/lib/Frontend/StandaloneDiagnostic.cpp b/clang/lib/Frontend/StandaloneDiagnostic.cpp new file mode 100644 index 0000000000000..4f19c91b7d266 --- /dev/null +++ b/clang/lib/Frontend/StandaloneDiagnostic.cpp @@ -0,0 +1,117 @@ +//===--- StandaloneDiagnostic.cpp - Serializable Diagnostic --------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "clang/Frontend/StandaloneDiagnostic.h" +#include "clang/Lex/Lexer.h" + +namespace clang { + +StandaloneDiagnostic::SourceOffsetRange::SourceOffsetRange( + CharSourceRange Range, const SourceManager &SrcMgr, + const LangOptions &LangOpts) { + const auto FileRange = Lexer::makeFileCharRange(Range, SrcMgr, LangOpts); + Begin = SrcMgr.getFileOffset(FileRange.getBegin()); + End = SrcMgr.getFileOffset(FileRange.getEnd()); +} + +StandaloneDiagnostic::StandaloneFixIt::StandaloneFixIt( + const SourceManager &SrcMgr, const LangOptions &LangOpts, + const FixItHint &FixIt) + : RemoveRange(FixIt.RemoveRange, SrcMgr, LangOpts), + InsertFromRange(FixIt.InsertFromRange, SrcMgr, LangOpts), + CodeToInsert(FixIt.CodeToInsert), + BeforePreviousInsertions(FixIt.BeforePreviousInsertions) {} + +StandaloneDiagnostic::StandaloneDiagnostic(const LangOptions &LangOpts, + const StoredDiagnostic &InDiag) + : Level(InDiag.getLevel()), ID(InDiag.getID()), + Message(InDiag.getMessage()) { + const FullSourceLoc &FullLoc = InDiag.getLocation(); + // This is not an invalid diagnostic; invalid SourceLocations are used to + // represent diagnostics without a specific SourceLocation. 
+ if (FullLoc.isInvalid()) + return; + + const auto &SrcMgr = FullLoc.getManager(); + FileKind = SrcMgr.getFileCharacteristic(static_cast(FullLoc)); + const auto FileLoc = SrcMgr.getFileLoc(static_cast(FullLoc)); + FileOffset = SrcMgr.getFileOffset(FileLoc); + Filename = SrcMgr.getFilename(FileLoc); + assert(!Filename.empty() && "diagnostic with location has no source file?"); + + Ranges.reserve(InDiag.getRanges().size()); + for (const auto &Range : InDiag.getRanges()) + Ranges.emplace_back(Range, SrcMgr, LangOpts); + + FixIts.reserve(InDiag.getFixIts().size()); + for (const auto &FixIt : InDiag.getFixIts()) + FixIts.emplace_back(SrcMgr, LangOpts, FixIt); +} + +StoredDiagnostic +translateStandaloneDiag(FileManager &FileMgr, SourceManager &SrcMgr, + const StandaloneDiagnostic &StandaloneDiag, + llvm::StringMap &SrcLocCache) { + const auto FileRef = FileMgr.getOptionalFileRef(StandaloneDiag.Filename); + if (!FileRef) + return StoredDiagnostic(StandaloneDiag.Level, StandaloneDiag.ID, + StandaloneDiag.Message); + + // Try to get FileLoc from cache first + SourceLocation FileLoc; + auto It = SrcLocCache.find(StandaloneDiag.Filename); + if (It != SrcLocCache.end()) { + FileLoc = It->getValue(); + } + + // Cache miss - compute and cache the location + if (FileLoc.isInvalid()) { + const auto FileID = + SrcMgr.getOrCreateFileID(*FileRef, StandaloneDiag.FileKind); + FileLoc = SrcMgr.getLocForStartOfFile(FileID); + + if (FileLoc.isInvalid()) + return StoredDiagnostic(StandaloneDiag.Level, StandaloneDiag.ID, + StandaloneDiag.Message); + + SrcLocCache[StandaloneDiag.Filename] = FileLoc; + } + + const auto DiagLoc = FileLoc.getLocWithOffset(StandaloneDiag.FileOffset); + const FullSourceLoc Loc(DiagLoc, SrcMgr); + + auto ConvertOffsetRange = + [&](const StandaloneDiagnostic::SourceOffsetRange &Range) { + return CharSourceRange( + SourceRange(FileLoc.getLocWithOffset(Range.Begin), + FileLoc.getLocWithOffset(Range.End)), + /*IsTokenRange*/ false); + }; + + SmallVector 
TranslatedRanges; + TranslatedRanges.reserve(StandaloneDiag.Ranges.size()); + transform(StandaloneDiag.Ranges, std::back_inserter(TranslatedRanges), + ConvertOffsetRange); + + SmallVector TranslatedFixIts; + TranslatedFixIts.reserve(StandaloneDiag.FixIts.size()); + for (const auto &FixIt : StandaloneDiag.FixIts) { + FixItHint TranslatedFixIt; + TranslatedFixIt.CodeToInsert = FixIt.CodeToInsert; + TranslatedFixIt.RemoveRange = ConvertOffsetRange(FixIt.RemoveRange); + TranslatedFixIt.InsertFromRange = ConvertOffsetRange(FixIt.InsertFromRange); + TranslatedFixIt.BeforePreviousInsertions = FixIt.BeforePreviousInsertions; + TranslatedFixIts.push_back(std::move(TranslatedFixIt)); + } + + return StoredDiagnostic(StandaloneDiag.Level, StandaloneDiag.ID, + StandaloneDiag.Message, Loc, TranslatedRanges, + TranslatedFixIts); +} + +} // namespace clang diff --git a/clang/lib/Headers/hlsl/hlsl_intrinsic_helpers.h b/clang/lib/Headers/hlsl/hlsl_intrinsic_helpers.h index d1dc8275431c0..3550409b6988d 100644 --- a/clang/lib/Headers/hlsl/hlsl_intrinsic_helpers.h +++ b/clang/lib/Headers/hlsl/hlsl_intrinsic_helpers.h @@ -137,11 +137,7 @@ template constexpr vector lit_impl(T NDotL, T NDotH, T M) { } template constexpr T faceforward_impl(T N, T I, T Ng) { -#if (__has_builtin(__builtin_spirv_faceforward)) - return __builtin_spirv_faceforward(N, I, Ng); -#else return select(dot(I, Ng) < 0, N, -N); -#endif } template constexpr T ldexp_impl(T X, T Exp) { diff --git a/clang/lib/Interpreter/CMakeLists.txt b/clang/lib/Interpreter/CMakeLists.txt index 37faa0302caaa..9a597146b2fc4 100644 --- a/clang/lib/Interpreter/CMakeLists.txt +++ b/clang/lib/Interpreter/CMakeLists.txt @@ -46,6 +46,7 @@ add_clang_library(clangInterpreter clangFrontend clangFrontendTool clangLex + clangOptions clangParse clangSema clangSerialization diff --git a/clang/lib/Interpreter/Interpreter.cpp b/clang/lib/Interpreter/Interpreter.cpp index 7764fa7dc92b9..6cbc5e9910bcc 100644 --- a/clang/lib/Interpreter/Interpreter.cpp +++ 
b/clang/lib/Interpreter/Interpreter.cpp @@ -42,6 +42,7 @@ #include "clang/Interpreter/Interpreter.h" #include "clang/Interpreter/Value.h" #include "clang/Lex/PreprocessorOptions.h" +#include "clang/Options/OptionUtils.h" #include "clang/Options/Options.h" #include "clang/Sema/Lookup.h" #include "clang/Serialization/ObjectFilePCHContainerReader.h" @@ -105,7 +106,7 @@ CreateCI(const llvm::opt::ArgStringList &Argv) { if (Clang->getHeaderSearchOpts().UseBuiltinIncludes && Clang->getHeaderSearchOpts().ResourceDir.empty()) Clang->getHeaderSearchOpts().ResourceDir = - CompilerInvocation::GetResourcesPath(Argv[0], nullptr); + GetResourcesPath(Argv[0], nullptr); Clang->createVirtualFileSystem(); diff --git a/clang/lib/Options/OptionUtils.cpp b/clang/lib/Options/OptionUtils.cpp index fcafd3c83c6b3..e5aefa012f679 100644 --- a/clang/lib/Options/OptionUtils.cpp +++ b/clang/lib/Options/OptionUtils.cpp @@ -9,7 +9,12 @@ #include "clang/Options/OptionUtils.h" #include "clang/Basic/Diagnostic.h" #include "clang/Basic/DiagnosticDriver.h" +#include "clang/Basic/Version.h" +#include "clang/Config/config.h" +#include "clang/Options/Options.h" #include "llvm/Option/ArgList.h" +#include "llvm/Support/FileSystem.h" +#include "llvm/Support/Path.h" using namespace clang; using namespace llvm::opt; @@ -31,17 +36,211 @@ IntTy getLastArgIntValueImpl(const ArgList &Args, OptSpecifier Id, } } // namespace -namespace clang { - -int getLastArgIntValue(const ArgList &Args, OptSpecifier Id, int Default, - DiagnosticsEngine *Diags, unsigned Base) { +int clang::getLastArgIntValue(const ArgList &Args, OptSpecifier Id, int Default, + DiagnosticsEngine *Diags, unsigned Base) { return getLastArgIntValueImpl(Args, Id, Default, Diags, Base); } -uint64_t getLastArgUInt64Value(const ArgList &Args, OptSpecifier Id, - uint64_t Default, DiagnosticsEngine *Diags, - unsigned Base) { +uint64_t clang::getLastArgUInt64Value(const ArgList &Args, OptSpecifier Id, + uint64_t Default, + DiagnosticsEngine *Diags, unsigned 
Base) { return getLastArgIntValueImpl(Args, Id, Default, Diags, Base); } -} // namespace clang +StringRef clang::parseMPreferVectorWidthOption(clang::DiagnosticsEngine &Diags, + const llvm::opt::ArgList &Args) { + const Arg *A = Args.getLastArg(options::OPT_mprefer_vector_width_EQ); + if (!A) + return ""; + + StringRef Value = A->getValue(); + unsigned Width LLVM_ATTRIBUTE_UNINITIALIZED; + + // Only "none" and Integer values are accepted by + // -mprefer-vector-width=. + if (Value != "none" && Value.getAsInteger(10, Width)) { + Diags.Report(clang::diag::err_drv_invalid_value) + << A->getOption().getName() << Value; + return ""; + } + + return Value; +} + +// This is a helper function for validating the optional refinement step +// parameter in reciprocal argument strings. Return false if there is an error +// parsing the refinement step. Otherwise, return true and set the Position +// of the refinement step in the input string. +static bool getRefinementStep(StringRef In, clang::DiagnosticsEngine &Diags, + const Arg &A, size_t &Position) { + const char RefinementStepToken = ':'; + Position = In.find(RefinementStepToken); + if (Position != StringRef::npos) { + StringRef Option = A.getOption().getName(); + StringRef RefStep = In.substr(Position + 1); + // Allow exactly one numeric character for the additional refinement + // step parameter. This is reasonable for all currently-supported + // operations and architectures because we would expect that a larger value + // of refinement steps would cause the estimate "optimization" to + // under-perform the native operation. Also, if the estimate does not + // converge quickly, it probably will not ever converge, so further + // refinement steps will not produce a better answer. 
+ if (RefStep.size() != 1) { + Diags.Report(diag::err_drv_invalid_value) << Option << RefStep; + return false; + } + char RefStepChar = RefStep[0]; + if (RefStepChar < '0' || RefStepChar > '9') { + Diags.Report(diag::err_drv_invalid_value) << Option << RefStep; + return false; + } + } + return true; +} + +StringRef clang::parseMRecipOption(clang::DiagnosticsEngine &Diags, + const ArgList &Args) { + StringRef DisabledPrefixIn = "!"; + StringRef DisabledPrefixOut = "!"; + StringRef EnabledPrefixOut = ""; + StringRef Out = ""; + + const Arg *A = Args.getLastArg(options::OPT_mrecip, options::OPT_mrecip_EQ); + if (!A) + return ""; + + const unsigned NumOptions = A->getNumValues(); + if (NumOptions == 0) { + // No option is the same as "all". + return "all"; + } + + // Pass through "all", "none", or "default" with an optional refinement step. + if (NumOptions == 1) { + StringRef Val = A->getValue(0); + size_t RefStepLoc; + if (!getRefinementStep(Val, Diags, *A, RefStepLoc)) + return ""; + StringRef ValBase = Val.slice(0, RefStepLoc); + if (ValBase == "all" || ValBase == "none" || ValBase == "default") { + return Val; + } + } + + // Each reciprocal type may be enabled or disabled individually. + // Check each input value for validity, concatenate them all back together, + // and pass through. 
+ + llvm::StringMap OptionStrings; + OptionStrings.insert(std::make_pair("divd", false)); + OptionStrings.insert(std::make_pair("divf", false)); + OptionStrings.insert(std::make_pair("divh", false)); + OptionStrings.insert(std::make_pair("vec-divd", false)); + OptionStrings.insert(std::make_pair("vec-divf", false)); + OptionStrings.insert(std::make_pair("vec-divh", false)); + OptionStrings.insert(std::make_pair("sqrtd", false)); + OptionStrings.insert(std::make_pair("sqrtf", false)); + OptionStrings.insert(std::make_pair("sqrth", false)); + OptionStrings.insert(std::make_pair("vec-sqrtd", false)); + OptionStrings.insert(std::make_pair("vec-sqrtf", false)); + OptionStrings.insert(std::make_pair("vec-sqrth", false)); + + for (unsigned i = 0; i != NumOptions; ++i) { + StringRef Val = A->getValue(i); + + bool IsDisabled = Val.starts_with(DisabledPrefixIn); + // Ignore the disablement token for string matching. + if (IsDisabled) + Val = Val.substr(1); + + size_t RefStep; + if (!getRefinementStep(Val, Diags, *A, RefStep)) + return ""; + + StringRef ValBase = Val.slice(0, RefStep); + llvm::StringMap::iterator OptionIter = OptionStrings.find(ValBase); + if (OptionIter == OptionStrings.end()) { + // Try again specifying float suffix. + OptionIter = OptionStrings.find(ValBase.str() + 'f'); + if (OptionIter == OptionStrings.end()) { + // The input name did not match any known option string. + Diags.Report(diag::err_drv_unknown_argument) << Val; + return ""; + } + // The option was specified without a half or float or double suffix. + // Make sure that the double or half entry was not already specified. + // The float entry will be checked below. + if (OptionStrings[ValBase.str() + 'd'] || + OptionStrings[ValBase.str() + 'h']) { + Diags.Report(diag::err_drv_invalid_value) + << A->getOption().getName() << Val; + return ""; + } + } + + if (OptionIter->second == true) { + // Duplicate option specified. 
+ Diags.Report(diag::err_drv_invalid_value) + << A->getOption().getName() << Val; + return ""; + } + + // Mark the matched option as found. Do not allow duplicate specifiers. + OptionIter->second = true; + + // If the precision was not specified, also mark the double and half entry + // as found. + if (ValBase.back() != 'f' && ValBase.back() != 'd' && + ValBase.back() != 'h') { + OptionStrings[ValBase.str() + 'd'] = true; + OptionStrings[ValBase.str() + 'h'] = true; + } + + // Build the output string. + StringRef Prefix = IsDisabled ? DisabledPrefixOut : EnabledPrefixOut; + Out = Args.MakeArgString(Out + Prefix + Val); + if (i != NumOptions - 1) + Out = Args.MakeArgString(Out + ","); + } + + return Out; +} + +std::string clang::GetResourcesPath(StringRef BinaryPath) { + // Since the resource directory is embedded in the module hash, it's important + // that all places that need it call this function, so that they get the + // exact same string ("a/../b/" and "b/" get different hashes, for example). + + // Dir is bin/ or lib/, depending on where BinaryPath is. + StringRef Dir = llvm::sys::path::parent_path(BinaryPath); + SmallString<128> P(Dir); + + StringRef ConfiguredResourceDir(CLANG_RESOURCE_DIR); + if (!ConfiguredResourceDir.empty()) { + // FIXME: We should fix the behavior of llvm::sys::path::append so we don't + // need to check for absolute paths here. + if (llvm::sys::path::is_absolute(ConfiguredResourceDir)) + P = ConfiguredResourceDir; + else + llvm::sys::path::append(P, ConfiguredResourceDir); + } else { + // On Windows, libclang.dll is in bin/. + // On non-Windows, libclang.so/.dylib is in lib/. + // With a static-library build of libclang, LibClangPath will contain the + // path of the embedding binary, which for LLVM binaries will be in bin/. + // ../lib gets us to lib/ in both cases. 
+ P = llvm::sys::path::parent_path(Dir); + // This search path is also created in the COFF driver of lld, so any + // changes here also needs to happen in lld/COFF/Driver.cpp + llvm::sys::path::append(P, CLANG_INSTALL_LIBDIR_BASENAME, "clang", + CLANG_VERSION_MAJOR_STRING); + } + + return std::string(P); +} + +std::string clang::GetResourcesPath(const char *Argv0, void *MainAddr) { + const std::string ClangExecutable = + llvm::sys::fs::getMainExecutable(Argv0, MainAddr); + return GetResourcesPath(ClangExecutable); +} diff --git a/clang/lib/Sema/CheckExprLifetime.cpp b/clang/lib/Sema/CheckExprLifetime.cpp index f9665b5e59831..c91ca751984c9 100644 --- a/clang/lib/Sema/CheckExprLifetime.cpp +++ b/clang/lib/Sema/CheckExprLifetime.cpp @@ -17,6 +17,9 @@ #include "llvm/ADT/PointerIntPair.h" namespace clang::sema { +using lifetimes::isGslOwnerType; +using lifetimes::isGslPointerType; + namespace { enum LifetimeKind { /// The lifetime of a temporary bound to this entity ends at the end of the @@ -257,38 +260,8 @@ static void visitLocalsRetainedByReferenceBinding(IndirectLocalPath &Path, Expr *Init, ReferenceKind RK, LocalVisitor Visit); -template static bool isRecordWithAttr(QualType Type) { - auto *RD = Type->getAsCXXRecordDecl(); - if (!RD) - return false; - // Generally, if a primary template class declaration is annotated with an - // attribute, all its specializations generated from template instantiations - // should inherit the attribute. - // - // However, since lifetime analysis occurs during parsing, we may encounter - // cases where a full definition of the specialization is not required. In - // such cases, the specialization declaration remains incomplete and lacks the - // attribute. Therefore, we fall back to checking the primary template class. - // - // Note: it is possible for a specialization declaration to have an attribute - // even if the primary template does not. 
- // - // FIXME: What if the primary template and explicit specialization - // declarations have conflicting attributes? We should consider diagnosing - // this scenario. - bool Result = RD->hasAttr(); - - if (auto *CTSD = dyn_cast(RD)) - Result |= CTSD->getSpecializedTemplate()->getTemplatedDecl()->hasAttr(); - - return Result; -} - -// Tells whether the type is annotated with [[gsl::Pointer]]. -bool isGLSPointerType(QualType QT) { return isRecordWithAttr(QT); } - static bool isPointerLikeType(QualType QT) { - return isGLSPointerType(QT) || QT->isPointerType() || QT->isNullPtrType(); + return isGslPointerType(QT) || QT->isPointerType() || QT->isNullPtrType(); } // Decl::isInStdNamespace will return false for iterators in some STL @@ -331,7 +304,7 @@ static bool isContainerOfOwner(const RecordDecl *Container) { return false; const auto &TAs = CTSD->getTemplateArgs(); return TAs.size() > 0 && TAs[0].getKind() == TemplateArgument::Type && - isRecordWithAttr(TAs[0].getAsType()); + isGslOwnerType(TAs[0].getAsType()); } // Returns true if the given Record is `std::initializer_list`. 
@@ -349,14 +322,13 @@ static bool isStdInitializerListOfPointer(const RecordDecl *RD) { static bool shouldTrackImplicitObjectArg(const CXXMethodDecl *Callee) { if (auto *Conv = dyn_cast_or_null(Callee)) - if (isRecordWithAttr(Conv->getConversionType()) && + if (isGslPointerType(Conv->getConversionType()) && Callee->getParent()->hasAttr()) return true; if (!isInStlNamespace(Callee->getParent())) return false; - if (!isRecordWithAttr( - Callee->getFunctionObjectParameterType()) && - !isRecordWithAttr(Callee->getFunctionObjectParameterType())) + if (!isGslPointerType(Callee->getFunctionObjectParameterType()) && + !isGslOwnerType(Callee->getFunctionObjectParameterType())) return false; if (isPointerLikeType(Callee->getReturnType())) { if (!Callee->getIdentifier()) @@ -393,7 +365,7 @@ static bool shouldTrackFirstArgument(const FunctionDecl *FD) { if (!RD->hasAttr() && !RD->hasAttr()) return false; if (FD->getReturnType()->isPointerType() || - isRecordWithAttr(FD->getReturnType())) { + isGslPointerType(FD->getReturnType())) { return llvm::StringSwitch(FD->getName()) .Cases({"begin", "rbegin", "cbegin", "crbegin"}, true) .Cases({"end", "rend", "cend", "crend"}, true) @@ -465,7 +437,7 @@ shouldTrackFirstArgumentForConstructor(const CXXConstructExpr *Ctor) { return true; // RHS must be an owner. - if (!isRecordWithAttr(RHSArgType)) + if (!isGslOwnerType(RHSArgType)) return false; // Bail out if the RHS is Owner. @@ -547,7 +519,7 @@ static void visitFunctionCallArguments(IndirectLocalPath &Path, Expr *Call, // Once we initialized a value with a non gsl-owner reference, it can no // longer dangle. 
if (ReturnType->isReferenceType() && - !isRecordWithAttr(ReturnType->getPointeeType())) { + !isGslOwnerType(ReturnType->getPointeeType())) { for (const IndirectLocalPathEntry &PE : llvm::reverse(Path)) { if (PE.Kind == IndirectLocalPathEntry::GslReferenceInit || PE.Kind == IndirectLocalPathEntry::LifetimeBoundCall) @@ -1158,8 +1130,7 @@ static AnalysisResult analyzePathForGSLPointer(const IndirectLocalPath &Path, // auto p2 = Temp().owner; // Here p2 is dangling. if (const auto *FD = llvm::dyn_cast_or_null(E.D); FD && !FD->getType()->isReferenceType() && - isRecordWithAttr(FD->getType()) && - LK != LK_MemInitializer) { + isGslOwnerType(FD->getType()) && LK != LK_MemInitializer) { return Report; } return Abandon; @@ -1191,10 +1162,9 @@ static AnalysisResult analyzePathForGSLPointer(const IndirectLocalPath &Path, // const GSLOwner& func(const Foo& foo [[clang::lifetimebound]]) // GSLOwner* func(cosnt Foo& foo [[clang::lifetimebound]]) // GSLPointer func(const Foo& foo [[clang::lifetimebound]]) - if (FD && - ((FD->getReturnType()->isPointerOrReferenceType() && - isRecordWithAttr(FD->getReturnType()->getPointeeType())) || - isGLSPointerType(FD->getReturnType()))) + if (FD && ((FD->getReturnType()->isPointerOrReferenceType() && + isGslOwnerType(FD->getReturnType()->getPointeeType())) || + isGslPointerType(FD->getReturnType()))) return Report; return Abandon; @@ -1206,7 +1176,7 @@ static AnalysisResult analyzePathForGSLPointer(const IndirectLocalPath &Path, // int &p = *localUniquePtr; // someContainer.add(std::move(localUniquePtr)); // return p; - if (!pathContainsInit(Path) && isRecordWithAttr(L->getType())) + if (!pathContainsInit(Path) && isGslOwnerType(L->getType())) return Report; return Abandon; } @@ -1215,8 +1185,7 @@ static AnalysisResult analyzePathForGSLPointer(const IndirectLocalPath &Path, auto *MTE = dyn_cast(L); bool IsGslPtrValueFromGslTempOwner = - MTE && !MTE->getExtendingDecl() && - isRecordWithAttr(MTE->getType()); + MTE && !MTE->getExtendingDecl() && 
isGslOwnerType(MTE->getType()); // Skipping a chain of initializing gsl::Pointer annotated objects. // We are looking only for the final source to find out if it was // a local or temporary owner or the address of a local @@ -1231,7 +1200,7 @@ static bool shouldRunGSLAssignmentAnalysis(const Sema &SemaRef, bool EnableGSLAssignmentWarnings = !SemaRef.getDiagnostics().isIgnored( diag::warn_dangling_lifetime_pointer_assignment, SourceLocation()); return (EnableGSLAssignmentWarnings && - (isRecordWithAttr(Entity.LHS->getType()) || + (isGslPointerType(Entity.LHS->getType()) || lifetimes::isAssignmentOperatorLifetimeBound( Entity.AssignmentOperator))); } @@ -1400,7 +1369,7 @@ checkExprLifetimeImpl(Sema &SemaRef, const InitializedEntity *InitEntity, // Suppress false positives for code like the one below: // Ctor(unique_ptr up) : pointer(up.get()), owner(move(up)) {} // FIXME: move this logic to analyzePathForGSLPointer. - if (DRE && isRecordWithAttr(DRE->getType())) + if (DRE && isGslOwnerType(DRE->getType())) return false; auto *VD = DRE ? dyn_cast(DRE->getDecl()) : nullptr; diff --git a/clang/lib/Sema/CheckExprLifetime.h b/clang/lib/Sema/CheckExprLifetime.h index 16595d0ca1b36..38b7061988dc7 100644 --- a/clang/lib/Sema/CheckExprLifetime.h +++ b/clang/lib/Sema/CheckExprLifetime.h @@ -18,9 +18,6 @@ namespace clang::sema { -// Tells whether the type is annotated with [[gsl::Pointer]]. -bool isGLSPointerType(QualType QT); - /// Describes an entity that is being assigned. struct AssignedEntity { // The left-hand side expression of the assignment. 
diff --git a/clang/lib/Sema/SemaAttr.cpp b/clang/lib/Sema/SemaAttr.cpp index 8411a3da8322d..7729c113e422e 100644 --- a/clang/lib/Sema/SemaAttr.cpp +++ b/clang/lib/Sema/SemaAttr.cpp @@ -11,11 +11,11 @@ // //===----------------------------------------------------------------------===// -#include "CheckExprLifetime.h" #include "clang/AST/ASTConsumer.h" #include "clang/AST/Attr.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/Expr.h" +#include "clang/Analysis/Analyses/LifetimeSafety/LifetimeAnnotations.h" #include "clang/Basic/TargetInfo.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/Lookup.h" @@ -289,7 +289,7 @@ void Sema::inferLifetimeCaptureByAttribute(FunctionDecl *FD) { // We only apply the lifetime_capture_by attribute to parameters of // pointer-like reference types (`const T&`, `T&&`). if (PVD->getType()->isReferenceType() && - sema::isGLSPointerType(PVD->getType().getNonReferenceType())) { + lifetimes::isGslPointerType(PVD->getType().getNonReferenceType())) { int CaptureByThis[] = {LifetimeCaptureByAttr::This}; PVD->addAttr( LifetimeCaptureByAttr::CreateImplicit(Context, CaptureByThis, 1)); diff --git a/clang/lib/Sema/SemaExpr.cpp b/clang/lib/Sema/SemaExpr.cpp index c808dec12a6cf..cfabd1b76c103 100644 --- a/clang/lib/Sema/SemaExpr.cpp +++ b/clang/lib/Sema/SemaExpr.cpp @@ -6736,14 +6736,13 @@ ExprResult Sema::BuildCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc, checkDirectCallValidity(*this, Fn, FD, ArgExprs); - // If this expression is a call to a builtin function in HIP device - // compilation, allow a pointer-type argument to default address space to be - // passed as a pointer-type parameter to a non-default address space. - // If Arg is declared in the default address space and Param is declared - // in a non-default address space, perform an implicit address space cast to - // the parameter type. 
- if (getLangOpts().HIP && getLangOpts().CUDAIsDevice && FD && - FD->getBuiltinID()) { + // If this expression is a call to a builtin function in HIP compilation, + // allow a pointer-type argument to default address space to be passed as a + // pointer-type parameter to a non-default address space. If Arg is declared + // in the default address space and Param is declared in a non-default + // address space, perform an implicit address space cast to the parameter + // type. + if (getLangOpts().HIP && FD && FD->getBuiltinID()) { for (unsigned Idx = 0; Idx < ArgExprs.size() && Idx < FD->param_size(); ++Idx) { ParmVarDecl *Param = FD->getParamDecl(Idx); diff --git a/clang/lib/Sema/SemaExprCXX.cpp b/clang/lib/Sema/SemaExprCXX.cpp index 43bcb4f743cfa..d6f70e728be29 100644 --- a/clang/lib/Sema/SemaExprCXX.cpp +++ b/clang/lib/Sema/SemaExprCXX.cpp @@ -5658,20 +5658,13 @@ static bool ConvertForConditional(Sema &Self, ExprResult &E, QualType T) { // extension. static bool isValidVectorForConditionalCondition(ASTContext &Ctx, QualType CondTy) { - if (!CondTy->isVectorType() && !CondTy->isExtVectorType()) + bool IsSVEVectorType = CondTy->isSveVLSBuiltinType(); + if (!CondTy->isVectorType() && !CondTy->isExtVectorType() && !IsSVEVectorType) return false; const QualType EltTy = - cast(CondTy.getCanonicalType())->getElementType(); - assert(!EltTy->isEnumeralType() && "Vectors cant be enum types"); - return EltTy->isIntegralType(Ctx); -} - -static bool isValidSizelessVectorForConditionalCondition(ASTContext &Ctx, - QualType CondTy) { - if (!CondTy->isSveVLSBuiltinType()) - return false; - const QualType EltTy = - cast(CondTy.getCanonicalType())->getSveEltType(Ctx); + IsSVEVectorType + ? 
cast(CondTy.getCanonicalType())->getSveEltType(Ctx) + : cast(CondTy.getCanonicalType())->getElementType(); assert(!EltTy->isEnumeralType() && "Vectors cant be enum types"); return EltTy->isIntegralType(Ctx); } @@ -5683,21 +5676,29 @@ QualType Sema::CheckVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS, RHS = DefaultFunctionArrayLvalueConversion(RHS.get()); QualType CondType = Cond.get()->getType(); - const auto *CondVT = CondType->castAs(); - QualType CondElementTy = CondVT->getElementType(); - unsigned CondElementCount = CondVT->getNumElements(); QualType LHSType = LHS.get()->getType(); - const auto *LHSVT = LHSType->getAs(); QualType RHSType = RHS.get()->getType(); - const auto *RHSVT = RHSType->getAs(); - QualType ResultType; + bool LHSIsVector = LHSType->isVectorType() || LHSType->isSizelessVectorType(); + bool RHSIsVector = RHSType->isVectorType() || RHSType->isSizelessVectorType(); + + auto GetVectorInfo = + [&](QualType Type) -> std::pair { + if (const auto *VT = Type->getAs()) + return std::make_pair(VT->getElementType(), + llvm::ElementCount::getFixed(VT->getNumElements())); + ASTContext::BuiltinVectorTypeInfo VectorInfo = + Context.getBuiltinVectorTypeInfo(Type->castAs()); + return std::make_pair(VectorInfo.ElementType, VectorInfo.EC); + }; + auto [CondElementTy, CondElementCount] = GetVectorInfo(CondType); - if (LHSVT && RHSVT) { - if (isa(CondVT) != isa(LHSVT)) { + QualType ResultType; + if (LHSIsVector && RHSIsVector) { + if (CondType->isExtVectorType() != LHSType->isExtVectorType()) { Diag(QuestionLoc, diag::err_conditional_vector_cond_result_mismatch) - << /*isExtVector*/ isa(CondVT); + << /*isExtVector*/ CondType->isExtVectorType(); return {}; } @@ -5708,12 +5709,17 @@ QualType Sema::CheckVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS, return {}; } ResultType = Context.getCommonSugaredType(LHSType, RHSType); - } else if (LHSVT || RHSVT) { - ResultType = CheckVectorOperands( - LHS, RHS, QuestionLoc, /*isCompAssign*/ false, 
/*AllowBothBool*/ true, - /*AllowBoolConversions*/ false, - /*AllowBoolOperation*/ true, - /*ReportInvalid*/ true); + } else if (LHSIsVector || RHSIsVector) { + if (CondType->isSizelessVectorType()) + ResultType = CheckSizelessVectorOperands(LHS, RHS, QuestionLoc, + /*IsCompAssign*/ false, + ArithConvKind::Conditional); + else + ResultType = CheckVectorOperands( + LHS, RHS, QuestionLoc, /*isCompAssign*/ false, /*AllowBothBool*/ true, + /*AllowBoolConversions*/ false, + /*AllowBoolOperation*/ true, + /*ReportInvalid*/ true); if (ResultType.isNull()) return {}; } else { @@ -5731,24 +5737,33 @@ QualType Sema::CheckVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS, << ResultElementTy; return {}; } - if (CondType->isExtVectorType()) - ResultType = - Context.getExtVectorType(ResultElementTy, CondVT->getNumElements()); - else - ResultType = Context.getVectorType( - ResultElementTy, CondVT->getNumElements(), VectorKind::Generic); - + if (CondType->isExtVectorType()) { + ResultType = Context.getExtVectorType(ResultElementTy, + CondElementCount.getFixedValue()); + } else if (CondType->isSizelessVectorType()) { + ResultType = Context.getScalableVectorType( + ResultElementTy, CondElementCount.getKnownMinValue()); + // Not every element count has a corresponding scalable vector type mapping.
+ if (ResultType.isNull()) { + Diag(QuestionLoc, diag::err_conditional_vector_scalar_type_unsupported) + << ResultElementTy << CondType; + return {}; + } + } else { + ResultType = Context.getVectorType(ResultElementTy, + CondElementCount.getFixedValue(), + VectorKind::Generic); + } LHS = ImpCastExprToType(LHS.get(), ResultType, CK_VectorSplat); RHS = ImpCastExprToType(RHS.get(), ResultType, CK_VectorSplat); } - assert(!ResultType.isNull() && ResultType->isVectorType() && + assert(!ResultType.isNull() && + (ResultType->isVectorType() || ResultType->isSizelessVectorType()) && (!CondType->isExtVectorType() || ResultType->isExtVectorType()) && "Result should have been a vector type"); - auto *ResultVectorTy = ResultType->castAs(); - QualType ResultElementTy = ResultVectorTy->getElementType(); - unsigned ResultElementCount = ResultVectorTy->getNumElements(); + auto [ResultElementTy, ResultElementCount] = GetVectorInfo(ResultType); if (ResultElementCount != CondElementCount) { Diag(QuestionLoc, diag::err_conditional_vector_size) << CondType << ResultType; @@ -5767,90 +5782,6 @@ QualType Sema::CheckVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS, return ResultType; } -QualType Sema::CheckSizelessVectorConditionalTypes(ExprResult &Cond, - ExprResult &LHS, - ExprResult &RHS, - SourceLocation QuestionLoc) { - LHS = DefaultFunctionArrayLvalueConversion(LHS.get()); - RHS = DefaultFunctionArrayLvalueConversion(RHS.get()); - - QualType CondType = Cond.get()->getType(); - const auto *CondBT = CondType->castAs(); - QualType CondElementTy = CondBT->getSveEltType(Context); - llvm::ElementCount CondElementCount = - Context.getBuiltinVectorTypeInfo(CondBT).EC; - - QualType LHSType = LHS.get()->getType(); - const auto *LHSBT = - LHSType->isSveVLSBuiltinType() ? LHSType->getAs() : nullptr; - QualType RHSType = RHS.get()->getType(); - const auto *RHSBT = - RHSType->isSveVLSBuiltinType() ? 
RHSType->getAs() : nullptr; - - QualType ResultType; - - if (LHSBT && RHSBT) { - // If both are sizeless vector types, they must be the same type. - if (!Context.hasSameType(LHSType, RHSType)) { - Diag(QuestionLoc, diag::err_conditional_vector_mismatched) - << LHSType << RHSType; - return QualType(); - } - ResultType = LHSType; - } else if (LHSBT || RHSBT) { - ResultType = CheckSizelessVectorOperands(LHS, RHS, QuestionLoc, - /*IsCompAssign*/ false, - ArithConvKind::Conditional); - if (ResultType.isNull()) - return QualType(); - } else { - // Both are scalar so splat - QualType ResultElementTy; - LHSType = LHSType.getCanonicalType().getUnqualifiedType(); - RHSType = RHSType.getCanonicalType().getUnqualifiedType(); - - if (Context.hasSameType(LHSType, RHSType)) - ResultElementTy = LHSType; - else - ResultElementTy = UsualArithmeticConversions(LHS, RHS, QuestionLoc, - ArithConvKind::Conditional); - - if (ResultElementTy->isEnumeralType()) { - Diag(QuestionLoc, diag::err_conditional_vector_operand_type) - << ResultElementTy; - return QualType(); - } - - ResultType = Context.getScalableVectorType( - ResultElementTy, CondElementCount.getKnownMinValue()); - - LHS = ImpCastExprToType(LHS.get(), ResultType, CK_VectorSplat); - RHS = ImpCastExprToType(RHS.get(), ResultType, CK_VectorSplat); - } - - assert(!ResultType.isNull() && ResultType->isSveVLSBuiltinType() && - "Result should have been a vector type"); - auto *ResultBuiltinTy = ResultType->castAs(); - QualType ResultElementTy = ResultBuiltinTy->getSveEltType(Context); - llvm::ElementCount ResultElementCount = - Context.getBuiltinVectorTypeInfo(ResultBuiltinTy).EC; - - if (ResultElementCount != CondElementCount) { - Diag(QuestionLoc, diag::err_conditional_vector_size) - << CondType << ResultType; - return QualType(); - } - - if (Context.getTypeSize(ResultElementTy) != - Context.getTypeSize(CondElementTy)) { - Diag(QuestionLoc, diag::err_conditional_vector_element_size) - << CondType << ResultType; - return QualType(); - 
} - - return ResultType; -} - QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, @@ -5864,14 +5795,10 @@ QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS, bool IsVectorConditional = isValidVectorForConditionalCondition(Context, Cond.get()->getType()); - bool IsSizelessVectorConditional = - isValidSizelessVectorForConditionalCondition(Context, - Cond.get()->getType()); - // C++11 [expr.cond]p1 // The first expression is contextually converted to bool. if (!Cond.get()->isTypeDependent()) { - ExprResult CondRes = IsVectorConditional || IsSizelessVectorConditional + ExprResult CondRes = IsVectorConditional ? DefaultFunctionArrayLvalueConversion(Cond.get()) : CheckCXXBooleanCondition(Cond.get()); if (CondRes.isInvalid()) @@ -5940,9 +5867,6 @@ QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS, if (IsVectorConditional) return CheckVectorConditionalTypes(Cond, LHS, RHS, QuestionLoc); - if (IsSizelessVectorConditional) - return CheckSizelessVectorConditionalTypes(Cond, LHS, RHS, QuestionLoc); - // WebAssembly tables are not allowed as conditional LHS or RHS. if (LTy->isWebAssemblyTableType() || RTy->isWebAssemblyTableType()) { Diag(QuestionLoc, diag::err_wasm_table_conditional_expression) diff --git a/clang/lib/Sema/SemaStmt.cpp b/clang/lib/Sema/SemaStmt.cpp index 655fa31bbf5c7..6bb1a27d1800c 100644 --- a/clang/lib/Sema/SemaStmt.cpp +++ b/clang/lib/Sema/SemaStmt.cpp @@ -3889,6 +3889,11 @@ bool Sema::DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, // Update all declarations of the function to have the deduced return type. 
Context.adjustDeducedFunctionResultType(FD, Deduced); + if (!Deduced->isDependentType() && !Deduced->isRecordType() && + !FD->isFunctionTemplateSpecialization()) + diagnoseIgnoredQualifiers( + diag::warn_qual_return_type, + FD->getDeclaredReturnType().getLocalCVRQualifiers(), FD->getLocation()); return false; } diff --git a/clang/lib/Sema/SemaType.cpp b/clang/lib/Sema/SemaType.cpp index eb8b1352d1be1..eaf95a8371c2f 100644 --- a/clang/lib/Sema/SemaType.cpp +++ b/clang/lib/Sema/SemaType.cpp @@ -5067,8 +5067,11 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state, // cv-qualifiers on return types are pointless except when the type is a // class type in C++. if ((T.getCVRQualifiers() || T->isAtomicType()) && + // A dependent type or an undeduced type might later become a class + // type. !(S.getLangOpts().CPlusPlus && - (T->isDependentType() || T->isRecordType()))) { + (T->isRecordType() || T->isDependentType() || + T->isUndeducedAutoType()))) { if (T->isVoidType() && !S.getLangOpts().CPlusPlus && D.getFunctionDefinitionKind() == FunctionDefinitionKind::Definition) { diff --git a/clang/lib/Tooling/Tooling.cpp b/clang/lib/Tooling/Tooling.cpp index 9bae12454d2dc..1d55f615de8a9 100644 --- a/clang/lib/Tooling/Tooling.cpp +++ b/clang/lib/Tooling/Tooling.cpp @@ -31,6 +31,7 @@ #include "clang/Frontend/TextDiagnosticPrinter.h" #include "clang/Lex/HeaderSearchOptions.h" #include "clang/Lex/PreprocessorOptions.h" +#include "clang/Options/OptionUtils.h" #include "clang/Options/Options.h" #include "clang/Tooling/ArgumentsAdjusters.h" #include "clang/Tooling/CompilationDatabase.h" @@ -510,8 +511,7 @@ static void injectResourceDir(CommandLineArguments &Args, const char *Argv0, // If there's no override in place add our resource dir. 
Args = getInsertArgumentAdjuster( - ("-resource-dir=" + CompilerInvocation::GetResourcesPath(Argv0, MainAddr)) - .c_str())(Args, ""); + ("-resource-dir=" + GetResourcesPath(Argv0, MainAddr)).c_str())(Args, ""); } int ClangTool::run(ToolAction *Action) { diff --git a/clang/lib/Tooling/Transformer/SourceCode.cpp b/clang/lib/Tooling/Transformer/SourceCode.cpp index 922dafeddf416..fa9bf3427b8a0 100644 --- a/clang/lib/Tooling/Transformer/SourceCode.cpp +++ b/clang/lib/Tooling/Transformer/SourceCode.cpp @@ -86,8 +86,12 @@ llvm::Error clang::tooling::validateEditRange(const CharSourceRange &Range, return validateRange(Range, SM, /*AllowSystemHeaders=*/false); } -static bool spelledInMacroDefinition(SourceLocation Loc, - const SourceManager &SM) { +// Returns the location of the top-level macro argument that is the spelling for +// the expansion `Loc` is from. If `Loc` is spelled in the macro definition, +// returns an invalid `SourceLocation`. +static SourceLocation getMacroArgumentSpellingLoc(SourceLocation Loc, + const SourceManager &SM) { + assert(Loc.isMacroID() && "Location must be in a macro"); while (Loc.isMacroID()) { const auto &Expansion = SM.getSLocEntry(SM.getFileID(Loc)).getExpansion(); if (Expansion.isMacroArgExpansion()) { @@ -95,9 +99,26 @@ static bool spelledInMacroDefinition(SourceLocation Loc, // in a macro expansion. Loc = Expansion.getSpellingLoc(); } else { - return true; + return {}; } } + return Loc; +} + +static bool spelledInMacroDefinition(CharSourceRange Range, + const SourceManager &SM) { + if (Range.getBegin().isMacroID() && Range.getEnd().isMacroID()) { + // Check whether the range is entirely within a single macro argument. 
+ auto B = getMacroArgumentSpellingLoc(Range.getBegin(), SM); + auto E = getMacroArgumentSpellingLoc(Range.getEnd(), SM); + return B.isInvalid() || B != E; + } + + if (Range.getBegin().isMacroID()) + return getMacroArgumentSpellingLoc(Range.getBegin(), SM).isInvalid(); + if (Range.getEnd().isMacroID()) + return getMacroArgumentSpellingLoc(Range.getEnd(), SM).isInvalid(); + return false; } @@ -158,8 +179,7 @@ static CharSourceRange getRange(const CharSourceRange &EditRange, Range = Lexer::makeFileCharRange(EditRange, SM, LangOpts); } else { auto AdjustedRange = getRangeForSplitTokens(EditRange, SM, LangOpts); - if (spelledInMacroDefinition(AdjustedRange.getBegin(), SM) || - spelledInMacroDefinition(AdjustedRange.getEnd(), SM)) + if (spelledInMacroDefinition(AdjustedRange, SM)) return {}; auto B = SM.getSpellingLoc(AdjustedRange.getBegin()); diff --git a/clang/test/CIR/CodeGen/X86/avx512bw-builtins.c b/clang/test/CIR/CodeGen/X86/avx512bw-builtins.c deleted file mode 100644 index 3522e2c7e50bf..0000000000000 --- a/clang/test/CIR/CodeGen/X86/avx512bw-builtins.c +++ /dev/null @@ -1,117 +0,0 @@ -// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512bw -fclangir -emit-cir -o %t.cir -Wall -Werror -// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s -// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512bw -fclangir -emit-llvm -o %t.ll -Wall -Werror -// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s - -// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512bw -fno-signed-char -fclangir -emit-cir -o %t.cir -Wall -Werror -// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s -// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512bw -fno-signed-char -fclangir -emit-llvm -o 
%t.ll -Wall -Werror -// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s - -// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512bw -emit-llvm -o - -Wall -Werror | FileCheck %s -check-prefix=OGCG -// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512bw -emit-llvm -o - -Wall -Werror | FileCheck %s -check-prefix=OGCG - -// This test mimics clang/test/CodeGen/X86/avx512bw-builtins.c, which eventually -// CIR shall be able to support fully. - -#include - -__mmask32 test_kshiftli_mask32(__mmask32 A) { - // CIR-LABEL: test_kshiftli_mask32 - // CIR: [[VAL:%.*]] = cir.cast bitcast %{{.*}} : !u32i -> !cir.vector<32 x !cir.int> - // CIR: [[SHIFT:%.*]] = cir.const #cir.zero : !cir.vector<32 x !cir.int> - // CIR: %{{.*}} = cir.vec.shuffle([[SHIFT]], [[VAL]] : !cir.vector<32 x !cir.int>) [#cir.int<1> : !s32i, #cir.int<2> : !s32i, #cir.int<3> : !s32i, #cir.int<4> : !s32i, #cir.int<5> : !s32i, #cir.int<6> : !s32i, #cir.int<7> : !s32i, #cir.int<8> : !s32i, #cir.int<9> : !s32i, #cir.int<10> : !s32i, #cir.int<11> : !s32i, #cir.int<12> : !s32i, #cir.int<13> : !s32i, #cir.int<14> : !s32i, #cir.int<15> : !s32i, #cir.int<16> : !s32i, #cir.int<17> : !s32i, #cir.int<18> : !s32i, #cir.int<19> : !s32i, #cir.int<20> : !s32i, #cir.int<21> : !s32i, #cir.int<22> : !s32i, #cir.int<23> : !s32i, #cir.int<24> : !s32i, #cir.int<25> : !s32i, #cir.int<26> : !s32i, #cir.int<27> : !s32i, #cir.int<28> : !s32i, #cir.int<29> : !s32i, #cir.int<30> : !s32i, #cir.int<31> : !s32i, #cir.int<32> : !s32i] : !cir.vector<32 x !cir.int> - - // LLVM-LABEL: test_kshiftli_mask32 - // LLVM: [[VAL:%.*]] = bitcast i32 %{{.*}} to <32 x i1> - // LLVM: [[RES:%.*]] = shufflevector <32 x i1> zeroinitializer, <32 x i1> [[VAL]], <32 x i32> - - // OGCG-LABEL: test_kshiftli_mask32 - // OGCG: [[VAL:%.*]] = bitcast i32 %{{.*}} to <32 x i1> - // OGCG: [[RES:%.*]] = shufflevector <32 x 
i1> zeroinitializer, <32 x i1> [[VAL]], <32 x i32> - return _kshiftli_mask32(A, 31); -} - -__mmask32 test_kshiftri_mask32(__mmask32 A) { - // CIR-LABEL: test_kshiftri_mask32 - // CIR: [[VAL:%.*]] = cir.cast bitcast %{{.*}} : !u32i -> !cir.vector<32 x !cir.int> - // CIR: [[SHIFT:%.*]] = cir.const #cir.zero : !cir.vector<32 x !cir.int> - // CIR: %{{.*}} = cir.vec.shuffle([[VAL]], [[SHIFT]] : !cir.vector<32 x !cir.int>) [#cir.int<31> : !s32i, #cir.int<32> : !s32i, #cir.int<33> : !s32i, #cir.int<34> : !s32i, #cir.int<35> : !s32i, #cir.int<36> : !s32i, #cir.int<37> : !s32i, #cir.int<38> : !s32i, #cir.int<39> : !s32i, #cir.int<40> : !s32i, #cir.int<41> : !s32i, #cir.int<42> : !s32i, #cir.int<43> : !s32i, #cir.int<44> : !s32i, #cir.int<45> : !s32i, #cir.int<46> : !s32i, #cir.int<47> : !s32i, #cir.int<48> : !s32i, #cir.int<49> : !s32i, #cir.int<50> : !s32i, #cir.int<51> : !s32i, #cir.int<52> : !s32i, #cir.int<53> : !s32i, #cir.int<54> : !s32i, #cir.int<55> : !s32i, #cir.int<56> : !s32i, #cir.int<57> : !s32i, #cir.int<58> : !s32i, #cir.int<59> : !s32i, #cir.int<60> : !s32i, #cir.int<61> : !s32i, #cir.int<62> : !s32i] : !cir.vector<32 x !cir.int> - - // LLVM-LABEL: test_kshiftri_mask32 - // LLVM: [[VAL:%.*]] = bitcast i32 %{{.*}} to <32 x i1> - // LLVM: [[RES:%.*]] = shufflevector <32 x i1> [[VAL]], <32 x i1> zeroinitializer, <32 x i32> - - // OGCG-LABEL: test_kshiftri_mask32 - // OGCG: [[VAL:%.*]] = bitcast i32 %{{.*}} to <32 x i1> - // OGCG: [[RES:%.*]] = shufflevector <32 x i1> [[VAL]], <32 x i1> zeroinitializer, <32 x i32> - return _kshiftri_mask32(A, 31); -} - -__mmask64 test_kshiftli_mask64(__mmask64 A) { - // CIR-LABEL: test_kshiftli_mask64 - // CIR: [[VAL:%.*]] = cir.cast bitcast %{{.*}} : !u64i -> !cir.vector<64 x !cir.int> - // CIR: [[SHIFT:%.*]] = cir.const #cir.zero : !cir.vector<64 x !cir.int> - // CIR: %{{.*}} = cir.vec.shuffle([[SHIFT]], [[VAL]] : !cir.vector<64 x !cir.int>) [#cir.int<32> : !s32i, #cir.int<33> : !s32i, #cir.int<34> : !s32i, #cir.int<35> : 
!s32i, #cir.int<36> : !s32i, #cir.int<37> : !s32i, #cir.int<38> : !s32i, #cir.int<39> : !s32i, #cir.int<40> : !s32i, #cir.int<41> : !s32i, #cir.int<42> : !s32i, #cir.int<43> : !s32i, #cir.int<44> : !s32i, #cir.int<45> : !s32i, #cir.int<46> : !s32i, #cir.int<47> : !s32i, #cir.int<48> : !s32i, #cir.int<49> : !s32i, #cir.int<50> : !s32i, #cir.int<51> : !s32i, #cir.int<52> : !s32i, #cir.int<53> : !s32i, #cir.int<54> : !s32i, #cir.int<55> : !s32i, #cir.int<56> : !s32i, #cir.int<57> : !s32i, #cir.int<58> : !s32i, #cir.int<59> : !s32i, #cir.int<60> : !s32i, #cir.int<61> : !s32i, #cir.int<62> : !s32i, #cir.int<63> : !s32i, #cir.int<64> : !s32i, #cir.int<65> : !s32i, #cir.int<66> : !s32i, #cir.int<67> : !s32i, #cir.int<68> : !s32i, #cir.int<69> : !s32i, #cir.int<70> : !s32i, #cir.int<71> : !s32i, #cir.int<72> : !s32i, #cir.int<73> : !s32i, #cir.int<74> : !s32i, #cir.int<75> : !s32i, #cir.int<76> : !s32i, #cir.int<77> : !s32i, #cir.int<78> : !s32i, #cir.int<79> : !s32i, #cir.int<80> : !s32i, #cir.int<81> : !s32i, #cir.int<82> : !s32i, #cir.int<83> : !s32i, #cir.int<84> : !s32i, #cir.int<85> : !s32i, #cir.int<86> : !s32i, #cir.int<87> : !s32i, #cir.int<88> : !s32i, #cir.int<89> : !s32i, #cir.int<90> : !s32i, #cir.int<91> : !s32i, #cir.int<92> : !s32i, #cir.int<93> : !s32i, #cir.int<94> : !s32i, #cir.int<95> : !s32i] : !cir.vector<64 x !cir.int> - - // LLVM-LABEL: test_kshiftli_mask64 - // LLVM: [[VAL:%.*]] = bitcast i64 %{{.*}} to <64 x i1> - // LLVM: [[RES:%.*]] = shufflevector <64 x i1> zeroinitializer, <64 x i1> [[VAL]], <64 x i32> - - // OGCG-LABEL: test_kshiftli_mask64 - // OGCG: [[VAL:%.*]] = bitcast i64 %{{.*}} to <64 x i1> - // OGCG: [[RES:%.*]] = shufflevector <64 x i1> zeroinitializer, <64 x i1> [[VAL]], <64 x i32> - return _kshiftli_mask64(A, 32); -} - -__mmask64 test_kshiftri_mask64(__mmask64 A) { - // CIR-LABEL: test_kshiftri_mask64 - // CIR: [[VAL:%.*]] = cir.cast bitcast %{{.*}} : !u64i -> !cir.vector<64 x !cir.int> - // CIR: [[SHIFT:%.*]] = cir.const #cir.zero 
: !cir.vector<64 x !cir.int> - // CIR: %{{.*}} = cir.vec.shuffle([[VAL]], [[SHIFT]] : !cir.vector<64 x !cir.int>) [#cir.int<32> : !s32i, #cir.int<33> : !s32i, #cir.int<34> : !s32i, #cir.int<35> : !s32i, #cir.int<36> : !s32i, #cir.int<37> : !s32i, #cir.int<38> : !s32i, #cir.int<39> : !s32i, #cir.int<40> : !s32i, #cir.int<41> : !s32i, #cir.int<42> : !s32i, #cir.int<43> : !s32i, #cir.int<44> : !s32i, #cir.int<45> : !s32i, #cir.int<46> : !s32i, #cir.int<47> : !s32i, #cir.int<48> : !s32i, #cir.int<49> : !s32i, #cir.int<50> : !s32i, #cir.int<51> : !s32i, #cir.int<52> : !s32i, #cir.int<53> : !s32i, #cir.int<54> : !s32i, #cir.int<55> : !s32i, #cir.int<56> : !s32i, #cir.int<57> : !s32i, #cir.int<58> : !s32i, #cir.int<59> : !s32i, #cir.int<60> : !s32i, #cir.int<61> : !s32i, #cir.int<62> : !s32i, #cir.int<63> : !s32i, #cir.int<64> : !s32i, #cir.int<65> : !s32i, #cir.int<66> : !s32i, #cir.int<67> : !s32i, #cir.int<68> : !s32i, #cir.int<69> : !s32i, #cir.int<70> : !s32i, #cir.int<71> : !s32i, #cir.int<72> : !s32i, #cir.int<73> : !s32i, #cir.int<74> : !s32i, #cir.int<75> : !s32i, #cir.int<76> : !s32i, #cir.int<77> : !s32i, #cir.int<78> : !s32i, #cir.int<79> : !s32i, #cir.int<80> : !s32i, #cir.int<81> : !s32i, #cir.int<82> : !s32i, #cir.int<83> : !s32i, #cir.int<84> : !s32i, #cir.int<85> : !s32i, #cir.int<86> : !s32i, #cir.int<87> : !s32i, #cir.int<88> : !s32i, #cir.int<89> : !s32i, #cir.int<90> : !s32i, #cir.int<91> : !s32i, #cir.int<92> : !s32i, #cir.int<93> : !s32i, #cir.int<94> : !s32i, #cir.int<95> : !s32i] : !cir.vector<64 x !cir.int> - - // LLVM-LABEL: test_kshiftri_mask64 - // LLVM: [[VAL:%.*]] = bitcast i64 %{{.*}} to <64 x i1> - // LLVM: [[RES:%.*]] = shufflevector <64 x i1> [[VAL]], <64 x i1> zeroinitializer, <64 x i32> - - // OGCG-LABEL: test_kshiftri_mask64 - // OGCG: [[VAL:%.*]] = bitcast i64 %{{.*}} to <64 x i1> - // OGCG: [[RES:%.*]] = shufflevector <64 x i1> [[VAL]], <64 x i1> zeroinitializer, <64 x i32> - return _kshiftri_mask64(A, 32); -} - -__mmask32 
test_kshiftli_mask32_out_of_range(__mmask32 A) { - // CIR-LABEL: test_kshiftli_mask32_out_of_range - // CIR: [[VAL:%.*]] = cir.const #cir.int<0> : !u32i - // CIR: cir.store [[VAL]], {{%.*}} : !u32i, !cir.ptr - // CIR: [[RES:%.*]] = cir.load {{%.*}} : !cir.ptr, !u32i - // CIR: cir.return [[RES]] : !u32i - - // LLVM-LABEL: test_kshiftli_mask32_out_of_range - // LLVM: store i32 0, ptr [[VAL:%.*]], align 4 - // LLVM: [[RES:%.*]] = load i32, ptr [[VAL]], align 4 - // LLVM: ret i32 [[RES]] - - // OGCG-LABEL: test_kshiftli_mask32_out_of_range - // OGCG: ret i32 0 - - return _kshiftli_mask32(A, 33); -} - -__mmask32 test_kshiftri_mask32_out_of_range(__mmask32 A) { - // CIR-LABEL: test_kshiftri_mask32_out_of_range - // CIR: [[VAL:%.*]] = cir.const #cir.int<0> : !u32i - // CIR: cir.store [[VAL]], {{%.*}} : !u32i, !cir.ptr - // CIR: [[RES:%.*]] = cir.load {{%.*}} : !cir.ptr, !u32i - // CIR: cir.return [[RES]] : !u32i - - // LLVM-LABEL: test_kshiftri_mask32_out_of_range - // LLVM: store i32 0, ptr [[VAL:%.*]], align 4 - // LLVM: [[RES:%.*]] = load i32, ptr [[VAL]], align 4 - // LLVM: ret i32 [[RES]] - - // OGCG-LABEL: test_kshiftri_mask32_out_of_range - // OGCG: ret i32 0 - - return _kshiftri_mask32(A, 33); -} diff --git a/clang/test/CIR/CodeGen/X86/avx512f-builtins.c b/clang/test/CIR/CodeGen/X86/avx512f-builtins.c deleted file mode 100644 index dc54a87856a7c..0000000000000 --- a/clang/test/CIR/CodeGen/X86/avx512f-builtins.c +++ /dev/null @@ -1,79 +0,0 @@ -// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512f -fclangir -emit-cir -o %t.cir -Wall -Werror -Wsign-conversion -// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s -// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512f -fclangir -emit-llvm -o %t.ll -Wall -Werror -Wsign-conversion -// RUN: FileCheck --check-prefixes=LLVM --input-file=%t.ll %s - -// RUN: %clang_cc1 -x c++ 
-flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512f -fclangir -emit-cir -o %t.cir -Wall -Werror -Wsign-conversion -// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s -// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512f -fclangir -emit-llvm -o %t.ll -Wall -Werror -Wsign-conversion -// RUN: FileCheck --check-prefixes=LLVM --input-file=%t.ll %s - -// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512f -emit-llvm -o - -Wall -Werror -Wsign-conversion | FileCheck %s --check-prefixes=OGCG -// RUN: %clang_cc1 -x c -flax-vector-conversions=none -fms-extensions -fms-compatibility -ffreestanding %s -triple=x86_64-windows-msvc -target-feature +avx512f -emit-llvm -o - -Wall -Werror -Wsign-conversion | FileCheck %s --check-prefixes=OGCG -// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512f -emit-llvm -o - -Wall -Werror -Wsign-conversion | FileCheck %s --check-prefixes=OGCG -// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -fms-extensions -fms-compatibility -ffreestanding %s -triple=x86_64-windows-msvc -target-feature +avx512f -emit-llvm -o - -Wall -Werror -Wsign-conversion | FileCheck %s --check-prefixes=OGCG - -#include - -__m512 test_mm512_undefined(void) { - // CIR-LABEL: _mm512_undefined - // CIR: %[[A:.*]] = cir.const #cir.zero : !cir.vector<8 x !cir.double> - // CIR: %{{.*}} = cir.cast bitcast %[[A]] : !cir.vector<8 x !cir.double> -> !cir.vector<16 x !cir.float> - // CIR: cir.return %{{.*}} : !cir.vector<16 x !cir.float> - - // LLVM-LABEL: test_mm512_undefined - // LLVM: store <16 x float> zeroinitializer, ptr %[[A:.*]], align 64 - // LLVM: %{{.*}} = load <16 x float>, ptr %[[A]], align 64 - // LLVM: ret <16 x float> %{{.*}} - - // OGCG-LABEL: test_mm512_undefined - // OGCG: ret <16 x float> 
zeroinitializer - return _mm512_undefined(); -} - -__m512 test_mm512_undefined_ps(void) { - // CIR-LABEL: _mm512_undefined_ps - // CIR: %[[A:.*]] = cir.const #cir.zero : !cir.vector<8 x !cir.double> - // CIR: %{{.*}} = cir.cast bitcast %[[A]] : !cir.vector<8 x !cir.double> -> !cir.vector<16 x !cir.float> - // CIR: cir.return %{{.*}} : !cir.vector<16 x !cir.float> - - // LLVM-LABEL: test_mm512_undefined_ps - // LLVM: store <16 x float> zeroinitializer, ptr %[[A:.*]], align 64 - // LLVM: %{{.*}} = load <16 x float>, ptr %[[A]], align 64 - // LLVM: ret <16 x float> %{{.*}} - - // OGCG-LABEL: test_mm512_undefined_ps - // OGCG: ret <16 x float> zeroinitializer - return _mm512_undefined_ps(); -} - -__m512d test_mm512_undefined_pd(void) { - // CIR-LABEL: _mm512_undefined_pd - // CIR: %{{.*}} = cir.const #cir.zero : !cir.vector<8 x !cir.double> - // CIR: cir.return %{{.*}} : !cir.vector<8 x !cir.double> - - // LLVM-LABEL: test_mm512_undefined_pd - // LLVM: store <8 x double> zeroinitializer, ptr %[[A:.*]], align 64 - // LLVM: %{{.*}} = load <8 x double>, ptr %[[A]], align 64 - // LLVM: ret <8 x double> %{{.*}} - - // OGCG-LABEL: test_mm512_undefined_pd - // OGCG: ret <8 x double> zeroinitializer - return _mm512_undefined_pd(); -} - -__m512i test_mm512_undefined_epi32(void) { - // CIR-LABEL: _mm512_undefined_epi32 - // CIR: %[[A:.*]] = cir.const #cir.zero : !cir.vector<8 x !cir.double> - // CIR: %{{.*}} = cir.cast bitcast %[[A]] : !cir.vector<8 x !cir.double> -> !cir.vector<8 x !s64i> - // CIR: cir.return %{{.*}} : !cir.vector<8 x !s64i> - - // LLVM-LABEL: test_mm512_undefined_epi32 - // LLVM: store <8 x i64> zeroinitializer, ptr %[[A:.*]], align 64 - // LLVM: %{{.*}} = load <8 x i64>, ptr %[[A]], align 64 - // LLVM: ret <8 x i64> %{{.*}} - - // OGCG-LABEL: test_mm512_undefined_epi32 - // OGCG: ret <8 x i64> zeroinitializer - return _mm512_undefined_epi32(); -} diff --git a/clang/test/CIR/CodeGen/count-of.c b/clang/test/CIR/CodeGen/count-of.c new file mode 100644 index 
0000000000000..1fd1290c42e6b --- /dev/null +++ b/clang/test/CIR/CodeGen/count-of.c @@ -0,0 +1,52 @@ +// RUN: %clang_cc1 -std=c2y -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR +// RUN: %clang_cc1 -std=c2y -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-llvm %s -o %t-cir.ll +// RUN: FileCheck --input-file=%t-cir.ll %s -check-prefix=LLVM +// RUN: %clang_cc1 -std=c2y -triple x86_64-unknown-linux-gnu -Wno-unused-value -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=OGCG + +unsigned long vla_with_array_element_type_with_const_size() { + long size; + return _Countof(int[5][size]); +} + +// CIR: %[[RET_ADDR:.*]] = cir.alloca !u64i, !cir.ptr, ["__retval"] +// CIR: %[[SIZE_ADDR:.*]] = cir.alloca !s64i, !cir.ptr, ["size"] +// CIR: %[[CONST_5:.*]] = cir.const #cir.int<5> : !u64i +// CIR: cir.store %[[CONST_5]], %[[RET_ADDR]] : !u64i, !cir.ptr +// CIR: %[[RET_VAL:.*]] = cir.load %[[RET_ADDR]] : !cir.ptr, !u64i +// CIR: cir.return %[[RET_VAL]] : !u64i + +// LLVM: %[[RET_ADDR:.*]] = alloca i64, i64 1, align 8 +// LLVM: %[[SIZE_ADDR:.*]] = alloca i64, i64 1, align 8 +// LLVM: store i64 5, ptr %[[RET_ADDR]], align 8 +// LLVM: %[[RET_VAL:.*]] = load i64, ptr %[[RET_ADDR]], align 8 +// LLVM: ret i64 %[[RET_VAL]] + +// OGCG: %[[SIZE_ADDR:.*]] = alloca i64, align 8 +// OGCG: ret i64 5 + +unsigned long vla_with_array_element_type_non_const_size() { + long size; + return _Countof(int[size][size]); +} + +// CIR: %[[REET_ADDR:.*]] = cir.alloca !u64i, !cir.ptr, ["__retval"] +// CIR: %[[SIZE_ADDR:.*]] = cir.alloca !s64i, !cir.ptr, ["size"] +// CIR: %[[TMP_SIZE:.*]] = cir.load {{.*}} %[[SIZE_ADDR]] : !cir.ptr, !s64i +// CIR: %[[TMP_SIZE_U64:.*]] = cir.cast integral %[[TMP_SIZE]] : !s64i -> !u64i +// CIR: cir.store %[[TMP_SIZE_U64]], %[[RET_ADDR]] : !u64i, !cir.ptr +// CIR: %[[TMP_RET:.*]] = cir.load %[[RET_ADDR]] : !cir.ptr, !u64i +// CIR: 
cir.return %[[TMP_RET]] : !u64i + +// LLVM: %[[RET_ADDR:.*]] = alloca i64, i64 1, align 8 +// LLVM: %[[SIZE_ADDR:.*]] = alloca i64, i64 1, align 8 +// LLVM: %[[TMP_SIZE:.*]] = load i64, ptr %[[SIZE_ADDR]], align 8 +// LLVM: store i64 %[[TMP_SIZE]], ptr %[[RET_ADDR]], align 8 +// LLVM: %[[TMP_RET:.*]] = load i64, ptr %[[RET_ADDR]], align 8 +// LLVM: ret i64 %[[TMP_RET]] + +// OGCG: %[[SIZE_ADDR:.*]] = alloca i64, align 8 +// OGCG: %[[TMP_SIZE:.*]] = load i64, ptr %[[SIZE_ADDR]], align 8 +// OGCG: %[[TMP_SIZE_2:.*]] = load i64, ptr %[[SIZE_ADDR]], align 8 +// OGCG: ret i64 %[[TMP_SIZE]] diff --git a/clang/test/CIR/CodeGen/lambda.cpp b/clang/test/CIR/CodeGen/lambda.cpp index 91380b9bea296..1d06496a85530 100644 --- a/clang/test/CIR/CodeGen/lambda.cpp +++ b/clang/test/CIR/CodeGen/lambda.cpp @@ -8,6 +8,39 @@ // We declare anonymous record types to represent lambdas. Rather than trying to // to match the declarations, we establish variables for these when they are used. +auto global_lambda = [](){}; +void use_global_lambda() { + global_lambda(); +} + +// CIR: cir.global "private" internal dso_local @global_lambda = #cir.undef : ![[REC_LAM_GLOBAL_LAMBDA:.*]] {alignment = 1 : i64} +// CIR: cir.func lambda internal private dso_local @_ZNK3$_0clEv(%[[THIS_ARG:.*]]: !cir.ptr {{.*}}) +// CIR: %[[THIS:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] +// CIR: cir.store %[[THIS_ARG]], %[[THIS]] +// CIR: cir.load %[[THIS]] +// +// CIR: cir.func {{.*}} @_Z17use_global_lambdav() +// CIR: %[[LAMBDA:.*]] = cir.get_global @global_lambda : !cir.ptr +// CIR: cir.call @_ZNK3$_0clEv(%[[LAMBDA]]) : (!cir.ptr) -> () + +// LLVM: @global_lambda = internal global %[[REC_LAM_GLOBAL_LAMBDA:.*]] undef, align 1 +// LLVM: define internal void @"_ZNK3$_0clEv"(ptr %[[THIS_ARG:.*]]) +// LLVM: %[[THIS_ADDR:.*]] = alloca ptr +// LLVM: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]] +// LLVM: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]] +// +// LLVM: define dso_local void @_Z17use_global_lambdav() +// 
LLVM: call void @"_ZNK3$_0clEv"(ptr @global_lambda) + +// OGCG: @global_lambda = internal global %[[REC_LAM_GLOBAL_LAMBDA:.*]] undef, align 1 +// OGCG: define dso_local void @_Z17use_global_lambdav() +// OGCG: call void @"_ZNK3$_0clEv"(ptr noundef nonnull align 1 dereferenceable(1) @global_lambda) +// +// OGCG: define internal void @"_ZNK3$_0clEv"(ptr {{.*}} %[[THIS_ARG:.*]]) +// OGCG: %[[THIS_ADDR:.*]] = alloca ptr +// OGCG: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]] +// OGCG: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]] + void fn() { auto a = [](){}; a(); diff --git a/clang/test/CIR/CodeGen/X86/avx-builtins.c b/clang/test/CIR/CodeGenBuiltins/X86/avx-builtins.c similarity index 100% rename from clang/test/CIR/CodeGen/X86/avx-builtins.c rename to clang/test/CIR/CodeGenBuiltins/X86/avx-builtins.c diff --git a/clang/test/CIR/CodeGen/X86/avx10_2_512bf16-builtins.c b/clang/test/CIR/CodeGenBuiltins/X86/avx10_2_512bf16-builtins.c similarity index 100% rename from clang/test/CIR/CodeGen/X86/avx10_2_512bf16-builtins.c rename to clang/test/CIR/CodeGenBuiltins/X86/avx10_2_512bf16-builtins.c diff --git a/clang/test/CIR/CodeGen/X86/avx10_2bf16-builtins.c b/clang/test/CIR/CodeGenBuiltins/X86/avx10_2bf16-builtins.c similarity index 100% rename from clang/test/CIR/CodeGen/X86/avx10_2bf16-builtins.c rename to clang/test/CIR/CodeGenBuiltins/X86/avx10_2bf16-builtins.c diff --git a/clang/test/CIR/CodeGenBuiltins/X86/avx512bw-builtins.c b/clang/test/CIR/CodeGenBuiltins/X86/avx512bw-builtins.c new file mode 100644 index 0000000000000..13f6475553109 --- /dev/null +++ b/clang/test/CIR/CodeGenBuiltins/X86/avx512bw-builtins.c @@ -0,0 +1,466 @@ +// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512bw -fclangir -emit-cir -o %t.cir -Wall -Werror +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature 
+avx512bw -fclangir -emit-llvm -o %t.ll -Wall -Werror +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512bw -fno-signed-char -fclangir -emit-cir -o %t.cir -Wall -Werror +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512bw -fno-signed-char -fclangir -emit-llvm -o %t.ll -Wall -Werror +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512bw -emit-llvm -o - -Wall -Werror | FileCheck %s -check-prefix=OGCG +// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512bw -emit-llvm -o - -Wall -Werror | FileCheck %s -check-prefix=OGCG + +// This test mimics clang/test/CodeGen/X86/avx512bw-builtins.c, which eventually +// CIR shall be able to support fully. 
+ +#include + +__mmask32 test_kshiftli_mask32(__mmask32 A) { + // CIR-LABEL: test_kshiftli_mask32 + // CIR: [[VAL:%.*]] = cir.cast bitcast %{{.*}} : !u32i -> !cir.vector<32 x !cir.int> + // CIR: [[SHIFT:%.*]] = cir.const #cir.zero : !cir.vector<32 x !cir.int> + // CIR: %{{.*}} = cir.vec.shuffle([[SHIFT]], [[VAL]] : !cir.vector<32 x !cir.int>) [#cir.int<1> : !s32i, #cir.int<2> : !s32i, #cir.int<3> : !s32i, #cir.int<4> : !s32i, #cir.int<5> : !s32i, #cir.int<6> : !s32i, #cir.int<7> : !s32i, #cir.int<8> : !s32i, #cir.int<9> : !s32i, #cir.int<10> : !s32i, #cir.int<11> : !s32i, #cir.int<12> : !s32i, #cir.int<13> : !s32i, #cir.int<14> : !s32i, #cir.int<15> : !s32i, #cir.int<16> : !s32i, #cir.int<17> : !s32i, #cir.int<18> : !s32i, #cir.int<19> : !s32i, #cir.int<20> : !s32i, #cir.int<21> : !s32i, #cir.int<22> : !s32i, #cir.int<23> : !s32i, #cir.int<24> : !s32i, #cir.int<25> : !s32i, #cir.int<26> : !s32i, #cir.int<27> : !s32i, #cir.int<28> : !s32i, #cir.int<29> : !s32i, #cir.int<30> : !s32i, #cir.int<31> : !s32i, #cir.int<32> : !s32i] : !cir.vector<32 x !cir.int> + + // LLVM-LABEL: test_kshiftli_mask32 + // LLVM: [[VAL:%.*]] = bitcast i32 %{{.*}} to <32 x i1> + // LLVM: [[RES:%.*]] = shufflevector <32 x i1> zeroinitializer, <32 x i1> [[VAL]], <32 x i32> + + // OGCG-LABEL: test_kshiftli_mask32 + // OGCG: [[VAL:%.*]] = bitcast i32 %{{.*}} to <32 x i1> + // OGCG: [[RES:%.*]] = shufflevector <32 x i1> zeroinitializer, <32 x i1> [[VAL]], <32 x i32> + return _kshiftli_mask32(A, 31); +} + +__mmask32 test_kshiftri_mask32(__mmask32 A) { + // CIR-LABEL: test_kshiftri_mask32 + // CIR: [[VAL:%.*]] = cir.cast bitcast %{{.*}} : !u32i -> !cir.vector<32 x !cir.int> + // CIR: [[SHIFT:%.*]] = cir.const #cir.zero : !cir.vector<32 x !cir.int> + // CIR: %{{.*}} = cir.vec.shuffle([[VAL]], [[SHIFT]] : !cir.vector<32 x !cir.int>) [#cir.int<31> : !s32i, #cir.int<32> : !s32i, #cir.int<33> : !s32i, #cir.int<34> : !s32i, #cir.int<35> : !s32i, #cir.int<36> : !s32i, #cir.int<37> : !s32i, #cir.int<38> : 
!s32i, #cir.int<39> : !s32i, #cir.int<40> : !s32i, #cir.int<41> : !s32i, #cir.int<42> : !s32i, #cir.int<43> : !s32i, #cir.int<44> : !s32i, #cir.int<45> : !s32i, #cir.int<46> : !s32i, #cir.int<47> : !s32i, #cir.int<48> : !s32i, #cir.int<49> : !s32i, #cir.int<50> : !s32i, #cir.int<51> : !s32i, #cir.int<52> : !s32i, #cir.int<53> : !s32i, #cir.int<54> : !s32i, #cir.int<55> : !s32i, #cir.int<56> : !s32i, #cir.int<57> : !s32i, #cir.int<58> : !s32i, #cir.int<59> : !s32i, #cir.int<60> : !s32i, #cir.int<61> : !s32i, #cir.int<62> : !s32i] : !cir.vector<32 x !cir.int> + + // LLVM-LABEL: test_kshiftri_mask32 + // LLVM: [[VAL:%.*]] = bitcast i32 %{{.*}} to <32 x i1> + // LLVM: [[RES:%.*]] = shufflevector <32 x i1> [[VAL]], <32 x i1> zeroinitializer, <32 x i32> + + // OGCG-LABEL: test_kshiftri_mask32 + // OGCG: [[VAL:%.*]] = bitcast i32 %{{.*}} to <32 x i1> + // OGCG: [[RES:%.*]] = shufflevector <32 x i1> [[VAL]], <32 x i1> zeroinitializer, <32 x i32> + return _kshiftri_mask32(A, 31); +} + +__mmask64 test_kshiftli_mask64(__mmask64 A) { + // CIR-LABEL: test_kshiftli_mask64 + // CIR: [[VAL:%.*]] = cir.cast bitcast %{{.*}} : !u64i -> !cir.vector<64 x !cir.int> + // CIR: [[SHIFT:%.*]] = cir.const #cir.zero : !cir.vector<64 x !cir.int> + // CIR: %{{.*}} = cir.vec.shuffle([[SHIFT]], [[VAL]] : !cir.vector<64 x !cir.int>) [#cir.int<32> : !s32i, #cir.int<33> : !s32i, #cir.int<34> : !s32i, #cir.int<35> : !s32i, #cir.int<36> : !s32i, #cir.int<37> : !s32i, #cir.int<38> : !s32i, #cir.int<39> : !s32i, #cir.int<40> : !s32i, #cir.int<41> : !s32i, #cir.int<42> : !s32i, #cir.int<43> : !s32i, #cir.int<44> : !s32i, #cir.int<45> : !s32i, #cir.int<46> : !s32i, #cir.int<47> : !s32i, #cir.int<48> : !s32i, #cir.int<49> : !s32i, #cir.int<50> : !s32i, #cir.int<51> : !s32i, #cir.int<52> : !s32i, #cir.int<53> : !s32i, #cir.int<54> : !s32i, #cir.int<55> : !s32i, #cir.int<56> : !s32i, #cir.int<57> : !s32i, #cir.int<58> : !s32i, #cir.int<59> : !s32i, #cir.int<60> : !s32i, #cir.int<61> : !s32i, #cir.int<62> : 
!s32i, #cir.int<63> : !s32i, #cir.int<64> : !s32i, #cir.int<65> : !s32i, #cir.int<66> : !s32i, #cir.int<67> : !s32i, #cir.int<68> : !s32i, #cir.int<69> : !s32i, #cir.int<70> : !s32i, #cir.int<71> : !s32i, #cir.int<72> : !s32i, #cir.int<73> : !s32i, #cir.int<74> : !s32i, #cir.int<75> : !s32i, #cir.int<76> : !s32i, #cir.int<77> : !s32i, #cir.int<78> : !s32i, #cir.int<79> : !s32i, #cir.int<80> : !s32i, #cir.int<81> : !s32i, #cir.int<82> : !s32i, #cir.int<83> : !s32i, #cir.int<84> : !s32i, #cir.int<85> : !s32i, #cir.int<86> : !s32i, #cir.int<87> : !s32i, #cir.int<88> : !s32i, #cir.int<89> : !s32i, #cir.int<90> : !s32i, #cir.int<91> : !s32i, #cir.int<92> : !s32i, #cir.int<93> : !s32i, #cir.int<94> : !s32i, #cir.int<95> : !s32i] : !cir.vector<64 x !cir.int> + + // LLVM-LABEL: test_kshiftli_mask64 + // LLVM: [[VAL:%.*]] = bitcast i64 %{{.*}} to <64 x i1> + // LLVM: [[RES:%.*]] = shufflevector <64 x i1> zeroinitializer, <64 x i1> [[VAL]], <64 x i32> + + // OGCG-LABEL: test_kshiftli_mask64 + // OGCG: [[VAL:%.*]] = bitcast i64 %{{.*}} to <64 x i1> + // OGCG: [[RES:%.*]] = shufflevector <64 x i1> zeroinitializer, <64 x i1> [[VAL]], <64 x i32> + return _kshiftli_mask64(A, 32); +} + +__mmask64 test_kshiftri_mask64(__mmask64 A) { + // CIR-LABEL: test_kshiftri_mask64 + // CIR: [[VAL:%.*]] = cir.cast bitcast %{{.*}} : !u64i -> !cir.vector<64 x !cir.int> + // CIR: [[SHIFT:%.*]] = cir.const #cir.zero : !cir.vector<64 x !cir.int> + // CIR: %{{.*}} = cir.vec.shuffle([[VAL]], [[SHIFT]] : !cir.vector<64 x !cir.int>) [#cir.int<32> : !s32i, #cir.int<33> : !s32i, #cir.int<34> : !s32i, #cir.int<35> : !s32i, #cir.int<36> : !s32i, #cir.int<37> : !s32i, #cir.int<38> : !s32i, #cir.int<39> : !s32i, #cir.int<40> : !s32i, #cir.int<41> : !s32i, #cir.int<42> : !s32i, #cir.int<43> : !s32i, #cir.int<44> : !s32i, #cir.int<45> : !s32i, #cir.int<46> : !s32i, #cir.int<47> : !s32i, #cir.int<48> : !s32i, #cir.int<49> : !s32i, #cir.int<50> : !s32i, #cir.int<51> : !s32i, #cir.int<52> : !s32i, #cir.int<53> : 
!s32i, #cir.int<54> : !s32i, #cir.int<55> : !s32i, #cir.int<56> : !s32i, #cir.int<57> : !s32i, #cir.int<58> : !s32i, #cir.int<59> : !s32i, #cir.int<60> : !s32i, #cir.int<61> : !s32i, #cir.int<62> : !s32i, #cir.int<63> : !s32i, #cir.int<64> : !s32i, #cir.int<65> : !s32i, #cir.int<66> : !s32i, #cir.int<67> : !s32i, #cir.int<68> : !s32i, #cir.int<69> : !s32i, #cir.int<70> : !s32i, #cir.int<71> : !s32i, #cir.int<72> : !s32i, #cir.int<73> : !s32i, #cir.int<74> : !s32i, #cir.int<75> : !s32i, #cir.int<76> : !s32i, #cir.int<77> : !s32i, #cir.int<78> : !s32i, #cir.int<79> : !s32i, #cir.int<80> : !s32i, #cir.int<81> : !s32i, #cir.int<82> : !s32i, #cir.int<83> : !s32i, #cir.int<84> : !s32i, #cir.int<85> : !s32i, #cir.int<86> : !s32i, #cir.int<87> : !s32i, #cir.int<88> : !s32i, #cir.int<89> : !s32i, #cir.int<90> : !s32i, #cir.int<91> : !s32i, #cir.int<92> : !s32i, #cir.int<93> : !s32i, #cir.int<94> : !s32i, #cir.int<95> : !s32i] : !cir.vector<64 x !cir.int> + + // LLVM-LABEL: test_kshiftri_mask64 + // LLVM: [[VAL:%.*]] = bitcast i64 %{{.*}} to <64 x i1> + // LLVM: [[RES:%.*]] = shufflevector <64 x i1> [[VAL]], <64 x i1> zeroinitializer, <64 x i32> + + // OGCG-LABEL: test_kshiftri_mask64 + // OGCG: [[VAL:%.*]] = bitcast i64 %{{.*}} to <64 x i1> + // OGCG: [[RES:%.*]] = shufflevector <64 x i1> [[VAL]], <64 x i1> zeroinitializer, <64 x i32> + return _kshiftri_mask64(A, 32); +} + +__mmask32 test_kshiftli_mask32_out_of_range(__mmask32 A) { + // CIR-LABEL: test_kshiftli_mask32_out_of_range + // CIR: [[VAL:%.*]] = cir.const #cir.int<0> : !u32i + // CIR: cir.store [[VAL]], {{%.*}} : !u32i, !cir.ptr + // CIR: [[RES:%.*]] = cir.load {{%.*}} : !cir.ptr, !u32i + // CIR: cir.return [[RES]] : !u32i + + // LLVM-LABEL: test_kshiftli_mask32_out_of_range + // LLVM: store i32 0, ptr [[VAL:%.*]], align 4 + // LLVM: [[RES:%.*]] = load i32, ptr [[VAL]], align 4 + // LLVM: ret i32 [[RES]] + + // OGCG-LABEL: test_kshiftli_mask32_out_of_range + // OGCG: ret i32 0 + + return _kshiftli_mask32(A, 33); +} 
+ +__mmask32 test_kshiftri_mask32_out_of_range(__mmask32 A) { + // CIR-LABEL: test_kshiftri_mask32_out_of_range + // CIR: [[VAL:%.*]] = cir.const #cir.int<0> : !u32i + // CIR: cir.store [[VAL]], {{%.*}} : !u32i, !cir.ptr + // CIR: [[RES:%.*]] = cir.load {{%.*}} : !cir.ptr, !u32i + // CIR: cir.return [[RES]] : !u32i + + // LLVM-LABEL: test_kshiftri_mask32_out_of_range + // LLVM: store i32 0, ptr [[VAL:%.*]], align 4 + // LLVM: [[RES:%.*]] = load i32, ptr [[VAL]], align 4 + // LLVM: ret i32 [[RES]] + + // OGCG-LABEL: test_kshiftri_mask32_out_of_range + // OGCG: ret i32 0 + + return _kshiftri_mask32(A, 33); +} + +__mmask32 test_kadd_mask32(__mmask32 A, __mmask32 B) { + // CIR-LABEL: _kadd_mask32 + // CIR: cir.cast bitcast {{.*}} : !u32i -> !cir.vector<32 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !u32i -> !cir.vector<32 x !cir.int> + // CIR: cir.call_llvm_intrinsic "x86.avx512.kadd.d" + // CIR: cir.cast bitcast {{.*}} : !cir.vector<32 x !cir.int> -> !u32i + + // LLVM-LABEL: _kadd_mask32 + // LLVM: [[L:%.*]] = bitcast i32 %{{.*}} to <32 x i1> + // LLVM: [[R:%.*]] = bitcast i32 %{{.*}} to <32 x i1> + // LLVM: [[RES:%.*]] = call <32 x i1> @llvm.x86.avx512.kadd.d(<32 x i1> [[L]], <32 x i1> [[R]]) + // LLVM: bitcast <32 x i1> [[RES]] to i32 + + // OGCG-LABEL: _kadd_mask32 + // OGCG: bitcast i32 %{{.*}} to <32 x i1> + // OGCG: bitcast i32 %{{.*}} to <32 x i1> + // OGCG: call <32 x i1> @llvm.x86.avx512.kadd.d + // OGCG: bitcast <32 x i1> {{.*}} to i32 + return _kadd_mask32(A, B); +} + +__mmask64 test_kadd_mask64(__mmask64 A, __mmask64 B) { + // CIR-LABEL: _kadd_mask64 + // CIR: cir.cast bitcast {{.*}} : !u64i -> !cir.vector<64 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !u64i -> !cir.vector<64 x !cir.int> + // CIR: cir.call_llvm_intrinsic "x86.avx512.kadd.q" + // CIR: cir.cast bitcast {{.*}} : !cir.vector<64 x !cir.int> -> !u64i + + // LLVM-LABEL: _kadd_mask64 + // LLVM: [[L:%.*]] = bitcast i64 %{{.*}} to <64 x i1> + // LLVM: [[R:%.*]] = bitcast i64 %{{.*}} to <64 x 
i1> + // LLVM: [[RES:%.*]] = call <64 x i1> @llvm.x86.avx512.kadd.q(<64 x i1> [[L]], <64 x i1> [[R]]) + // LLVM: bitcast <64 x i1> [[RES]] to i64 + + // OGCG-LABEL: _kadd_mask64 + // OGCG: bitcast i64 %{{.*}} to <64 x i1> + // OGCG: bitcast i64 %{{.*}} to <64 x i1> + // OGCG: call <64 x i1> @llvm.x86.avx512.kadd.q + // OGCG: bitcast <64 x i1> {{.*}} to i64 + return _kadd_mask64(A, B); +} + +__mmask32 test_kand_mask32(__mmask32 A, __mmask32 B) { + // CIR-LABEL: _kand_mask32 + // CIR: cir.cast bitcast {{.*}} : !u32i -> !cir.vector<32 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !u32i -> !cir.vector<32 x !cir.int> + // CIR: cir.binop(and, {{.*}}, {{.*}}) : !cir.vector<32 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<32 x !cir.int> -> !u32i + + // LLVM-LABEL: _kand_mask32 + // LLVM: [[L:%.*]] = bitcast i32 %{{.*}} to <32 x i1> + // LLVM: [[R:%.*]] = bitcast i32 %{{.*}} to <32 x i1> + // LLVM: [[RES:%.*]] = and <32 x i1> [[L]], [[R]] + // LLVM: bitcast <32 x i1> [[RES]] to i32 + + // OGCG-LABEL: _kand_mask32 + // OGCG: bitcast i32 %{{.*}} to <32 x i1> + // OGCG: bitcast i32 %{{.*}} to <32 x i1> + // OGCG: and <32 x i1> + // OGCG: bitcast <32 x i1> {{.*}} to i32 + return _kand_mask32(A, B); +} + +__mmask64 test_kand_mask64(__mmask64 A, __mmask64 B) { + // CIR-LABEL: _kand_mask64 + // CIR: cir.cast bitcast {{.*}} : !u64i -> !cir.vector<64 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !u64i -> !cir.vector<64 x !cir.int> + // CIR: cir.binop(and, {{.*}}, {{.*}}) : !cir.vector<64 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<64 x !cir.int> -> !u64i + + // LLVM-LABEL: _kand_mask64 + // LLVM: [[L:%.*]] = bitcast i64 %{{.*}} to <64 x i1> + // LLVM: [[R:%.*]] = bitcast i64 %{{.*}} to <64 x i1> + // LLVM: [[RES:%.*]] = and <64 x i1> [[L]], [[R]] + // LLVM: bitcast <64 x i1> [[RES]] to i64 + + // OGCG-LABEL: _kand_mask64 + // OGCG: bitcast i64 %{{.*}} to <64 x i1> + // OGCG: bitcast i64 %{{.*}} to <64 x i1> + // OGCG: and <64 x i1> + // OGCG: 
bitcast <64 x i1> {{.*}} to i64 + return _kand_mask64(A, B); +} + +__mmask32 test_kandn_mask32(__mmask32 A, __mmask32 B) { + // CIR-LABEL: _kandn_mask32 + // CIR: cir.cast bitcast {{.*}} : !u32i -> !cir.vector<32 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !u32i -> !cir.vector<32 x !cir.int> + // CIR: cir.unary(not, {{.*}}) : !cir.vector<32 x !cir.int> + // CIR: cir.binop(and, {{.*}}, {{.*}}) : !cir.vector<32 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<32 x !cir.int> -> !u32i + + // LLVM-LABEL: _kandn_mask32 + // LLVM: [[L:%.*]] = bitcast i32 %{{.*}} to <32 x i1> + // LLVM: [[R:%.*]] = bitcast i32 %{{.*}} to <32 x i1> + // LLVM: xor <32 x i1> [[L]], splat (i1 true) + // LLVM: and <32 x i1> + // LLVM: bitcast <32 x i1> {{.*}} to i32 + + // OGCG-LABEL: _kandn_mask32 + // OGCG: bitcast i32 %{{.*}} to <32 x i1> + // OGCG: bitcast i32 %{{.*}} to <32 x i1> + // OGCG: xor <32 x i1> + // OGCG: and <32 x i1> + // OGCG: bitcast <32 x i1> {{.*}} to i32 + return _kandn_mask32(A, B); +} + +__mmask64 test_kandn_mask64(__mmask64 A, __mmask64 B) { + // CIR-LABEL: _kandn_mask64 + // CIR: cir.cast bitcast {{.*}} : !u64i -> !cir.vector<64 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !u64i -> !cir.vector<64 x !cir.int> + // CIR: cir.unary(not, {{.*}}) : !cir.vector<64 x !cir.int> + // CIR: cir.binop(and, {{.*}}, {{.*}}) : !cir.vector<64 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<64 x !cir.int> -> !u64i + + // LLVM-LABEL: _kandn_mask64 + // LLVM: [[L:%.*]] = bitcast i64 %{{.*}} to <64 x i1> + // LLVM: [[R:%.*]] = bitcast i64 %{{.*}} to <64 x i1> + // LLVM: xor <64 x i1> [[L]], splat (i1 true) + // LLVM: and <64 x i1> + // LLVM: bitcast <64 x i1> {{.*}} to i64 + + // OGCG-LABEL: _kandn_mask64 + // OGCG: bitcast i64 %{{.*}} to <64 x i1> + // OGCG: bitcast i64 %{{.*}} to <64 x i1> + // OGCG: xor <64 x i1> + // OGCG: and <64 x i1> + // OGCG: bitcast <64 x i1> {{.*}} to i64 + return _kandn_mask64(A, B); +} + +__mmask32 test_kor_mask32(__mmask32 A, 
__mmask32 B) { + // CIR-LABEL: _kor_mask32 + // CIR: cir.cast bitcast {{.*}} : !u32i -> !cir.vector<32 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !u32i -> !cir.vector<32 x !cir.int> + // CIR: cir.binop(or, {{.*}}, {{.*}}) : !cir.vector<32 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<32 x !cir.int> -> !u32i + + // LLVM-LABEL: _kor_mask32 + // LLVM: [[L:%.*]] = bitcast i32 %{{.*}} to <32 x i1> + // LLVM: [[R:%.*]] = bitcast i32 %{{.*}} to <32 x i1> + // LLVM: or <32 x i1> [[L]], [[R]] + // LLVM: bitcast <32 x i1> {{.*}} to i32 + + // OGCG-LABEL: _kor_mask32 + // OGCG: bitcast i32 %{{.*}} to <32 x i1> + // OGCG: bitcast i32 %{{.*}} to <32 x i1> + // OGCG: or <32 x i1> + // OGCG: bitcast <32 x i1> {{.*}} to i32 + return _kor_mask32(A, B); +} + +__mmask64 test_kor_mask64(__mmask64 A, __mmask64 B) { + // CIR-LABEL: _kor_mask64 + // CIR: cir.cast bitcast {{.*}} : !u64i -> !cir.vector<64 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !u64i -> !cir.vector<64 x !cir.int> + // CIR: cir.binop(or, {{.*}}, {{.*}}) : !cir.vector<64 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<64 x !cir.int> -> !u64i + + // LLVM-LABEL: _kor_mask64 + // LLVM: [[L:%.*]] = bitcast i64 %{{.*}} to <64 x i1> + // LLVM: [[R:%.*]] = bitcast i64 %{{.*}} to <64 x i1> + // LLVM: or <64 x i1> [[L]], [[R]] + // LLVM: bitcast <64 x i1> {{.*}} to i64 + + // OGCG-LABEL: _kor_mask64 + // OGCG: bitcast i64 %{{.*}} to <64 x i1> + // OGCG: bitcast i64 %{{.*}} to <64 x i1> + // OGCG: or <64 x i1> + // OGCG: bitcast <64 x i1> {{.*}} to i64 + return _kor_mask64(A, B); +} + +__mmask32 test_kxor_mask32(__mmask32 A, __mmask32 B) { + // CIR-LABEL: _kxor_mask32 + // CIR: cir.cast bitcast {{.*}} : !u32i -> !cir.vector<32 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !u32i -> !cir.vector<32 x !cir.int> + // CIR: cir.binop(xor, {{.*}}, {{.*}}) : !cir.vector<32 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<32 x !cir.int> -> !u32i + + // LLVM-LABEL: _kxor_mask32 + // LLVM: 
[[L:%.*]] = bitcast i32 %{{.*}} to <32 x i1> + // LLVM: [[R:%.*]] = bitcast i32 %{{.*}} to <32 x i1> + // LLVM: xor <32 x i1> [[L]], [[R]] + // LLVM: bitcast <32 x i1> {{.*}} to i32 + + // OGCG-LABEL: _kxor_mask32 + // OGCG: bitcast i32 %{{.*}} to <32 x i1> + // OGCG: bitcast i32 %{{.*}} to <32 x i1> + // OGCG: xor <32 x i1> + // OGCG: bitcast <32 x i1> {{.*}} to i32 + return _kxor_mask32(A, B); +} + +__mmask64 test_kxor_mask64(__mmask64 A, __mmask64 B) { + // CIR-LABEL: _kxor_mask64 + // CIR: cir.cast bitcast {{.*}} : !u64i -> !cir.vector<64 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !u64i -> !cir.vector<64 x !cir.int> + // CIR: cir.binop(xor, {{.*}}, {{.*}}) : !cir.vector<64 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<64 x !cir.int> -> !u64i + + // LLVM-LABEL: _kxor_mask64 + // LLVM: [[L:%.*]] = bitcast i64 %{{.*}} to <64 x i1> + // LLVM: [[R:%.*]] = bitcast i64 %{{.*}} to <64 x i1> + // LLVM: xor <64 x i1> [[L]], [[R]] + // LLVM: bitcast <64 x i1> {{.*}} to i64 + + // OGCG-LABEL: _kxor_mask64 + // OGCG: bitcast i64 %{{.*}} to <64 x i1> + // OGCG: bitcast i64 %{{.*}} to <64 x i1> + // OGCG: xor <64 x i1> + // OGCG: bitcast <64 x i1> {{.*}} to i64 + return _kxor_mask64(A, B); +} + +__mmask32 test_kxnor_mask32(__mmask32 A, __mmask32 B) { + // CIR-LABEL: _kxnor_mask32 + // CIR: cir.cast bitcast {{.*}} : !u32i -> !cir.vector<32 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !u32i -> !cir.vector<32 x !cir.int> + // CIR: cir.unary(not, {{.*}}) : !cir.vector<32 x !cir.int> + // CIR: cir.binop(xor, {{.*}}, {{.*}}) : !cir.vector<32 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<32 x !cir.int> -> !u32i + + // LLVM-LABEL: _kxnor_mask32 + // LLVM: [[L:%.*]] = bitcast i32 %{{.*}} to <32 x i1> + // LLVM: [[R:%.*]] = bitcast i32 %{{.*}} to <32 x i1> + // LLVM: [[NOT:%.*]] = xor <32 x i1> [[L]], splat (i1 true) + // LLVM: [[RES:%.*]] = xor <32 x i1> [[NOT]], [[R]] + // LLVM: bitcast <32 x i1> [[RES]] to i32 + + // OGCG-LABEL: _kxnor_mask32 + // 
OGCG: bitcast i32 %{{.*}} to <32 x i1> + // OGCG: bitcast i32 %{{.*}} to <32 x i1> + // OGCG: xor <32 x i1> + // OGCG: xor <32 x i1> + // OGCG: bitcast <32 x i1> {{.*}} to i32 + + return _kxnor_mask32(A, B); +} + +__mmask64 test_kxnor_mask64(__mmask64 A, __mmask64 B) { + // CIR-LABEL: _kxnor_mask64 + // CIR: cir.cast bitcast {{.*}} : !u64i -> !cir.vector<64 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !u64i -> !cir.vector<64 x !cir.int> + // CIR: cir.unary(not, {{.*}}) : !cir.vector<64 x !cir.int> + // CIR: cir.binop(xor, {{.*}}, {{.*}}) : !cir.vector<64 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<64 x !cir.int> -> !u64i + + // LLVM-LABEL: _kxnor_mask64 + // LLVM: [[L:%.*]] = bitcast i64 %{{.*}} to <64 x i1> + // LLVM: [[R:%.*]] = bitcast i64 %{{.*}} to <64 x i1> + // LLVM: [[NOT:%.*]] = xor <64 x i1> [[L]], splat (i1 true) + // LLVM: [[RES:%.*]] = xor <64 x i1> [[NOT]], [[R]] + // LLVM: bitcast <64 x i1> [[RES]] to i64 + + // OGCG-LABEL: _kxnor_mask64 + // OGCG: bitcast i64 %{{.*}} to <64 x i1> + // OGCG: bitcast i64 %{{.*}} to <64 x i1> + // OGCG: xor <64 x i1> + // OGCG: xor <64 x i1> + // OGCG: bitcast <64 x i1> {{.*}} to i64 + + return _kxnor_mask64(A, B); +} + + +__mmask32 test_knot_mask32(__mmask32 A) { + // CIR-LABEL: _knot_mask32 + // CIR: cir.cast bitcast {{.*}} : !u32i -> !cir.vector<32 x !cir.int> + // CIR: cir.unary(not, {{.*}}) : !cir.vector<32 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<32 x !cir.int> -> !u32i + + // LLVM-LABEL: _knot_mask32 + // LLVM: bitcast i32 %{{.*}} to <32 x i1> + // LLVM: xor <32 x i1> + // LLVM: bitcast <32 x i1> {{.*}} to i32 + + // OGCG-LABEL: _knot_mask32 + // OGCG: bitcast i32 %{{.*}} to <32 x i1> + // OGCG: xor <32 x i1> + // OGCG: bitcast <32 x i1> {{.*}} to i32 + return _knot_mask32(A); +} + +__mmask64 test_knot_mask64(__mmask64 A) { + // CIR-LABEL: _knot_mask64 + // CIR: cir.cast bitcast {{.*}} : !u64i -> !cir.vector<64 x !cir.int> + // CIR: cir.unary(not, {{.*}}) : !cir.vector<64 x 
!cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<64 x !cir.int> -> !u64i + + // LLVM-LABEL: _knot_mask64 + // LLVM: bitcast i64 %{{.*}} to <64 x i1> + // LLVM: xor <64 x i1> + // LLVM: bitcast <64 x i1> {{.*}} to i64 + + // OGCG-LABEL: _knot_mask64 + // OGCG: bitcast i64 %{{.*}} to <64 x i1> + // OGCG: xor <64 x i1> + // OGCG: bitcast <64 x i1> {{.*}} to i64 + return _knot_mask64(A); +} + +// Multiple user-level mask helpers inline to this same kmov builtin. +// CIR does not implement any special lowering for those helpers. +// +// Therefore, testing the builtin (__builtin_ia32_kmov*) directly is +// sufficient to cover the CIR lowering behavior. Testing each helper +// individually would add no new CIR paths. + +__mmask32 test_kmov_d(__mmask32 A) { + // CIR-LABEL: test_kmov_d + // CIR: cir.cast bitcast {{.*}} : !u32i -> !cir.vector<32 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<32 x !cir.int> -> !u32i + + // LLVM-LABEL: test_kmov_d + // LLVM: bitcast i32 %{{.*}} to <32 x i1> + // LLVM: bitcast <32 x i1> {{.*}} to i32 + + // OGCG-LABEL: test_kmov_d + // OGCG: bitcast i32 %{{.*}} to <32 x i1> + // OGCG: bitcast <32 x i1> {{.*}} to i32 + + return __builtin_ia32_kmovd(A); +} + +// Multiple user-level mask helpers inline to this same kmov builtin. +// CIR does not implement any special lowering for those helpers. +// +// Therefore, testing the builtin (__builtin_ia32_kmov*) directly is +// sufficient to cover the CIR lowering behavior. Testing each helper +// individually would add no new CIR paths. 
+ +__mmask64 test_kmov_q(__mmask64 A) { + // CIR-LABEL: test_kmov_q + // CIR: cir.cast bitcast {{.*}} : !u64i -> !cir.vector<64 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<64 x !cir.int> -> !u64i + + // LLVM-LABEL: test_kmov_q + // LLVM: bitcast i64 %{{.*}} to <64 x i1> + // LLVM: bitcast <64 x i1> {{.*}} to i64 + + // OGCG-LABEL: test_kmov_q + // OGCG: bitcast i64 %{{.*}} to <64 x i1> + // OGCG: bitcast <64 x i1> {{.*}} to i64 + + return __builtin_ia32_kmovq(A); +} diff --git a/clang/test/CIR/CodeGenBuiltins/X86/avx512dq-builtins.c b/clang/test/CIR/CodeGenBuiltins/X86/avx512dq-builtins.c new file mode 100644 index 0000000000000..5b0d26062fcfb --- /dev/null +++ b/clang/test/CIR/CodeGenBuiltins/X86/avx512dq-builtins.c @@ -0,0 +1,210 @@ + // RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512dq -fclangir -emit-cir -o %t.cir -Wall -Werror + // RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s + // RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512dq -fclangir -emit-llvm -o %t.ll -Wall -Werror + // RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + + // RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512dq -fno-signed-char -fclangir -emit-cir -o %t.cir -Wall -Werror + // RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s + // RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512dq -fno-signed-char -fclangir -emit-llvm -o %t.ll -Wall -Werror + // RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + + // RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512dq -emit-llvm -o - -Wall -Werror | FileCheck %s -check-prefix=OGCG + // RUN: %clang_cc1 -x c++ -flax-vector-conversions=none 
-ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512dq -emit-llvm -o - -Wall -Werror | FileCheck %s -check-prefix=OGCG + + #include + +__mmask8 test_kadd_mask8(__mmask8 A, __mmask8 B) { + // CIR-LABEL: _kadd_mask8 + // CIR: cir.cast bitcast {{.*}} : !u8i -> !cir.vector<8 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !u8i -> !cir.vector<8 x !cir.int> + // CIR: cir.call_llvm_intrinsic "x86.avx512.kadd.b" + // CIR: cir.cast bitcast {{.*}} : !cir.vector<8 x !cir.int> -> !u8i + + // LLVM-LABEL: _kadd_mask8 + // LLVM: [[L:%.*]] = bitcast i8 %{{.*}} to <8 x i1> + // LLVM: [[R:%.*]] = bitcast i8 %{{.*}} to <8 x i1> + // LLVM: [[RES:%.*]] = call <8 x i1> @llvm.x86.avx512.kadd.b(<8 x i1> [[L]], <8 x i1> [[R]]) + // LLVM: bitcast <8 x i1> [[RES]] to i8 + + // OGCG-LABEL: _kadd_mask8 + // OGCG: bitcast i8 %{{.*}} to <8 x i1> + // OGCG: bitcast i8 %{{.*}} to <8 x i1> + // OGCG: call <8 x i1> @llvm.x86.avx512.kadd.b + // OGCG: bitcast <8 x i1> {{.*}} to i8 + return _kadd_mask8(A, B); +} + +__mmask16 test_kadd_mask16(__mmask16 A, __mmask16 B) { + // CIR-LABEL: _kadd_mask16 + // CIR: cir.cast bitcast {{.*}} : !u16i -> !cir.vector<16 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !u16i -> !cir.vector<16 x !cir.int> + // CIR: cir.call_llvm_intrinsic "x86.avx512.kadd.w" + // CIR: cir.cast bitcast {{.*}} : !cir.vector<16 x !cir.int> -> !u16i + + // LLVM-LABEL: _kadd_mask16 + // LLVM: [[L:%.*]] = bitcast i16 %{{.*}} to <16 x i1> + // LLVM: [[R:%.*]] = bitcast i16 %{{.*}} to <16 x i1> + // LLVM: [[RES:%.*]] = call <16 x i1> @llvm.x86.avx512.kadd.w(<16 x i1> [[L]], <16 x i1> [[R]]) + // LLVM: bitcast <16 x i1> [[RES]] to i16 + + // OGCG-LABEL: _kadd_mask16 + // OGCG: bitcast i16 %{{.*}} to <16 x i1> + // OGCG: bitcast i16 %{{.*}} to <16 x i1> + // OGCG: call <16 x i1> @llvm.x86.avx512.kadd.w + // OGCG: bitcast <16 x i1> {{.*}} to i16 + return _kadd_mask16(A, B); +} + +__mmask8 test_kand_mask8(__mmask8 A, __mmask8 B) { + // CIR-LABEL: _kand_mask8 + // CIR: cir.cast 
bitcast {{.*}} : !u8i -> !cir.vector<8 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !u8i -> !cir.vector<8 x !cir.int> + // CIR: cir.binop(and, {{.*}}, {{.*}}) : !cir.vector<8 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<8 x !cir.int> -> !u8i + + // LLVM-LABEL: _kand_mask8 + // LLVM: [[L:%.*]] = bitcast i8 %{{.*}} to <8 x i1> + // LLVM: [[R:%.*]] = bitcast i8 %{{.*}} to <8 x i1> + // LLVM: [[RES:%.*]] = and <8 x i1> [[L]], [[R]] + // LLVM: bitcast <8 x i1> [[RES]] to i8 + + // OGCG-LABEL: _kand_mask8 + // OGCG: bitcast i8 %{{.*}} to <8 x i1> + // OGCG: bitcast i8 %{{.*}} to <8 x i1> + // OGCG: and <8 x i1> + // OGCG: bitcast <8 x i1> {{.*}} to i8 + return _kand_mask8(A, B); +} + + +__mmask8 test_kandn_mask8(__mmask8 A, __mmask8 B) { + // CIR-LABEL: _kandn_mask8 + // CIR: cir.cast bitcast {{.*}} : !u8i -> !cir.vector<8 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !u8i -> !cir.vector<8 x !cir.int> + // CIR: cir.unary(not, {{.*}}) : !cir.vector<8 x !cir.int> + // CIR: cir.binop(and, {{.*}}, {{.*}}) : !cir.vector<8 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<8 x !cir.int> -> !u8i + + // LLVM-LABEL: _kandn_mask8 + // LLVM: [[L:%.*]] = bitcast i8 %{{.*}} to <8 x i1> + // LLVM: [[R:%.*]] = bitcast i8 %{{.*}} to <8 x i1> + // LLVM: xor <8 x i1> [[L]], splat (i1 true) + // LLVM: and <8 x i1> + // LLVM: bitcast <8 x i1> {{.*}} to i8 + + // OGCG-LABEL: _kandn_mask8 + // OGCG: bitcast i8 %{{.*}} to <8 x i1> + // OGCG: bitcast i8 %{{.*}} to <8 x i1> + // OGCG: xor <8 x i1> + // OGCG: and <8 x i1> + // OGCG: bitcast <8 x i1> {{.*}} to i8 + + return _kandn_mask8(A, B); +} + +__mmask8 test_kor_mask8(__mmask8 A, __mmask8 B) { + // CIR-LABEL: _kor_mask8 + // CIR: cir.cast bitcast {{.*}} : !u8i -> !cir.vector<8 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !u8i -> !cir.vector<8 x !cir.int> + // CIR: cir.binop(or, {{.*}}, {{.*}}) : !cir.vector<8 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<8 x !cir.int> -> !u8i + + // LLVM-LABEL: 
_kor_mask8 + // LLVM: [[L:%.*]] = bitcast i8 %{{.*}} to <8 x i1> + // LLVM: [[R:%.*]] = bitcast i8 %{{.*}} to <8 x i1> + // LLVM: or <8 x i1> [[L]], [[R]] + // LLVM: bitcast <8 x i1> {{.*}} to i8 + + // OGCG-LABEL: _kor_mask8 + // OGCG: bitcast i8 %{{.*}} to <8 x i1> + // OGCG: bitcast i8 %{{.*}} to <8 x i1> + // OGCG: or <8 x i1> + // OGCG: bitcast <8 x i1> {{.*}} to i8 + return _kor_mask8(A, B); +} + +__mmask8 test_kxor_mask8(__mmask8 A, __mmask8 B) { + // CIR-LABEL: _kxor_mask8 + // CIR: cir.cast bitcast {{.*}} : !u8i -> !cir.vector<8 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !u8i -> !cir.vector<8 x !cir.int> + // CIR: cir.binop(xor, {{.*}}, {{.*}}) : !cir.vector<8 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<8 x !cir.int> -> !u8i + + // LLVM-LABEL: _kxor_mask8 + // LLVM: [[L:%.*]] = bitcast i8 %{{.*}} to <8 x i1> + // LLVM: [[R:%.*]] = bitcast i8 %{{.*}} to <8 x i1> + // LLVM: xor <8 x i1> [[L]], [[R]] + // LLVM: bitcast <8 x i1> {{.*}} to i8 + + // OGCG-LABEL: _kxor_mask8 + // OGCG: bitcast i8 %{{.*}} to <8 x i1> + // OGCG: bitcast i8 %{{.*}} to <8 x i1> + // OGCG: xor <8 x i1> + // OGCG: bitcast <8 x i1> {{.*}} to i8 + return _kxor_mask8(A, B); +} + +__mmask8 test_kxnor_mask8(__mmask8 A, __mmask8 B) { + // CIR-LABEL: _kxnor_mask8 + // CIR: cir.cast bitcast {{.*}} : !u8i -> !cir.vector<8 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !u8i -> !cir.vector<8 x !cir.int> + // CIR: cir.unary(not, {{.*}}) : !cir.vector<8 x !cir.int> + // CIR: cir.binop(xor, {{.*}}, {{.*}}) : !cir.vector<8 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<8 x !cir.int> -> !u8i + + // LLVM-LABEL: _kxnor_mask8 + // LLVM: [[L:%.*]] = bitcast i8 %{{.*}} to <8 x i1> + // LLVM: [[R:%.*]] = bitcast i8 %{{.*}} to <8 x i1> + // LLVM: [[NOT:%.*]] = xor <8 x i1> [[L]], splat (i1 true) + // LLVM: [[RES:%.*]] = xor <8 x i1> [[NOT]], [[R]] + // LLVM: bitcast <8 x i1> [[RES]] to i8 + + // OGCG-LABEL: _kxnor_mask8 + // OGCG: bitcast i8 %{{.*}} to <8 x i1> + // OGCG: 
bitcast i8 %{{.*}} to <8 x i1> + // OGCG: xor <8 x i1> + // OGCG: xor <8 x i1> + // OGCG: bitcast <8 x i1> {{.*}} to i8 + return _kxnor_mask8(A, B); +} + + +__mmask8 test_knot_mask8(__mmask8 A) { + // CIR-LABEL: _knot_mask8 + // CIR: cir.cast bitcast {{.*}} : !u8i -> !cir.vector<8 x !cir.int> + // CIR: cir.unary(not, {{.*}}) : !cir.vector<8 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<8 x !cir.int> -> !u8i + + // LLVM-LABEL: _knot_mask8 + // LLVM: [[L:%.*]] = bitcast i8 %{{.*}} to <8 x i1> + // LLVM: xor <8 x i1> [[L]], {{.*}} + // LLVM: bitcast <8 x i1> {{.*}} to i8 + + // OGCG-LABEL: _knot_mask8 + // OGCG: bitcast i8 %{{.*}} to <8 x i1> + // OGCG: xor <8 x i1> + // OGCG: bitcast <8 x i1> {{.*}} to i8 + return _knot_mask8(A); +} + +// Multiple user-level mask helpers inline to this same kmov builtin. +// CIR does not implement any special lowering for those helpers. +// +// Therefore, testing the builtin (__builtin_ia32_kmov*) directly is +// sufficient to cover the CIR lowering behavior. Testing each helper +// individually would add no new CIR paths. 
+ +__mmask8 test_kmov_b(__mmask8 A) { + // CIR-LABEL: test_kmov_b + // CIR: cir.cast bitcast {{.*}} : !u8i -> !cir.vector<8 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<8 x !cir.int> -> !u8i + + // LLVM-LABEL: test_kmov_b + // LLVM: bitcast i8 %{{.*}} to <8 x i1> + // LLVM: bitcast <8 x i1> {{.*}} to i8 + + // OGCG-LABEL: test_kmov_b + // OGCG: bitcast i8 %{{.*}} to <8 x i1> + // OGCG: bitcast <8 x i1> {{.*}} to i8 + return __builtin_ia32_kmovb(A); +} diff --git a/clang/test/CIR/CodeGenBuiltins/X86/avx512f-builtins.c b/clang/test/CIR/CodeGenBuiltins/X86/avx512f-builtins.c new file mode 100644 index 0000000000000..31d6bc3d22408 --- /dev/null +++ b/clang/test/CIR/CodeGenBuiltins/X86/avx512f-builtins.c @@ -0,0 +1,230 @@ +// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512f -fclangir -emit-cir -o %t.cir -Wall -Werror -Wsign-conversion +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512f -fclangir -emit-llvm -o %t.ll -Wall -Werror -Wsign-conversion +// RUN: FileCheck --check-prefixes=LLVM --input-file=%t.ll %s + +// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512f -fclangir -emit-cir -o %t.cir -Wall -Werror -Wsign-conversion +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux -target-feature +avx512f -fclangir -emit-llvm -o %t.ll -Wall -Werror -Wsign-conversion +// RUN: FileCheck --check-prefixes=LLVM --input-file=%t.ll %s + +// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512f -emit-llvm -o - -Wall -Werror -Wsign-conversion | FileCheck %s --check-prefixes=OGCG +// RUN: %clang_cc1 -x c 
-flax-vector-conversions=none -fms-extensions -fms-compatibility -ffreestanding %s -triple=x86_64-windows-msvc -target-feature +avx512f -emit-llvm -o - -Wall -Werror -Wsign-conversion | FileCheck %s --check-prefixes=OGCG +// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512f -emit-llvm -o - -Wall -Werror -Wsign-conversion | FileCheck %s --check-prefixes=OGCG +// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -fms-extensions -fms-compatibility -ffreestanding %s -triple=x86_64-windows-msvc -target-feature +avx512f -emit-llvm -o - -Wall -Werror -Wsign-conversion | FileCheck %s --check-prefixes=OGCG + +#include + +__m512 test_mm512_undefined(void) { + // CIR-LABEL: _mm512_undefined + // CIR: %[[A:.*]] = cir.const #cir.zero : !cir.vector<8 x !cir.double> + // CIR: %{{.*}} = cir.cast bitcast %[[A]] : !cir.vector<8 x !cir.double> -> !cir.vector<16 x !cir.float> + // CIR: cir.return %{{.*}} : !cir.vector<16 x !cir.float> + + // LLVM-LABEL: test_mm512_undefined + // LLVM: store <16 x float> zeroinitializer, ptr %[[A:.*]], align 64 + // LLVM: %{{.*}} = load <16 x float>, ptr %[[A]], align 64 + // LLVM: ret <16 x float> %{{.*}} + + // OGCG-LABEL: test_mm512_undefined + // OGCG: ret <16 x float> zeroinitializer + return _mm512_undefined(); +} + +__m512 test_mm512_undefined_ps(void) { + // CIR-LABEL: _mm512_undefined_ps + // CIR: %[[A:.*]] = cir.const #cir.zero : !cir.vector<8 x !cir.double> + // CIR: %{{.*}} = cir.cast bitcast %[[A]] : !cir.vector<8 x !cir.double> -> !cir.vector<16 x !cir.float> + // CIR: cir.return %{{.*}} : !cir.vector<16 x !cir.float> + + // LLVM-LABEL: test_mm512_undefined_ps + // LLVM: store <16 x float> zeroinitializer, ptr %[[A:.*]], align 64 + // LLVM: %{{.*}} = load <16 x float>, ptr %[[A]], align 64 + // LLVM: ret <16 x float> %{{.*}} + + // OGCG-LABEL: test_mm512_undefined_ps + // OGCG: ret <16 x float> zeroinitializer + return _mm512_undefined_ps(); +} + +__m512d 
test_mm512_undefined_pd(void) { + // CIR-LABEL: _mm512_undefined_pd + // CIR: %{{.*}} = cir.const #cir.zero : !cir.vector<8 x !cir.double> + // CIR: cir.return %{{.*}} : !cir.vector<8 x !cir.double> + + // LLVM-LABEL: test_mm512_undefined_pd + // LLVM: store <8 x double> zeroinitializer, ptr %[[A:.*]], align 64 + // LLVM: %{{.*}} = load <8 x double>, ptr %[[A]], align 64 + // LLVM: ret <8 x double> %{{.*}} + + // OGCG-LABEL: test_mm512_undefined_pd + // OGCG: ret <8 x double> zeroinitializer + return _mm512_undefined_pd(); +} + +__m512i test_mm512_undefined_epi32(void) { + // CIR-LABEL: _mm512_undefined_epi32 + // CIR: %[[A:.*]] = cir.const #cir.zero : !cir.vector<8 x !cir.double> + // CIR: %{{.*}} = cir.cast bitcast %[[A]] : !cir.vector<8 x !cir.double> -> !cir.vector<8 x !s64i> + // CIR: cir.return %{{.*}} : !cir.vector<8 x !s64i> + + // LLVM-LABEL: test_mm512_undefined_epi32 + // LLVM: store <8 x i64> zeroinitializer, ptr %[[A:.*]], align 64 + // LLVM: %{{.*}} = load <8 x i64>, ptr %[[A]], align 64 + // LLVM: ret <8 x i64> %{{.*}} + + // OGCG-LABEL: test_mm512_undefined_epi32 + // OGCG: ret <8 x i64> zeroinitializer + return _mm512_undefined_epi32(); +} + +__mmask16 test_mm512_kand(__mmask16 A, __mmask16 B) { + // CIR-LABEL: _mm512_kand + // CIR: cir.cast bitcast {{.*}} : !u16i -> !cir.vector<16 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !u16i -> !cir.vector<16 x !cir.int> + // CIR: cir.binop(and, {{.*}}, {{.*}}) : !cir.vector<16 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<16 x !cir.int> -> !u16i + + // LLVM-LABEL: _mm512_kand + // LLVM: [[L:%.*]] = bitcast i16 %{{.*}} to <16 x i1> + // LLVM: [[R:%.*]] = bitcast i16 %{{.*}} to <16 x i1> + // LLVM: [[RES:%.*]] = and <16 x i1> [[L]], [[R]] + // LLVM: bitcast <16 x i1> [[RES]] to i16 + + // OGCG-LABEL: _mm512_kand + // OGCG: bitcast i16 %{{.*}} to <16 x i1> + // OGCG: bitcast i16 %{{.*}} to <16 x i1> + // OGCG: and <16 x i1> + // OGCG: bitcast <16 x i1> {{.*}} to i16 + return _mm512_kand(A, B); 
+} + +__mmask16 test_mm512_kandn(__mmask16 A, __mmask16 B) { + // CIR-LABEL: _mm512_kandn + // CIR: cir.cast bitcast {{.*}} : !u16i -> !cir.vector<16 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !u16i -> !cir.vector<16 x !cir.int> + // CIR: cir.unary(not, {{.*}}) : !cir.vector<16 x !cir.int> + // CIR: cir.binop(and, {{.*}}, {{.*}}) : !cir.vector<16 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<16 x !cir.int> -> !u16i + + // LLVM-LABEL: _mm512_kandn + // LLVM: [[L:%.*]] = bitcast i16 %{{.*}} to <16 x i1> + // LLVM: [[R:%.*]] = bitcast i16 %{{.*}} to <16 x i1> + // LLVM: xor <16 x i1> [[L]], splat (i1 true) + // LLVM: and <16 x i1> + // LLVM: bitcast <16 x i1> {{.*}} to i16 + + // OGCG-LABEL: _mm512_kandn + // OGCG: bitcast i16 %{{.*}} to <16 x i1> + // OGCG: bitcast i16 %{{.*}} to <16 x i1> + // OGCG: xor <16 x i1> + // OGCG: and <16 x i1> + // OGCG: bitcast <16 x i1> {{.*}} to i16 + return _mm512_kandn(A, B); +} + +__mmask16 test_mm512_kor(__mmask16 A, __mmask16 B) { + // CIR-LABEL: _mm512_kor + // CIR: cir.cast bitcast {{.*}} : !u16i -> !cir.vector<16 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !u16i -> !cir.vector<16 x !cir.int> + // CIR: cir.binop(or, {{.*}}, {{.*}}) : !cir.vector<16 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<16 x !cir.int> -> !u16i + + // LLVM-LABEL: _mm512_kor + // LLVM: [[L:%.*]] = bitcast i16 %{{.*}} to <16 x i1> + // LLVM: [[R:%.*]] = bitcast i16 %{{.*}} to <16 x i1> + // LLVM: or <16 x i1> [[L]], [[R]] + // LLVM: bitcast <16 x i1> {{.*}} to i16 + + // OGCG-LABEL: _mm512_kor + // OGCG: bitcast i16 %{{.*}} to <16 x i1> + // OGCG: bitcast i16 %{{.*}} to <16 x i1> + // OGCG: or <16 x i1> + // OGCG: bitcast <16 x i1> {{.*}} to i16 + return _mm512_kor(A, B); +} + +__mmask16 test_mm512_kxnor(__mmask16 A, __mmask16 B) { + // CIR-LABEL: _mm512_kxnor + // CIR: cir.cast bitcast {{.*}} : !u16i -> !cir.vector<16 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !u16i -> !cir.vector<16 x !cir.int> + // CIR: 
cir.unary(not, {{.*}}) : !cir.vector<16 x !cir.int> + // CIR: cir.binop(xor, {{.*}}, {{.*}}) : !cir.vector<16 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<16 x !cir.int> -> !u16i + + // LLVM-LABEL: _mm512_kxnor + // LLVM: [[L:%.*]] = bitcast i16 %{{.*}} to <16 x i1> + // LLVM: [[R:%.*]] = bitcast i16 %{{.*}} to <16 x i1> + // LLVM: [[NOT:%.*]] = xor <16 x i1> [[L]], splat (i1 true) + // LLVM: [[RES:%.*]] = xor <16 x i1> [[NOT]], [[R]] + // LLVM: bitcast <16 x i1> [[RES]] to i16 + + // OGCG-LABEL: _mm512_kxnor + // OGCG: bitcast i16 %{{.*}} to <16 x i1> + // OGCG: bitcast i16 %{{.*}} to <16 x i1> + // OGCG: xor <16 x i1> + // OGCG: xor <16 x i1> + // OGCG: bitcast <16 x i1> {{.*}} to i16 + return _mm512_kxnor(A, B); +} + +__mmask16 test_mm512_kxor(__mmask16 A, __mmask16 B) { + // CIR-LABEL: _mm512_kxor + // CIR: cir.cast bitcast {{.*}} : !u16i -> !cir.vector<16 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !u16i -> !cir.vector<16 x !cir.int> + // CIR: cir.binop(xor, {{.*}}, {{.*}}) : !cir.vector<16 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<16 x !cir.int> -> !u16i + + // LLVM-LABEL: _mm512_kxor + // LLVM: [[L:%.*]] = bitcast i16 %{{.*}} to <16 x i1> + // LLVM: [[R:%.*]] = bitcast i16 %{{.*}} to <16 x i1> + // LLVM: xor <16 x i1> [[L]], [[R]] + // LLVM: bitcast <16 x i1> {{.*}} to i16 + + // OGCG-LABEL: _mm512_kxor + // OGCG: bitcast i16 %{{.*}} to <16 x i1> + // OGCG: bitcast i16 %{{.*}} to <16 x i1> + // OGCG: xor <16 x i1> + // OGCG: bitcast <16 x i1> {{.*}} to i16 + return _mm512_kxor(A, B); +} + +__mmask16 test_mm512_knot(__mmask16 A) { + // CIR-LABEL: _mm512_knot + // CIR: cir.cast bitcast {{.*}} : !u16i -> !cir.vector<16 x !cir.int> + // CIR: cir.unary(not, {{.*}}) : !cir.vector<16 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<16 x !cir.int> -> !u16i + + // LLVM-LABEL: _mm512_knot + // LLVM: bitcast i16 %{{.*}} to <16 x i1> + // LLVM: xor <16 x i1> + // LLVM: bitcast <16 x i1> {{.*}} to i16 + + // OGCG-LABEL: 
_mm512_knot + // OGCG: bitcast i16 %{{.*}} to <16 x i1> + // OGCG: xor <16 x i1> + // OGCG: bitcast <16 x i1> {{.*}} to i16 + return _mm512_knot(A); +} + +// Multiple user-level mask helpers inline to this same kmov builtin. +// CIR does not implement any special lowering for those helpers. +// +// Therefore, testing the builtin (__builtin_ia32_kmov*) directly is +// sufficient to cover the CIR lowering behavior. Testing each helper +// individually would add no new CIR paths. + +__mmask16 test_kmov_w(__mmask16 A) { + // CIR-LABEL: test_kmov_w + // CIR: cir.cast bitcast {{.*}} : !u16i -> !cir.vector<16 x !cir.int> + // CIR: cir.cast bitcast {{.*}} : !cir.vector<16 x !cir.int> -> !u16i + + // LLVM-LABEL: test_kmov_w + // LLVM: bitcast i16 %{{.*}} to <16 x i1> + // LLVM: bitcast <16 x i1> {{.*}} to i16 + + // OGCG-LABEL: test_kmov_w + // OGCG: bitcast i16 %{{.*}} to <16 x i1> + // OGCG: bitcast <16 x i1> {{.*}} to i16 + return __builtin_ia32_kmovw(A); +} diff --git a/clang/test/CIR/CodeGen/X86/avx512fp16-builtins.c b/clang/test/CIR/CodeGenBuiltins/X86/avx512fp16-builtins.c similarity index 100% rename from clang/test/CIR/CodeGen/X86/avx512fp16-builtins.c rename to clang/test/CIR/CodeGenBuiltins/X86/avx512fp16-builtins.c diff --git a/clang/test/CIR/CodeGen/X86/bmi-builtins.c b/clang/test/CIR/CodeGenBuiltins/X86/bmi-builtins.c similarity index 100% rename from clang/test/CIR/CodeGen/X86/bmi-builtins.c rename to clang/test/CIR/CodeGenBuiltins/X86/bmi-builtins.c diff --git a/clang/test/CIR/CodeGen/X86/lzcnt-builtins.c b/clang/test/CIR/CodeGenBuiltins/X86/lzcnt-builtins.c similarity index 100% rename from clang/test/CIR/CodeGen/X86/lzcnt-builtins.c rename to clang/test/CIR/CodeGenBuiltins/X86/lzcnt-builtins.c diff --git a/clang/test/CIR/CodeGen/X86/sse-builtins.c b/clang/test/CIR/CodeGenBuiltins/X86/sse-builtins.c similarity index 100% rename from clang/test/CIR/CodeGen/X86/sse-builtins.c rename to clang/test/CIR/CodeGenBuiltins/X86/sse-builtins.c diff --git 
a/clang/test/CIR/CodeGen/X86/sse2-builtins.c b/clang/test/CIR/CodeGenBuiltins/X86/sse2-builtins.c similarity index 100% rename from clang/test/CIR/CodeGen/X86/sse2-builtins.c rename to clang/test/CIR/CodeGenBuiltins/X86/sse2-builtins.c diff --git a/clang/test/CIR/CodeGen/builtin-fcmp-sse.c b/clang/test/CIR/CodeGenBuiltins/builtin-fcmp-sse.c similarity index 100% rename from clang/test/CIR/CodeGen/builtin-fcmp-sse.c rename to clang/test/CIR/CodeGenBuiltins/builtin-fcmp-sse.c diff --git a/clang/test/CIR/CodeGen/builtin-isfpclass.c b/clang/test/CIR/CodeGenBuiltins/builtin-isfpclass.c similarity index 100% rename from clang/test/CIR/CodeGen/builtin-isfpclass.c rename to clang/test/CIR/CodeGenBuiltins/builtin-isfpclass.c diff --git a/clang/test/CIR/CodeGen/builtin_bit.cpp b/clang/test/CIR/CodeGenBuiltins/builtin_bit.cpp similarity index 100% rename from clang/test/CIR/CodeGen/builtin_bit.cpp rename to clang/test/CIR/CodeGenBuiltins/builtin_bit.cpp diff --git a/clang/test/CIR/CodeGen/builtin_call.cpp b/clang/test/CIR/CodeGenBuiltins/builtin_call.cpp similarity index 100% rename from clang/test/CIR/CodeGen/builtin_call.cpp rename to clang/test/CIR/CodeGenBuiltins/builtin_call.cpp diff --git a/clang/test/CIR/CodeGen/builtin_inline.c b/clang/test/CIR/CodeGenBuiltins/builtin_inline.c similarity index 100% rename from clang/test/CIR/CodeGen/builtin_inline.c rename to clang/test/CIR/CodeGenBuiltins/builtin_inline.c diff --git a/clang/test/CIR/CodeGen/builtin_new_delete.cpp b/clang/test/CIR/CodeGenBuiltins/builtin_new_delete.cpp similarity index 100% rename from clang/test/CIR/CodeGen/builtin_new_delete.cpp rename to clang/test/CIR/CodeGenBuiltins/builtin_new_delete.cpp diff --git a/clang/test/CIR/CodeGen/builtin_prefetch.c b/clang/test/CIR/CodeGenBuiltins/builtin_prefetch.c similarity index 100% rename from clang/test/CIR/CodeGen/builtin_prefetch.c rename to clang/test/CIR/CodeGenBuiltins/builtin_prefetch.c diff --git a/clang/test/CIR/CodeGen/builtin_printf.cpp 
b/clang/test/CIR/CodeGenBuiltins/builtin_printf.cpp similarity index 100% rename from clang/test/CIR/CodeGen/builtin_printf.cpp rename to clang/test/CIR/CodeGenBuiltins/builtin_printf.cpp diff --git a/clang/test/CIR/CodeGen/builtins-elementwise.c b/clang/test/CIR/CodeGenBuiltins/builtins-elementwise.c similarity index 100% rename from clang/test/CIR/CodeGen/builtins-elementwise.c rename to clang/test/CIR/CodeGenBuiltins/builtins-elementwise.c diff --git a/clang/test/CIR/CodeGen/builtins-floating-point.c b/clang/test/CIR/CodeGenBuiltins/builtins-floating-point.c similarity index 72% rename from clang/test/CIR/CodeGen/builtins-floating-point.c rename to clang/test/CIR/CodeGenBuiltins/builtins-floating-point.c index 1b7de650662c7..a4307c57b04b6 100644 --- a/clang/test/CIR/CodeGen/builtins-floating-point.c +++ b/clang/test/CIR/CodeGenBuiltins/builtins-floating-point.c @@ -46,3 +46,24 @@ long double expl(long double f) { // LLVM: %{{.*}} = call fp128 @llvm.exp.f128(fp128 %{{.*}}) // OGCG: %{{.*}} = call fp128 @llvm.exp.f128(fp128 %{{.*}}) } + +float exp2f(float f) { + return __builtin_exp2f(f); + // CIR: %{{.*}} = cir.exp2 {{.*}} : !cir.float + // LLVM: %{{.*}} = call float @llvm.exp2.f32(float %{{.*}}) + // OGCG: %{{.*}} = call float @llvm.exp2.f32(float %{{.*}}) +} + +double my_exp2(double f) { + return __builtin_exp2(f); + // CIR: %{{.*}} = cir.exp2 {{.*}} : !cir.double + // LLVM: %{{.*}} = call double @llvm.exp2.f64(double %{{.*}}) + // OGCG: %{{.*}} = call double @llvm.exp2.f64(double %{{.*}}) +} + +long double my_exp2l(long double f) { + return __builtin_exp2l(f); + // CIR: %{{.*}} = cir.exp2 {{.*}} : !cir.long_double + // LLVM: %{{.*}} = call fp128 @llvm.exp2.f128(fp128 %{{.*}}) + // OGCG: %{{.*}} = call fp128 @llvm.exp2.f128(fp128 %{{.*}}) +} diff --git a/clang/test/CIR/CodeGen/builtins-overflow.cpp b/clang/test/CIR/CodeGenBuiltins/builtins-overflow.cpp similarity index 100% rename from clang/test/CIR/CodeGen/builtins-overflow.cpp rename to 
clang/test/CIR/CodeGenBuiltins/builtins-overflow.cpp diff --git a/clang/test/CIR/CodeGen/builtins.cpp b/clang/test/CIR/CodeGenBuiltins/builtins.cpp similarity index 100% rename from clang/test/CIR/CodeGen/builtins.cpp rename to clang/test/CIR/CodeGenBuiltins/builtins.cpp diff --git a/clang/test/CodeGen/X86/avx512f-builtins.c b/clang/test/CodeGen/X86/avx512f-builtins.c index a148940033642..753b4e48c20ed 100644 --- a/clang/test/CodeGen/X86/avx512f-builtins.c +++ b/clang/test/CodeGen/X86/avx512f-builtins.c @@ -6892,6 +6892,7 @@ __m512 test_mm512_shuffle_f32x4(__m512 __A, __m512 __B) { // CHECK: shufflevector <16 x float> %{{.*}}, <16 x float> %{{.*}}, <16 x i32> return _mm512_shuffle_f32x4(__A, __B, 4); } +TEST_CONSTEXPR(match_m512(_mm512_shuffle_f32x4(((__m512){1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f}), ((__m512){10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0, 110.0, 120.0, 130.0, 140.0, 150.0, 160.0}), 0b11111111), 13.0f, 14.0f, 15.0f, 16.0f, 13.0f, 14.0f, 15.0f, 16.0f, 130.0, 140.0, 150.0, 160.0, 130.0, 140.0, 150.0, 160.0)); __m512 test_mm512_mask_shuffle_f32x4(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) { // CHECK-LABEL: test_mm512_mask_shuffle_f32x4 @@ -6899,6 +6900,7 @@ __m512 test_mm512_mask_shuffle_f32x4(__m512 __W, __mmask16 __U, __m512 __A, __m5 // CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}} return _mm512_mask_shuffle_f32x4(__W, __U, __A, __B, 4); } +TEST_CONSTEXPR(match_m512(_mm512_mask_shuffle_f32x4(((__m512){100.0f, 200.0f, 300.0f, 400.0f, 500.0f, 600.0f, 700.0f, 800.0f, 900.0f, 1000.0f, 1100.0f, 1200.0f, 1300.0f, 1400.0f, 1500.0f, 1600.0f}), 0b1111111111111110, ((__m512){1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f}), ((__m512){10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0, 110.0, 120.0, 130.0, 140.0, 150.0, 160.0}), 0b11111111), 100.0f, 14.0f, 15.0f, 16.0f, 13.0f, 14.0f, 
15.0f, 16.0f, 130.0, 140.0, 150.0, 160.0, 130.0, 140.0, 150.0, 160.0)); __m512 test_mm512_maskz_shuffle_f32x4(__mmask16 __U, __m512 __A, __m512 __B) { // CHECK-LABEL: test_mm512_maskz_shuffle_f32x4 @@ -6906,12 +6908,14 @@ __m512 test_mm512_maskz_shuffle_f32x4(__mmask16 __U, __m512 __A, __m512 __B) { // CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}} return _mm512_maskz_shuffle_f32x4(__U, __A, __B, 4); } +TEST_CONSTEXPR(match_m512(_mm512_maskz_shuffle_f32x4(0b1111111111110111, ((__m512){1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f}), ((__m512){10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0, 110.0, 120.0, 130.0, 140.0, 150.0, 160.0}), 0b11111111), 13.0f, 14.0f, 15.0f, 0.0f, 13.0f, 14.0f, 15.0f, 16.0f, 130.0, 140.0, 150.0, 160.0, 130.0, 140.0, 150.0, 160.0)); __m512d test_mm512_shuffle_f64x2(__m512d __A, __m512d __B) { // CHECK-LABEL: test_mm512_shuffle_f64x2 // CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> %{{.*}}, <8 x i32> return _mm512_shuffle_f64x2(__A, __B, 4); } +TEST_CONSTEXPR(match_m512d(_mm512_shuffle_f64x2(((__m512d){1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0}), ((__m512d){10.0,20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0}), 0b10101100), 1.0, 2.0, 7.0, 8.0, 50.0, 60.0, 50.0, 60.0)); __m512d test_mm512_mask_shuffle_f64x2(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) { // CHECK-LABEL: test_mm512_mask_shuffle_f64x2 @@ -6919,6 +6923,7 @@ __m512d test_mm512_mask_shuffle_f64x2(__m512d __W, __mmask8 __U, __m512d __A, __ // CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}} return _mm512_mask_shuffle_f64x2(__W, __U, __A, __B, 4); } +TEST_CONSTEXPR(match_m512d(_mm512_mask_shuffle_f64x2(((__m512d){100.0, 200.0, 300.0, 400.0, 500.0, 600.0, 700.0, 800.0}), 0b11110000, ((__m512d){1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0}), ((__m512d){10.0,20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0}), 0b10101100), 100.0, 200.0, 300.0, 400.0, 50.0, 60.0, 50.0, 60.0)); __m512d 
test_mm512_maskz_shuffle_f64x2(__mmask8 __U, __m512d __A, __m512d __B) { // CHECK-LABEL: test_mm512_maskz_shuffle_f64x2 @@ -6926,12 +6931,15 @@ __m512d test_mm512_maskz_shuffle_f64x2(__mmask8 __U, __m512d __A, __m512d __B) { // CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}} return _mm512_maskz_shuffle_f64x2(__U, __A, __B, 4); } +TEST_CONSTEXPR(match_m512d(_mm512_maskz_shuffle_f64x2(0b11110100, ((__m512d){1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0}), ((__m512d){10.0,20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0}), 0b10101100), 0.0, 0.0, 7.0, 0.0, 50.0, 60.0, 50.0, 60.0)); __m512i test_mm512_shuffle_i32x4(__m512i __A, __m512i __B) { // CHECK-LABEL: test_mm512_shuffle_i32x4 // CHECK: shufflevector <16 x i32> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> return _mm512_shuffle_i32x4(__A, __B, 4); } +TEST_CONSTEXPR(match_v16si(_mm512_shuffle_i32x4(((__m512i)(__v16si){1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}), ((__m512i)(__v16si){10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160}), 0), 1, 2, 3, 4, 1, 2, 3, 4, 10, 20, 30, 40, 10, 20, 30, 40)); + __m512i test_mm512_mask_shuffle_i32x4(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) { // CHECK-LABEL: test_mm512_mask_shuffle_i32x4 @@ -6939,6 +6947,7 @@ __m512i test_mm512_mask_shuffle_i32x4(__m512i __W, __mmask16 __U, __m512i __A, _ // CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}} return _mm512_mask_shuffle_i32x4(__W, __U, __A, __B, 4); } +TEST_CONSTEXPR(match_v16si(_mm512_mask_shuffle_i32x4(((__m512i)(__v16si){100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1100, 1200, 1300, 1400, 1500, 1600}), 0b1111111111111011, ((__m512i)(__v16si){1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}), ((__m512i)(__v16si){10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160}), 0), 1, 2, 300, 4, 1, 2, 3, 4, 10, 20, 30, 40, 10, 20, 30, 40)); __m512i test_mm512_maskz_shuffle_i32x4(__mmask16 __U, __m512i __A, __m512i __B) { // CHECK-LABEL: 
test_mm512_maskz_shuffle_i32x4 @@ -6946,12 +6955,14 @@ __m512i test_mm512_maskz_shuffle_i32x4(__mmask16 __U, __m512i __A, __m512i __B) // CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}} return _mm512_maskz_shuffle_i32x4(__U, __A, __B, 4); } +TEST_CONSTEXPR(match_v16si(_mm512_maskz_shuffle_i32x4(0b1011111111111111, ((__m512i)(__v16si){1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}), ((__m512i)(__v16si){10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160}), 0), 1, 2, 3, 4, 1, 2, 3, 4, 10, 20, 30, 40, 10, 20, 0, 40)); __m512i test_mm512_shuffle_i64x2(__m512i __A, __m512i __B) { // CHECK-LABEL: test_mm512_shuffle_i64x2 // CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i32> return _mm512_shuffle_i64x2(__A, __B, 4); } +TEST_CONSTEXPR(match_m512i(_mm512_shuffle_i64x2(((__m512i){1, 2, 3, 4, 5, 6, 7, 8}), ((__m512i){10, 20, 30, 40, 50, 60, 70, 80}), 0b11000110), 5, 6, 3, 4, 10, 20, 70, 80)); __m512i test_mm512_mask_shuffle_i64x2(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) { // CHECK-LABEL: test_mm512_mask_shuffle_i64x2 @@ -6959,6 +6970,7 @@ __m512i test_mm512_mask_shuffle_i64x2(__m512i __W, __mmask8 __U, __m512i __A, __ // CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}} return _mm512_mask_shuffle_i64x2(__W, __U, __A, __B, 4); } +TEST_CONSTEXPR(match_m512i(_mm512_mask_shuffle_i64x2(((__m512i){100, 200, 300, 400, 500, 600, 700, 800}), 0b11111101, ((__m512i){1, 2, 3, 4, 5, 6, 7, 8}), ((__m512i){10, 20, 30, 40, 50, 60, 70, 80}), 0b11000110), 5, 200, 3, 4, 10, 20, 70, 80)); __m512i test_mm512_maskz_shuffle_i64x2(__mmask8 __U, __m512i __A, __m512i __B) { // CHECK-LABEL: test_mm512_maskz_shuffle_i64x2 @@ -6966,6 +6978,7 @@ __m512i test_mm512_maskz_shuffle_i64x2(__mmask8 __U, __m512i __A, __m512i __B) { // CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}} return _mm512_maskz_shuffle_i64x2(__U, __A, __B, 4); } 
+TEST_CONSTEXPR(match_m512i(_mm512_maskz_shuffle_i64x2(0b00111101, ((__m512i){1, 2, 3, 4, 5, 6, 7, 8}), ((__m512i){10, 20, 30, 40, 50, 60, 70, 80}), 0b11000110), 5, 0, 3, 4, 10, 20, 0, 0)); __m512d test_mm512_shuffle_pd(__m512d __M, __m512d __V) { // CHECK-LABEL: test_mm512_shuffle_pd diff --git a/clang/test/CodeGen/X86/avx512vl-builtins.c b/clang/test/CodeGen/X86/avx512vl-builtins.c index 58bb8bef6fb46..e1878bd84b1ad 100644 --- a/clang/test/CodeGen/X86/avx512vl-builtins.c +++ b/clang/test/CodeGen/X86/avx512vl-builtins.c @@ -9078,6 +9078,7 @@ __m256 test_mm256_shuffle_f32x4(__m256 __A, __m256 __B) { // CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> return _mm256_shuffle_f32x4(__A, __B, 3); } +TEST_CONSTEXPR(match_m256(_mm256_shuffle_f32x4(((__m256){1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0}), ((__m256){10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0}), 1), 5.0, 6.0, 7.0, 8.0, 10.0, 20.0, 30.0, 40.0)); __m256 test_mm256_mask_shuffle_f32x4(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { // CHECK-LABEL: test_mm256_mask_shuffle_f32x4 @@ -9085,6 +9086,7 @@ __m256 test_mm256_mask_shuffle_f32x4(__m256 __W, __mmask8 __U, __m256 __A, __m25 // CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}} return _mm256_mask_shuffle_f32x4(__W, __U, __A, __B, 3); } +TEST_CONSTEXPR(match_m256(_mm256_mask_shuffle_f32x4(((__m256){100.0, 200.0, 300.0, 400.0, 500.0, 600.0, 700.0, 800.0}), 0b10101010, ((__m256){1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0}), ((__m256){10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0}), 1), 100.0, 6.0, 300.0, 8.0, 500.0, 20.0, 700.0, 40.0)); __m256 test_mm256_maskz_shuffle_f32x4(__mmask8 __U, __m256 __A, __m256 __B) { // CHECK-LABEL: test_mm256_maskz_shuffle_f32x4 @@ -9092,12 +9094,14 @@ __m256 test_mm256_maskz_shuffle_f32x4(__mmask8 __U, __m256 __A, __m256 __B) { // CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}} return _mm256_maskz_shuffle_f32x4(__U, __A, __B, 3); } 
+TEST_CONSTEXPR(match_m256(_mm256_maskz_shuffle_f32x4(0b01010101, ((__m256){1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0}), ((__m256){10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0}), 1), 5.0, 0.0, 7.0, 0.0, 10.0, 0.0, 30.0, 0.0)); __m256d test_mm256_shuffle_f64x2(__m256d __A, __m256d __B) { // CHECK-LABEL: test_mm256_shuffle_f64x2 // CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x i32> return _mm256_shuffle_f64x2(__A, __B, 3); } +TEST_CONSTEXPR(match_m256d(_mm256_shuffle_f64x2(((__m256d){1.0, 2.0, 3.0, 4.0}), ((__m256d){10.0, 20.0, 30.0, 40.0}), 3), 3.0, 4.0, 30.0, 40.0)); __m256d test_mm256_mask_shuffle_f64x2(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { // CHECK-LABEL: test_mm256_mask_shuffle_f64x2 @@ -9106,6 +9110,7 @@ __m256d test_mm256_mask_shuffle_f64x2(__m256d __W, __mmask8 __U, __m256d __A, __ // CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}} return _mm256_mask_shuffle_f64x2(__W, __U, __A, __B, 3); } +TEST_CONSTEXPR(match_m256d(_mm256_mask_shuffle_f64x2(((__m256d){100.0, 200.0, 300.0, 400.0}), 0b00001111, ((__m256d){1.0, 2.0, 3.0, 4.0}), ((__m256d){10.0, 20.0, 30.0, 40.0}), 3), 3.0, 4.0, 30.0, 40.0)); __m256d test_mm256_maskz_shuffle_f64x2(__mmask8 __U, __m256d __A, __m256d __B) { // CHECK-LABEL: test_mm256_maskz_shuffle_f64x2 @@ -9114,12 +9119,14 @@ __m256d test_mm256_maskz_shuffle_f64x2(__mmask8 __U, __m256d __A, __m256d __B) { // CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}} return _mm256_maskz_shuffle_f64x2(__U, __A, __B, 3); } +TEST_CONSTEXPR(match_m256d(_mm256_maskz_shuffle_f64x2(0b00001011, ((__m256d){1.0, 2.0, 3.0, 4.0}), ((__m256d){10.0, 20.0, 30.0, 40.0}), 3), 3.0, 4.0, 0.0, 40.0)); __m256i test_mm256_shuffle_i32x4(__m256i __A, __m256i __B) { // CHECK-LABEL: test_mm256_shuffle_i32x4 // CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> return _mm256_shuffle_i32x4(__A, __B, 3); } +TEST_CONSTEXPR(match_v8si(_mm256_shuffle_i32x4(((__m256i)(__v8si){1, 
2, 3, 4, 5, 6, 7, 8}), ((__m256i)(__v8si){10, 20, 30, 40, 50, 60, 70, 80}), 0), 1, 2, 3, 4, 10, 20, 30, 40)); __m256i test_mm256_mask_shuffle_i32x4(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) { // CHECK-LABEL: test_mm256_mask_shuffle_i32x4 @@ -9127,6 +9134,7 @@ __m256i test_mm256_mask_shuffle_i32x4(__m256i __W, __mmask8 __U, __m256i __A, __ // CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}} return _mm256_mask_shuffle_i32x4(__W, __U, __A, __B, 3); } +TEST_CONSTEXPR(match_v8si(_mm256_mask_shuffle_i32x4(((__m256i)(__v8si){100, 200, 300, 400, 500, 600, 700, 800}), 0b00000000, ((__m256i)(__v8si){1, 2, 3, 4, 5, 6, 7, 8}), ((__m256i)(__v8si){10, 20, 30, 40, 50, 60, 70, 80}), 0), 100, 200, 300, 400, 500, 600, 700, 800)); __m256i test_mm256_maskz_shuffle_i32x4(__mmask8 __U, __m256i __A, __m256i __B) { // CHECK-LABEL: test_mm256_maskz_shuffle_i32x4 @@ -9134,12 +9142,14 @@ __m256i test_mm256_maskz_shuffle_i32x4(__mmask8 __U, __m256i __A, __m256i __B) { // CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}} return _mm256_maskz_shuffle_i32x4(__U, __A, __B, 3); } +TEST_CONSTEXPR(match_v8si(_mm256_maskz_shuffle_i32x4(0b11111111, ((__m256i)(__v8si){1, 2, 3, 4, 5, 6, 7, 8}), ((__m256i)(__v8si){10, 20, 30, 40, 50, 60, 70, 80}), 0), 1, 2, 3, 4, 10, 20, 30, 40)); __m256i test_mm256_shuffle_i64x2(__m256i __A, __m256i __B) { // CHECK-LABEL: test_mm256_shuffle_i64x2 // CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i32> return _mm256_shuffle_i64x2(__A, __B, 3); } +TEST_CONSTEXPR(match_m256i(_mm256_shuffle_i64x2(((__m256i){1ULL, 2ULL, 3ULL, 4ULL}), ((__m256i){10ULL, 20ULL, 30ULL, 40ULL}), 2), 1ULL, 2ULL, 30ULL, 40ULL)); __m256i test_mm256_mask_shuffle_i64x2(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) { // CHECK-LABEL: test_mm256_mask_shuffle_i64x2 @@ -9148,6 +9158,7 @@ __m256i test_mm256_mask_shuffle_i64x2(__m256i __W, __mmask8 __U, __m256i __A, __ // CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> 
%{{.*}} return _mm256_mask_shuffle_i64x2(__W, __U, __A, __B, 3); } +TEST_CONSTEXPR(match_m256i(_mm256_mask_shuffle_i64x2(((__m256i){100ULL, 200ULL, 300ULL, 400ULL}), 0b00001101, ((__m256i){1ULL, 2ULL, 3ULL, 4ULL}), ((__m256i){10ULL, 20ULL, 30ULL, 40ULL}), 2), 1ULL, 200ULL, 30ULL, 40ULL)); __m256i test_mm256_maskz_shuffle_i64x2(__mmask8 __U, __m256i __A, __m256i __B) { // CHECK-LABEL: test_mm256_maskz_shuffle_i64x2 @@ -9156,6 +9167,7 @@ __m256i test_mm256_maskz_shuffle_i64x2(__mmask8 __U, __m256i __A, __m256i __B) { // CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}} return _mm256_maskz_shuffle_i64x2(__U, __A, __B, 3); } +TEST_CONSTEXPR(match_m256i(_mm256_maskz_shuffle_i64x2( 0b00000110, ((__m256i){1ULL, 2ULL, 3ULL, 4ULL}), ((__m256i){10ULL, 20ULL, 30ULL, 40ULL}), 2), 0ULL, 2ULL, 30ULL, 0ULL)); __m128d test_mm_mask_shuffle_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { // CHECK-LABEL: test_mm_mask_shuffle_pd diff --git a/clang/test/CodeGen/cfi-icall-trap-recover-runtime.c b/clang/test/CodeGen/cfi-icall-trap-recover-runtime.c index 9fbb1221ab39a..2c44842f9d28e 100644 --- a/clang/test/CodeGen/cfi-icall-trap-recover-runtime.c +++ b/clang/test/CodeGen/cfi-icall-trap-recover-runtime.c @@ -171,7 +171,7 @@ void xf(); // PRESERVE_MIN-NEXT: [[TMP3:%.*]] = call i1 @llvm.type.test(ptr [[TMP2]], metadata !"_ZTSFvE"), !nosanitize [[META10:![0-9]+]] // PRESERVE_MIN-NEXT: br i1 [[TMP3]], label %[[CONT:.*]], label %[[HANDLER_CFI_CHECK_FAIL:.*]], !prof [[PROF11:![0-9]+]], !nosanitize [[META10]] // PRESERVE_MIN: [[HANDLER_CFI_CHECK_FAIL]]: -// PRESERVE_MIN-NEXT: call void @__ubsan_handle_cfi_check_fail_minimal() #[[ATTR4:[0-9]+]], !nosanitize [[META10]] +// PRESERVE_MIN-NEXT: call preserve_allcc void @__ubsan_handle_cfi_check_fail_minimal_preserve() #[[ATTR4:[0-9]+]], !nosanitize [[META10]] // PRESERVE_MIN-NEXT: br label %[[CONT]], !nosanitize [[META10]] // PRESERVE_MIN: [[CONT]]: // PRESERVE_MIN-NEXT: call void (...) 
[[TMP2]]() diff --git a/clang/test/CodeGenCXX/cfi-vcall-trap-recover-runtime.cpp b/clang/test/CodeGenCXX/cfi-vcall-trap-recover-runtime.cpp index 0130d9e33cd9d..2451d31e9a489 100644 --- a/clang/test/CodeGenCXX/cfi-vcall-trap-recover-runtime.cpp +++ b/clang/test/CodeGenCXX/cfi-vcall-trap-recover-runtime.cpp @@ -127,7 +127,7 @@ struct S1 { // PRESERVE_MIN-NEXT: [[TMP2:%.*]] = call i1 @llvm.type.test(ptr [[VTABLE]], metadata !"all-vtables"), !nosanitize [[META5]] // PRESERVE_MIN-NEXT: br i1 [[TMP1]], label %[[CONT:.*]], label %[[HANDLER_CFI_CHECK_FAIL:.*]], !prof [[PROF6:![0-9]+]], !nosanitize [[META5]] // PRESERVE_MIN: [[HANDLER_CFI_CHECK_FAIL]]: -// PRESERVE_MIN-NEXT: call void @__ubsan_handle_cfi_check_fail_minimal() #[[ATTR3:[0-9]+]], !nosanitize [[META5]] +// PRESERVE_MIN-NEXT: call preserve_allcc void @__ubsan_handle_cfi_check_fail_minimal_preserve() #[[ATTR3:[0-9]+]], !nosanitize [[META5]] // PRESERVE_MIN-NEXT: br label %[[CONT]], !nosanitize [[META5]] // PRESERVE_MIN: [[CONT]]: // PRESERVE_MIN-NEXT: [[VFN:%.*]] = getelementptr inbounds ptr, ptr [[VTABLE]], i64 0 diff --git a/clang/test/CodeGenHLSL/builtins/faceforward.hlsl b/clang/test/CodeGenHLSL/builtins/faceforward.hlsl index 70459d81685a1..261454e8bc152 100644 --- a/clang/test/CodeGenHLSL/builtins/faceforward.hlsl +++ b/clang/test/CodeGenHLSL/builtins/faceforward.hlsl @@ -1,9 +1,9 @@ // RUN: %clang_cc1 -finclude-default-header -triple \ // RUN: dxil-pc-shadermodel6.3-library %s -fnative-half-type -fnative-int16-type \ -// RUN: -emit-llvm -o - | FileCheck %s +// RUN: -emit-llvm -o - | FileCheck %s --check-prefixes=CHECK,DXCHECK // RUN: %clang_cc1 -finclude-default-header -triple \ // RUN: spirv-unknown-vulkan-compute %s -fnative-half-type -fnative-int16-type \ -// RUN: -emit-llvm -o - | FileCheck %s --check-prefix=SPVCHECK +// RUN: -emit-llvm -o - | FileCheck %s --check-prefixes=CHECK,SPVCHECK // CHECK-LABEL: test_faceforward_half // CHECK: %hlsl.dot.i = fmul reassoc nnan ninf nsz arcp afn half %{{.*}}, 
%{{.*}} @@ -11,42 +11,31 @@ // CHECK: %fneg.i = fneg reassoc nnan ninf nsz arcp afn half %{{.*}} // CHECK: %hlsl.select.i = select reassoc nnan ninf nsz arcp afn i1 %cmp.i, half %{{.*}}, half %fneg.i // CHECK: ret half %hlsl.select.i -// SPVCHECK-LABEL: test_faceforward_half -// SPVCHECK: %spv.faceforward.i = call reassoc nnan ninf nsz arcp afn noundef half @llvm.spv.faceforward.f16(half %{{.*}}, half %{{.*}}, half %{{.*}}) -// SPVCHECK: ret half %spv.faceforward.i half test_faceforward_half(half N, half I, half Ng) { return faceforward(N, I, Ng); } // CHECK-LABEL: test_faceforward_half2 -// CHECK: %hlsl.dot.i = call reassoc nnan ninf nsz arcp afn half @llvm.dx.fdot.v2f16(<2 x half> %{{.*}}, <2 x half> %{{.*}}) +// DXCHECK: %hlsl.dot.i = call reassoc nnan ninf nsz arcp afn half @llvm.[[ICF:dx]].fdot.v2f16(<2 x half> %{{.*}}, <2 x half> %{{.*}}) +// SPVCHECK: %hlsl.dot.i = call reassoc nnan ninf nsz arcp afn half @llvm.[[ICF:spv]].fdot.v2f16(<2 x half> %{{.*}}, <2 x half> %{{.*}}) // CHECK: %cmp.i = fcmp reassoc nnan ninf nsz arcp afn olt half %hlsl.dot.i, 0xH0000 // CHECK: %fneg.i = fneg reassoc nnan ninf nsz arcp afn <2 x half> %{{.*}} // CHECK: %hlsl.select.i = select reassoc nnan ninf nsz arcp afn i1 %cmp.i, <2 x half> %{{.*}}, <2 x half> %fneg.i // CHECK: ret <2 x half> %hlsl.select.i -// SPVCHECK-LABEL: test_faceforward_half2 -// SPVCHECK: %spv.faceforward.i = call reassoc nnan ninf nsz arcp afn noundef <2 x half> @llvm.spv.faceforward.v2f16(<2 x half> %{{.*}}, <2 x half> %{{.*}}, <2 x half> %{{.*}}) -// SPVCHECK: ret <2 x half> %spv.faceforward.i half2 test_faceforward_half2(half2 N, half2 I, half2 Ng) { return faceforward(N, I, Ng); } // CHECK-LABEL: test_faceforward_half3 -// CHECK: %hlsl.dot.i = call reassoc nnan ninf nsz arcp afn half @llvm.dx.fdot.v3f16(<3 x half> %{{.*}}, <3 x half> %{{.*}}) +// CHECK: %hlsl.dot.i = call reassoc nnan ninf nsz arcp afn half @llvm.[[ICF]].fdot.v3f16(<3 x half> %{{.*}}, <3 x half> %{{.*}}) // CHECK: %cmp.i = fcmp reassoc 
nnan ninf nsz arcp afn olt half %hlsl.dot.i, 0xH0000 // CHECK: %fneg.i = fneg reassoc nnan ninf nsz arcp afn <3 x half> %{{.*}} // CHECK: %hlsl.select.i = select reassoc nnan ninf nsz arcp afn i1 %cmp.i, <3 x half> %{{.*}}, <3 x half> %fneg.i // CHECK: ret <3 x half> %hlsl.select.i -// SPVCHECK-LABEL: test_faceforward_half3 -// SPVCHECK: %spv.faceforward.i = call reassoc nnan ninf nsz arcp afn noundef <3 x half> @llvm.spv.faceforward.v3f16(<3 x half> %{{.*}}, <3 x half> %{{.*}}, <3 x half> %{{.*}}) -// SPVCHECK: ret <3 x half> %spv.faceforward.i half3 test_faceforward_half3(half3 N, half3 I, half3 Ng) { return faceforward(N, I, Ng); } // CHECK-LABEL: test_faceforward_half4 -// CHECK: %hlsl.dot.i = call reassoc nnan ninf nsz arcp afn half @llvm.dx.fdot.v4f16(<4 x half> %{{.*}}, <4 x half> %{{.*}}) +// CHECK: %hlsl.dot.i = call reassoc nnan ninf nsz arcp afn half @llvm.[[ICF]].fdot.v4f16(<4 x half> %{{.*}}, <4 x half> %{{.*}}) // CHECK: %cmp.i = fcmp reassoc nnan ninf nsz arcp afn olt half %hlsl.dot.i, 0xH0000 // CHECK: %fneg.i = fneg reassoc nnan ninf nsz arcp afn <4 x half> %{{.*}} // CHECK: %hlsl.select.i = select reassoc nnan ninf nsz arcp afn i1 %cmp.i, <4 x half> %{{.*}}, <4 x half> %fneg.i // CHECK: ret <4 x half> %hlsl.select.i -// SPVCHECK-LABEL: test_faceforward_half4 -// SPVCHECK: %spv.faceforward.i = call reassoc nnan ninf nsz arcp afn noundef <4 x half> @llvm.spv.faceforward.v4f16(<4 x half> %{{.*}}, <4 x half> %{{.*}}, <4 x half> %{{.*}}) -// SPVCHECK: ret <4 x half> %spv.faceforward.i half4 test_faceforward_half4(half4 N, half4 I, half4 Ng) { return faceforward(N, I, Ng); } // CHECK-LABEL: test_faceforward_float @@ -55,40 +44,28 @@ half4 test_faceforward_half4(half4 N, half4 I, half4 Ng) { return faceforward(N, // CHECK: %fneg.i = fneg reassoc nnan ninf nsz arcp afn float %{{.*}} // CHECK: %hlsl.select.i = select reassoc nnan ninf nsz arcp afn i1 %cmp.i, float %{{.*}}, float %fneg.i // CHECK: ret float %hlsl.select.i -// SPVCHECK-LABEL: 
test_faceforward_float -// SPVCHECK: %spv.faceforward.i = call reassoc nnan ninf nsz arcp afn noundef float @llvm.spv.faceforward.f32(float %{{.*}}, float %{{.*}}, float %{{.*}}) -// SPVCHECK: ret float %spv.faceforward.i float test_faceforward_float(float N, float I, float Ng) { return faceforward(N, I, Ng); } // CHECK-LABEL: test_faceforward_float2 -// CHECK: %hlsl.dot.i = call reassoc nnan ninf nsz arcp afn float @llvm.dx.fdot.v2f32(<2 x float> %{{.*}}, <2 x float> %{{.*}}) +// CHECK: %hlsl.dot.i = call reassoc nnan ninf nsz arcp afn float @llvm.[[ICF]].fdot.v2f32(<2 x float> %{{.*}}, <2 x float> %{{.*}}) // CHECK: %cmp.i = fcmp reassoc nnan ninf nsz arcp afn olt float %hlsl.dot.i, 0.000000e+00 // CHECK: %fneg.i = fneg reassoc nnan ninf nsz arcp afn <2 x float> %{{.*}} // CHECK: %hlsl.select.i = select reassoc nnan ninf nsz arcp afn i1 %cmp.i, <2 x float> %{{.*}}, <2 x float> %fneg.i // CHECK: ret <2 x float> %hlsl.select.i -// SPVCHECK-LABEL: test_faceforward_float2 -// SPVCHECK: %spv.faceforward.i = call reassoc nnan ninf nsz arcp afn noundef <2 x float> @llvm.spv.faceforward.v2f32(<2 x float> %{{.*}}, <2 x float> %{{.*}}, <2 x float> %{{.*}}) -// SPVCHECK: ret <2 x float> %spv.faceforward.i float2 test_faceforward_float2(float2 N, float2 I, float2 Ng) { return faceforward(N, I, Ng); } // CHECK-LABEL: test_faceforward_float3 -// CHECK: %hlsl.dot.i = call reassoc nnan ninf nsz arcp afn float @llvm.dx.fdot.v3f32(<3 x float> %{{.*}}, <3 x float> %{{.*}}) +// CHECK: %hlsl.dot.i = call reassoc nnan ninf nsz arcp afn float @llvm.[[ICF]].fdot.v3f32(<3 x float> %{{.*}}, <3 x float> %{{.*}}) // CHECK: %cmp.i = fcmp reassoc nnan ninf nsz arcp afn olt float %hlsl.dot.i, 0.000000e+00 // CHECK: %fneg.i = fneg reassoc nnan ninf nsz arcp afn <3 x float> %{{.*}} // CHECK: %hlsl.select.i = select reassoc nnan ninf nsz arcp afn i1 %cmp.i, <3 x float> %{{.*}}, <3 x float> %fneg.i // CHECK: ret <3 x float> %hlsl.select.i -// SPVCHECK-LABEL: test_faceforward_float3 -// SPVCHECK: 
%spv.faceforward.i = call reassoc nnan ninf nsz arcp afn noundef <3 x float> @llvm.spv.faceforward.v3f32(<3 x float> %{{.*}}, <3 x float> %{{.*}}, <3 x float> %{{.*}}) -// SPVCHECK: ret <3 x float> %spv.faceforward.i float3 test_faceforward_float3(float3 N, float3 I, float3 Ng) { return faceforward(N, I, Ng); } // CHECK-LABEL: test_faceforward_float4 -// CHECK: %hlsl.dot.i = call reassoc nnan ninf nsz arcp afn float @llvm.dx.fdot.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}) +// CHECK: %hlsl.dot.i = call reassoc nnan ninf nsz arcp afn float @llvm.[[ICF]].fdot.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}) // CHECK: %cmp.i = fcmp reassoc nnan ninf nsz arcp afn olt float %hlsl.dot.i, 0.000000e+00 // CHECK: %fneg.i = fneg reassoc nnan ninf nsz arcp afn <4 x float> %{{.*}} // CHECK: %hlsl.select.i = select reassoc nnan ninf nsz arcp afn i1 %cmp.i, <4 x float> %{{.*}}, <4 x float> %fneg.i // CHECK: ret <4 x float> %hlsl.select.i -// SPVCHECK-LABEL: test_faceforward_float4 -// SPVCHECK: %spv.faceforward.i = call reassoc nnan ninf nsz arcp afn noundef <4 x float> @llvm.spv.faceforward.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}) -// SPVCHECK: ret <4 x float> %spv.faceforward.i float4 test_faceforward_float4(float4 N, float4 I, float4 Ng) { return faceforward(N, I, Ng); } diff --git a/clang/test/DebugInfo/Generic/dbg-info-all-calls-described.c b/clang/test/DebugInfo/Generic/dbg-info-all-calls-described.c index 3ca3aaa0b70f4..0ba4767c8ddda 100644 --- a/clang/test/DebugInfo/Generic/dbg-info-all-calls-described.c +++ b/clang/test/DebugInfo/Generic/dbg-info-all-calls-described.c @@ -59,6 +59,13 @@ // RUN: -debug-info-kind=standalone -dwarf-version=4 \ // RUN: | FileCheck %s -check-prefix=NO-ATTR +// Disabled by feature flag (enabled by default) +// RUN: %clang_cc1 -emit-llvm -triple %itanium_abi_triple %s -o - \ +// RUN: -O1 -disable-llvm-passes \ +// RUN: -debug-info-kind=standalone -dwarf-version=5 \ +// RUN: -gno-call-site-info \ +// RUN: | 
FileCheck %s -check-prefix=NO-ATTR + // NO-ATTR-NOT: FlagAllCallsDescribed // HAS-ATTR-DAG: DISubprogram(name: "declaration1", {{.*}}, spFlags: DISPFlagOptimized) diff --git a/clang/test/Driver/debug-options.c b/clang/test/Driver/debug-options.c index 45ac450ac8faa..27e2728f15948 100644 --- a/clang/test/Driver/debug-options.c +++ b/clang/test/Driver/debug-options.c @@ -297,6 +297,9 @@ // RUN: %clang -### -g -gno-column-info %s 2>&1 \ // RUN: | FileCheck -check-prefix=NOCI %s // +// RUN: %clang -### -g -gno-call-site-info %s 2>&1 \ +// RUN: | FileCheck -check-prefix=NOCALLSITE %s +// // RUN: %clang -### -g -target x86_64-unknown-unknown %s 2>&1 \ // | FileCheck -check-prefix=CI %s // @@ -426,6 +429,8 @@ // // NOCI-DAG: "-gno-column-info" // +// NOCALLSITE: "-gno-call-site-info" +// // GEXTREFS: "-dwarf-ext-refs" "-fmodule-format=obj" // GEXTREFS: "-debug-info-kind={{standalone|constructor}}" // NOGEXTREFS-NOT: -dwarf-ext-refs diff --git a/clang/test/Driver/fsanitize.c b/clang/test/Driver/fsanitize.c index f2a4d8c50ec23..c02b8828062f2 100644 --- a/clang/test/Driver/fsanitize.c +++ b/clang/test/Driver/fsanitize.c @@ -984,10 +984,20 @@ // CHECK-UBSAN-MINIMAL: "-fsanitize={{((signed-integer-overflow|integer-divide-by-zero|shift-base|shift-exponent|unreachable|return|vla-bound|alignment|null|pointer-overflow|float-cast-overflow|array-bounds|enum|bool|builtin|returns-nonnull-attribute|nonnull-attribute|function),?){18}"}} // CHECK-UBSAN-MINIMAL: "-fsanitize-minimal-runtime" -// RUN: %clang --target=x86_64-linux-gnu -fsanitize=undefined -fsanitize-minimal-runtime -fsanitize-handler-preserve-all-regs %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-UBSAN-MINIMAL-PRESERVE -// CHECK-UBSAN-MINIMAL-PRESERVE: "-fsanitize={{((signed-integer-overflow|integer-divide-by-zero|shift-base|shift-exponent|unreachable|return|vla-bound|alignment|null|pointer-overflow|float-cast-overflow|array-bounds|enum|bool|builtin|returns-nonnull-attribute|nonnull-attribute|function),?){18}"}} -// 
CHECK-UBSAN-MINIMAL-PRESERVE: "-fsanitize-minimal-runtime" -// CHECK-UBSAN-MINIMAL-PRESERVE: "-fsanitize-handler-preserve-all-regs +// RUN: %clang --target=x86_64-linux-gnu -fsanitize=undefined -fsanitize-minimal-runtime -fsanitize-handler-preserve-all-regs %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-UBSAN-MINIMAL-PRESERVE-X86-64 +// CHECK-UBSAN-MINIMAL-PRESERVE-X86-64: "-fsanitize={{((signed-integer-overflow|integer-divide-by-zero|shift-base|shift-exponent|unreachable|return|vla-bound|alignment|null|pointer-overflow|float-cast-overflow|array-bounds|enum|bool|builtin|returns-nonnull-attribute|nonnull-attribute|function),?){18}"}} +// CHECK-UBSAN-MINIMAL-PRESERVE-X86-64: "-fsanitize-minimal-runtime" +// CHECK-UBSAN-MINIMAL-PRESERVE-X86-64: "-fsanitize-handler-preserve-all-regs + +// RUN: %clang --target=aarch64-linux-gnu -fsanitize=undefined -fsanitize-minimal-runtime -fsanitize-handler-preserve-all-regs %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-UBSAN-MINIMAL-PRESERVE-AARCH64 +// CHECK-UBSAN-MINIMAL-PRESERVE-AARCH64: "-fsanitize={{((signed-integer-overflow|integer-divide-by-zero|shift-base|shift-exponent|unreachable|return|vla-bound|alignment|null|pointer-overflow|float-cast-overflow|array-bounds|enum|bool|builtin|returns-nonnull-attribute|nonnull-attribute|function),?){18}"}} +// CHECK-UBSAN-MINIMAL-PRESERVE-AARCH64: "-fsanitize-minimal-runtime" +// CHECK-UBSAN-MINIMAL-PRESERVE-AARCH64: "-fsanitize-handler-preserve-all-regs + +// RUN: %clang --target=i386-linux-gnu -fsanitize=undefined -fsanitize-minimal-runtime -fsanitize-handler-preserve-all-regs %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-UBSAN-MINIMAL-PRESERVE-I386 +// CHECK-UBSAN-MINIMAL-PRESERVE-I386: "-fsanitize={{((signed-integer-overflow|integer-divide-by-zero|shift-base|shift-exponent|unreachable|return|vla-bound|alignment|null|pointer-overflow|float-cast-overflow|array-bounds|enum|bool|builtin|returns-nonnull-attribute|nonnull-attribute|function),?){18}"}} +// 
CHECK-UBSAN-MINIMAL-PRESERVE-I386: "-fsanitize-minimal-runtime" +// CHECK-UBSAN-MINIMAL-PRESERVE-I386-NOT: "-fsanitize-handler-preserve-all-regs // RUN: %clang --target=x86_64-linux-gnu -fsanitize=integer -fsanitize-trap=integer %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-INTSAN-TRAP // CHECK-INTSAN-TRAP: "-fsanitize-trap=integer-divide-by-zero,shift-base,shift-exponent,signed-integer-overflow,unsigned-integer-overflow,unsigned-shift-base,implicit-unsigned-integer-truncation,implicit-signed-integer-truncation,implicit-integer-sign-change" diff --git a/clang/test/Driver/hip-spirv-backend-bindings.c b/clang/test/Driver/hip-spirv-backend-bindings.c new file mode 100644 index 0000000000000..59b3f4fb54d4c --- /dev/null +++ b/clang/test/Driver/hip-spirv-backend-bindings.c @@ -0,0 +1,57 @@ +// RUN: %clang --offload-new-driver --target=x86_64-unknown-linux-gnu --offload-arch=amdgcnspirv \ +// RUN: -nogpuinc -nogpulib -x hip %s -save-temps \ +// RUN: -use-spirv-backend -ccc-print-bindings \ +// RUN: 2>&1 | FileCheck %s --check-prefixes=CHECK-SPIRV-BASE,CHECK-SPIRV + +// RUN: %clang --offload-new-driver --target=x86_64-unknown-linux-gnu --offload-arch=amdgcnspirv \ +// RUN: -nogpuinc -nogpulib -x hip %s -save-temps \ +// RUN: -use-spirv-backend -fgpu-rdc -ccc-print-bindings \ +// RUN: 2>&1 | FileCheck %s --check-prefixes=CHECK-SPIRV-BASE,CHECK-SPIRV-RDC + +// CHECK-SPIRV-BASE: # "spirv64-amd-amdhsa" - "clang", inputs: ["[[INPUT:.+]]"], output: "[[HIPI:.+\.hipi]]" +// CHECK-SPIRV-BASE: # "spirv64-amd-amdhsa" - "clang", inputs: ["[[HIPI]]"], output: "[[SPV_BC:.+\.bc]]" +// CHECK-SPIRV: # "spirv64-amd-amdhsa" - "Offload::Packager", inputs: ["[[SPV_BC]]"], output: "[[HIP_OUT:.+\.out]]" +// CHECK-SPIRV: # "spirv64-amd-amdhsa" - "Offload::Linker", inputs: ["[[HIP_OUT]]"], output: "[[HIPFB:.+\.hipfb]]" +// CHECK-SPIRV-RDC: # "x86_64-unknown-linux-gnu" - "Offload::Packager", inputs: ["[[SPV_BC]]"], output: "[[HIP_OUT:.+\.out]]" +// CHECK-SPIRV-BASE: # "x86_64-unknown-linux-gnu" 
- "clang", inputs: ["[[INPUT]]"], output: "[[HIPI:.+\.hipi]]" +// CHECK-SPIRV: # "x86_64-unknown-linux-gnu" - "clang", inputs: ["[[HIPI]]", "[[HIPFB]]"], output: "[[x86_BC:.+\.bc]]" +// CHECK-SPIRV-RDC: # "x86_64-unknown-linux-gnu" - "clang", inputs: ["[[HIPI]]", "[[HIP_OUT]]"], output: "[[x86_BC:.+\.bc]]" +// CHECK-SPIRV-BASE: # "x86_64-unknown-linux-gnu" - "clang", inputs: ["[[x86_BC]]"], output: "[[x86_S:.+\.s]]" +// CHECK-SPIRV-BASE: # "x86_64-unknown-linux-gnu" - "clang::as", inputs: ["[[x86_S]]"], output: "[[x86_O:.+\.o]]" +// CHECK-SPIRV-BASE: # "x86_64-unknown-linux-gnu" - "Offload::Linker", inputs: ["[[x86_O]]"], output: "{{.+\.out}}" + +// CHECK-SPIRV # "x86_64-unknown-linux-gnu" - "Offload::Linker", inputs: ["[[x86_O]]"], output: "[[x86_O:.+\.o]]" +// CHECK-SPIRV # "x86_64-unknown-linux-gnu" - "GNU::Linker", inputs: ["[[x86_O]]"], output: "{{.+\.out}}" + +// RUN: %clang --offload-new-driver --target=x86_64-unknown-linux-gnu --offload-arch=amdgcnspirv \ +// RUN: -nogpuinc -nogpulib -x hip %s -save-temps \ +// RUN: -use-spirv-backend --offload-device-only -ccc-print-bindings \ +// RUN: 2>&1 | FileCheck %s --check-prefix=CHECK-SPIRV-OFFLOAD-DEVICE-ONLY + +// CHECK-SPIRV-OFFLOAD-DEVICE-ONLY: # "spirv64-amd-amdhsa" - "clang", inputs: ["[[INPUT:.+]]"], output: "[[HIPI:.+\.hipi]]" +// CHECK-SPIRV-OFFLOAD-DEVICE-ONLY: # "spirv64-amd-amdhsa" - "clang", inputs: ["[[HIPI]]"], output: "[[SPV_BC:.+\.bc]]" +// CHECK-SPIRV-OFFLOAD-DEVICE-ONLY: # "spirv64-amd-amdhsa" - "clang", inputs: ["[[SPV_BC]]"], output: "[[SPV_OUT:.+\.out]]" +// CHECK-SPIRV-OFFLOAD-DEVICE-ONLY: # "spirv64-amd-amdhsa" - "AMDGCN::Linker", inputs: ["[[SPV_OUT]]"], output: "{{.+\.hipfb}}" + +// RUN: %clang --offload-new-driver --target=x86_64-unknown-linux-gnu --offload-arch=amdgcnspirv \ +// RUN: -nogpuinc -nogpulib -x hip %s -save-temps \ +// RUN: -use-spirv-backend --offload-device-only -fgpu-rdc -ccc-print-bindings \ +// RUN: 2>&1 | FileCheck %s --check-prefix=CHECK-SPIRV-OFFLOAD-DEVICE-ONLY-RDC + 
+// CHECK-SPIRV-OFFLOAD-DEVICE-ONLY-RDC: # "spirv64-amd-amdhsa" - "clang", inputs: ["[[INPUT:.+]]"], output: "[[HIPI:.+\.hipi]]" +// CHECK-SPIRV-OFFLOAD-DEVICE-ONLY-RDC: # "spirv64-amd-amdhsa" - "clang", inputs: ["[[HIPI]]"], output: "[[SPV_BC:.+\.bc]]" +// CHECK-SPIRV-OFFLOAD-DEVICE-ONLY-RDC: # "spirv64-amd-amdhsa" - "clang", inputs: ["[[SPV_BC]]"], output: "{{.+}}" + +// RUN: %clang --offload-new-driver --target=x86_64-unknown-linux-gnu --offload-arch=amdgcnspirv \ +// RUN: -nogpuinc -nogpulib -x hip %s -save-temps \ +// RUN: -use-spirv-backend --offload-device-only -S -fgpu-rdc -ccc-print-bindings \ +// RUN: 2>&1 | FileCheck %s --check-prefix=CHECK-SPIRV-OFFLOAD-DEVICE-ONLY-RDC + +// RUN: %clang --offload-new-driver --target=x86_64-unknown-linux-gnu --offload-arch=amdgcnspirv \ +// RUN: -nogpuinc -nogpulib -x hip %s -save-temps \ +// RUN: -use-spirv-backend --offload-device-only -S -ccc-print-bindings \ +// RUN: 2>&1 | FileCheck %s --check-prefix=CHECK-SPIRV-TEXTUAL-OFFLOAD-DEVICE-ONLY + +// CHECK-SPIRV-TEXTUAL-OFFLOAD-DEVICE-ONLY: # "spirv64-amd-amdhsa" - "clang", inputs: ["[[INPUT:.+]]"], output: "[[HIPI:.+\.hipi]]" +// CHECK-SPIRV-TEXTUAL-OFFLOAD-DEVICE-ONLY: # "spirv64-amd-amdhsa" - "clang", inputs: ["[[HIPI]]"], output: "[[SPV_BC:.+\.bc]]" +// CHECK-SPIRV-TEXTUAL-OFFLOAD-DEVICE-ONLY: # "spirv64-amd-amdhsa" - "clang", inputs: ["[[SPV_BC]]"], output: "{{.+\.s}}" diff --git a/clang/test/Driver/hip-spirv-backend-opt.c b/clang/test/Driver/hip-spirv-backend-opt.c new file mode 100644 index 0000000000000..10d9a0b01caf3 --- /dev/null +++ b/clang/test/Driver/hip-spirv-backend-opt.c @@ -0,0 +1,61 @@ +// This test case validates the behavior of -use-spirv-backend + +// --offload-device-only is always set --- testing interactions with -S and -fgpu-rdc + +// RUN: %clang --offload-new-driver --target=x86_64-unknown-linux-gnu --offload-arch=amdgcnspirv \ +// RUN: -nogpuinc -nogpulib -### -x hip %s -save-temps \ +// RUN: -use-spirv-backend --offload-device-only -S 
-no-canonical-prefixes \ +// RUN: 2>&1 | FileCheck %s --check-prefixes=CHECK-SPIRV-TRANSLATOR,CHECK-SPIRV-BACKEND-TEXTUAL + +// RUN: %clang --offload-new-driver --target=x86_64-unknown-linux-gnu --offload-arch=amdgcnspirv \ +// RUN: -nogpuinc -nogpulib -### -x hip %s -save-temps \ +// RUN: -use-spirv-backend --offload-device-only -no-canonical-prefixes \ +// RUN: 2>&1 | FileCheck %s --check-prefixes=CHECK-SPIRV-TRANSLATOR,CHECK-SPIRV-BACKEND-BINARY + +// The new driver's behavior is to emit LLVM IR for --offload-device-only and -fgpu-rdc (independently of SPIR-V). +// RUN: %clang --offload-new-driver --target=x86_64-unknown-linux-gnu --offload-arch=amdgcnspirv \ +// RUN: -### -nogpuinc -nogpulib -x hip %s -save-temps \ +// RUN: -use-spirv-backend --offload-device-only -S -fgpu-rdc -no-canonical-prefixes \ +// RUN: 2>&1 | FileCheck %s --check-prefixes=CHECK-SPIRV-TRANSLATOR,CHECK-SPIRV-BACKEND-LL,CHECK-FGPU-RDC + +// The new driver's behavior is to emit LLVM IR for --offload-device-only and -fgpu-rdc (independently of SPIR-V). 
+// RUN: %clang --offload-new-driver --target=x86_64-unknown-linux-gnu --offload-arch=amdgcnspirv \ +// RUN: -nogpuinc -nogpulib -### -x hip %s -save-temps \ +// RUN: -use-spirv-backend --offload-device-only -fgpu-rdc -no-canonical-prefixes \ +// RUN: 2>&1 | FileCheck %s --check-prefixes=CHECK-SPIRV-TRANSLATOR,CHECK-SPIRV-BACKEND-BC,CHECK-FGPU-RDC + +// --offload-device-only is always unset --- testing interactions with -S and -fgpu-rdc + +// RUN: %clang --offload-new-driver --target=x86_64-unknown-linux-gnu --offload-arch=amdgcnspirv \ +// RUN: -nogpuinc -nogpulib -### -x hip %s -save-temps \ +// RUN: -use-spirv-backend -S -fgpu-rdc -no-canonical-prefixes \ +// RUN: 2>&1 | FileCheck %s --check-prefixes=CHECK-SPIRV-TRANSLATOR,CHECK-SPIRV-BACKEND-BC,CHECK-FGPU-RDC + +// RUN: %clang --offload-new-driver --target=x86_64-unknown-linux-gnu --offload-arch=amdgcnspirv \ +// RUN: -nogpuinc -nogpulib -### -x hip %s -save-temps \ +// RUN: -use-spirv-backend -S -no-canonical-prefixes \ +// RUN: 2>&1 | FileCheck %s --check-prefixes=CHECK-SPIRV-TRANSLATOR,CHECK-SPIRV-BACKEND-BC + +// RUN: %clang --offload-new-driver --target=x86_64-unknown-linux-gnu --offload-arch=amdgcnspirv \ +// RUN: -nogpuinc -nogpulib -### -x hip %s -save-temps \ +// RUN: -use-spirv-backend -fgpu-rdc -no-canonical-prefixes \ +// RUN: 2>&1 | FileCheck %s --check-prefixes=CHECK-SPIRV-TRANSLATOR,CHECK-SPIRV-BACKEND-BC,CHECK-CLANG-LINKER-WRAPPER + +// RUN: %clang --offload-new-driver --target=x86_64-unknown-linux-gnu --offload-arch=amdgcnspirv \ +// RUN: -nogpuinc -nogpulib -### -x hip %s -save-temps \ +// RUN: -use-spirv-backend -no-canonical-prefixes \ +// RUN: 2>&1 | FileCheck %s --check-prefixes=CHECK-SPIRV-TRANSLATOR,CHECK-SPIRV-BACKEND-BC,CHECK-CLANG-LINKER-WRAPPER + +// RUN: %clang --no-offload-new-driver --target=x86_64-unknown-linux-gnu --offload-arch=amdgcnspirv \ +// RUN: -nogpuinc -nogpulib -### -x hip %s -save-temps \ +// RUN: -use-spirv-backend -no-canonical-prefixes \ +// RUN: 2>&1 | FileCheck 
%s --check-prefixes=CHECK-SPIRV-TRANSLATOR,CHECK-SPIRV-BACKEND-BC,CHECK-SPIRV-BACKEND-BINARY-EQ-TRIPLE + +// CHECK-SPIRV-TRANSLATOR-NOT: "{{.*llvm-spirv.*}}" +// CHECK-SPIRV-BACKEND-TEXTUAL: "{{.*clang(\.exe)?}}" "-cc1" "-triple" "spirv64-amd-amdhsa" {{.*}} "-S" +// CHECK-SPIRV-BACKEND-BINARY: "{{.*clang(\.exe)?}}" "-cc1" "-triple" "spirv64-amd-amdhsa" {{.*}} "-emit-obj" +// CHECK-SPIRV-BACKEND-BC: "{{.*clang(\.exe)?}}" "-cc1" "-triple" "spirv64-amd-amdhsa" {{.*}} "-emit-llvm-bc" +// CHECK-SPIRV-BACKEND-LL: "{{.*clang(\.exe)?}}" "-cc1" "-triple" "spirv64-amd-amdhsa" {{.*}} "-emit-llvm" +// CHECK-SPIRV-BACKEND-BINARY-EQ-TRIPLE: "{{.*clang(\.exe)?}}" "-cc1" {{.*}}"-triple=spirv64-amd-amdhsa" {{.*}}"-emit-obj" +// CHECK-FGPU-RDC-SAME: {{.*}} "-fgpu-rdc" +// CHECK-CLANG-LINKER-WRAPPER: "{{.*}}clang-linker-wrapper" "--should-extract=amdgcnspirv" {{.*}} "--device-compiler=spirv64-amd-amdhsa=-use-spirv-backend" diff --git a/clang/test/Driver/hip-spirv-backend-phases.c b/clang/test/Driver/hip-spirv-backend-phases.c new file mode 100644 index 0000000000000..d743b8cd50c40 --- /dev/null +++ b/clang/test/Driver/hip-spirv-backend-phases.c @@ -0,0 +1,80 @@ +// RUN: %clang --offload-new-driver --target=x86_64-unknown-linux-gnu --offload-arch=amdgcnspirv \ +// RUN: -nogpuinc -nogpulib -x hip %s -save-temps \ +// RUN: -use-spirv-backend -ccc-print-phases \ +// RUN: 2>&1 | FileCheck %s --check-prefix=CHECK-SPIRV-BINARY + +// CHECK-SPIRV-BINARY: [[P0:[0-9]+]]: input, "[[INPUT:.*]].c", hip, (host-hip) +// CHECK-SPIRV-BINARY: [[P1:[0-9]+]]: preprocessor, {[[P0]]}, hip-cpp-output, (host-hip) +// CHECK-SPIRV-BINARY: [[P2:[0-9]+]]: compiler, {[[P1]]}, ir, (host-hip) + +// CHECK-SPIRV-BINARY: [[P3:[0-9]+]]: input, "[[INPUT]].c", hip, (device-hip, amdgcnspirv) +// CHECK-SPIRV-BINARY: [[P4:[0-9]+]]: preprocessor, {[[P3]]}, hip-cpp-output, (device-hip, amdgcnspirv) +// CHECK-SPIRV-BINARY: [[P5:[0-9]+]]: compiler, {[[P4]]}, ir, (device-hip, amdgcnspirv) +// CHECK-SPIRV-BINARY: [[P6:[0-9]+]]: 
offload, "device-hip (spirv64-amd-amdhsa:amdgcnspirv)" {[[P5]]}, ir +// CHECK-SPIRV-BINARY: [[P7:[0-9]+]]: llvm-offload-binary, {[[P6]]}, image, (device-hip) +// CHECK-SPIRV-BINARY: [[P8:[0-9]+]]: clang-linker-wrapper, {[[P7]]}, hip-fatbin, (device-hip) + +// CHECK-SPIRV-BINARY: [[P9:[0-9]+]]: offload, "host-hip (x86_64-unknown-linux-gnu)" {[[P2]]}, "device-hip (spirv64-amd-amdhsa)" {[[P8]]}, ir +// CHECK-SPIRV-BINARY: [[P10:[0-9]+]]: backend, {[[P9]]}, assembler, (host-hip) +// CHECK-SPIRV-BINARY: [[P11:[0-9]+]]: assembler, {[[P10]]}, object, (host-hip) +// CHECK-SPIRV-BINARY: [[P12:[0-9]+]]: clang-linker-wrapper, {[[P11]]}, image, (host-hip) + +// RUN: %clang --offload-new-driver --target=x86_64-unknown-linux-gnu --offload-arch=amdgcnspirv \ +// RUN: -nogpuinc -nogpulib -x hip %s -save-temps \ +// RUN: -use-spirv-backend -fgpu-rdc -ccc-print-phases \ +// RUN: 2>&1 | FileCheck %s --check-prefix=CHECK-SPIRV-BINARY-RDC + +// CHECK-SPIRV-BINARY-RDC: [[P0:[0-9]+]]: input, "[[INPUT:.*]].c", hip, (host-hip) +// CHECK-SPIRV-BINARY-RDC: [[P1:[0-9]+]]: preprocessor, {[[P0]]}, hip-cpp-output, (host-hip) +// CHECK-SPIRV-BINARY-RDC: [[P2:[0-9]+]]: compiler, {[[P1]]}, ir, (host-hip) + +// CHECK-SPIRV-BINARY-RDC: [[P3:[0-9]+]]: input, "[[INPUT]].c", hip, (device-hip, amdgcnspirv) +// CHECK-SPIRV-BINARY-RDC: [[P4:[0-9]+]]: preprocessor, {[[P3]]}, hip-cpp-output, (device-hip, amdgcnspirv) +// CHECK-SPIRV-BINARY-RDC: [[P5:[0-9]+]]: compiler, {[[P4]]}, ir, (device-hip, amdgcnspirv) +// CHECK-SPIRV-BINARY-RDC: [[P6:[0-9]+]]: offload, "device-hip (spirv64-amd-amdhsa:amdgcnspirv)" {[[P5]]}, ir +// CHECK-SPIRV-BINARY-RDC: [[P7:[0-9]+]]: llvm-offload-binary, {[[P6]]}, image, (device-hip) + +// CHECK-SPIRV-BINARY-RDC: [[P8:[0-9]+]]: offload, "host-hip (x86_64-unknown-linux-gnu)" {[[P2]]}, "device-hip (x86_64-unknown-linux-gnu)" {[[P7]]}, ir +// CHECK-SPIRV-BINARY-RDC: [[P9:[0-9]+]]: backend, {[[P8]]}, assembler, (host-hip) +// CHECK-SPIRV-BINARY-RDC: [[P10:[0-9]+]]: assembler, {[[P9]]}, 
object, (host-hip) +// CHECK-SPIRV-BINARY-RDC: [[P11:[0-9]+]]: clang-linker-wrapper, {[[P10]]}, image, (host-hip) + +// RUN: %clang --offload-new-driver --target=x86_64-unknown-linux-gnu --offload-arch=amdgcnspirv \ +// RUN: -nogpuinc -nogpulib -x hip %s -save-temps \ +// RUN: -use-spirv-backend --offload-device-only -ccc-print-phases \ +// RUN: 2>&1 | FileCheck %s --check-prefix=CHECK-SPIRV-BINARY-OFFLOAD-DEVICE-ONLY + +// CHECK-SPIRV-BINARY-OFFLOAD-DEVICE-ONLY: [[P0:[0-9]+]]: input, "{{.*}}.c", hip, (device-hip, amdgcnspirv) +// CHECK-SPIRV-BINARY-OFFLOAD-DEVICE-ONLY: [[P1:[0-9]+]]: preprocessor, {[[P0]]}, hip-cpp-output, (device-hip, amdgcnspirv) +// CHECK-SPIRV-BINARY-OFFLOAD-DEVICE-ONLY: [[P2:[0-9]+]]: compiler, {[[P1]]}, ir, (device-hip, amdgcnspirv) +// CHECK-SPIRV-BINARY-OFFLOAD-DEVICE-ONLY: [[P3:[0-9]+]]: backend, {[[P2]]}, image, (device-hip, amdgcnspirv) +// CHECK-SPIRV-BINARY-OFFLOAD-DEVICE-ONLY: [[P4:[0-9]+]]: offload, "device-hip (spirv64-amd-amdhsa:amdgcnspirv)" {[[P3]]}, image +// CHECK-SPIRV-BINARY-OFFLOAD-DEVICE-ONLY: [[P5:[0-9]+]]: linker, {[[P4]]}, hip-fatbin, (device-hip) +// CHECK-SPIRV-BINARY-OFFLOAD-DEVICE-ONLY: [[P6:[0-9]+]]: offload, "device-hip (spirv64-amd-amdhsa)" {[[P5]]}, none + +// RUN: %clang --offload-new-driver --target=x86_64-unknown-linux-gnu --offload-arch=amdgcnspirv \ +// RUN: -nogpuinc -nogpulib -x hip %s -save-temps \ +// RUN: -use-spirv-backend --offload-device-only -fgpu-rdc -ccc-print-phases \ +// RUN: 2>&1 | FileCheck %s --check-prefix=CHECK-SPIRV-OFFLOAD-DEVICE-ONLY-RDC + +// CHECK-SPIRV-OFFLOAD-DEVICE-ONLY-RDC: [[P0:[0-9]+]]: input, "{{.*}}.c", hip, (device-hip, amdgcnspirv) +// CHECK-SPIRV-OFFLOAD-DEVICE-ONLY-RDC: [[P1:[0-9]+]]: preprocessor, {[[P0]]}, hip-cpp-output, (device-hip, amdgcnspirv) +// CHECK-SPIRV-OFFLOAD-DEVICE-ONLY-RDC: [[P2:[0-9]+]]: compiler, {[[P1]]}, ir, (device-hip, amdgcnspirv) +// CHECK-SPIRV-OFFLOAD-DEVICE-ONLY-RDC: [[P3:[0-9]+]]: backend, {[[P2]]}, ir, (device-hip, amdgcnspirv) +// 
CHECK-SPIRV-OFFLOAD-DEVICE-ONLY-RDC: [[P4:[0-9]+]]: offload, "device-hip (spirv64-amd-amdhsa:amdgcnspirv)" {[[P3]]}, none + +// RUN: %clang --offload-new-driver --target=x86_64-unknown-linux-gnu --offload-arch=amdgcnspirv \ +// RUN: -nogpuinc -nogpulib -x hip %s -save-temps \ +// RUN: -use-spirv-backend --offload-device-only -S -fgpu-rdc -ccc-print-phases \ +// RUN: 2>&1 | FileCheck %s --check-prefix=CHECK-SPIRV-OFFLOAD-DEVICE-ONLY-RDC + +// RUN: %clang --offload-new-driver --target=x86_64-unknown-linux-gnu --offload-arch=amdgcnspirv \ +// RUN: -nogpuinc -nogpulib -x hip %s -save-temps \ +// RUN: -use-spirv-backend --offload-device-only -S -ccc-print-phases \ +// RUN: 2>&1 | FileCheck %s --check-prefix=CHECK-SPIRV-TEXTUAL-OFFLOAD-DEVICE-ONLY + +// CHECK-SPIRV-TEXTUAL-OFFLOAD-DEVICE-ONLY: [[P0:[0-9]+]]: input, "{{.*}}.c", hip, (device-hip, amdgcnspirv) +// CHECK-SPIRV-TEXTUAL-OFFLOAD-DEVICE-ONLY: [[P1:[0-9]+]]: preprocessor, {[[P0]]}, hip-cpp-output, (device-hip, amdgcnspirv) +// CHECK-SPIRV-TEXTUAL-OFFLOAD-DEVICE-ONLY: [[P2:[0-9]+]]: compiler, {[[P1]]}, ir, (device-hip, amdgcnspirv) +// CHECK-SPIRV-TEXTUAL-OFFLOAD-DEVICE-ONLY: [[P3:[0-9]+]]: backend, {[[P2]]}, assembler, (device-hip, amdgcnspirv) +// CHECK-SPIRV-TEXTUAL-OFFLOAD-DEVICE-ONLY: [[P4:[0-9]+]]: offload, "device-hip (spirv64-amd-amdhsa:amdgcnspirv)" {[[P3]]}, none diff --git a/clang/test/Driver/riscv-features.c b/clang/test/Driver/riscv-features.c index 1c8b52bd31997..97736ff81c799 100644 --- a/clang/test/Driver/riscv-features.c +++ b/clang/test/Driver/riscv-features.c @@ -68,13 +68,6 @@ // DEFAULT-LINUX-SAME: "-target-feature" "+d" // DEFAULT-LINUX-SAME: "-target-feature" "+c" -// RUN: not %clang -c --target=riscv64-linux-gnu -gsplit-dwarf %s 2>&1 | FileCheck %s --check-prefix=ERR-SPLIT-DWARF -// RUN: not %clang -c --target=riscv64 -gsplit-dwarf=single %s 2>&1 | FileCheck %s --check-prefix=ERR-SPLIT-DWARF -// RUN: %clang -### -c --target=riscv64 -mno-relax -g -gsplit-dwarf %s 2>&1 | FileCheck %s 
--check-prefix=SPLIT-DWARF - -// ERR-SPLIT-DWARF: error: -gsplit-dwarf{{.*}} is unsupported with RISC-V linker relaxation (-mrelax) -// SPLIT-DWARF: "-split-dwarf-file" - // RUN: %clang -mabi=lp64d --target=riscv64-unknown-fuchsia -### %s -fsyntax-only 2>&1 | FileCheck %s -check-prefixes=FUCHSIA // FUCHSIA: "-target-feature" "+m" // FUCHSIA-SAME: "-target-feature" "+a" diff --git a/clang/test/Sema/AArch64/sve-vector-conditional-op.cpp b/clang/test/Sema/AArch64/sve-vector-conditional-op.cpp new file mode 100644 index 0000000000000..0ca55e6268658 --- /dev/null +++ b/clang/test/Sema/AArch64/sve-vector-conditional-op.cpp @@ -0,0 +1,37 @@ +// RUN: %clang_cc1 %s -fsyntax-only -triple aarch64-none-linux-gnu -target-feature +sve -verify + +typedef int fixed_vector __attribute__((vector_size(4))); + +auto error_fixed_vector_result(__SVBool_t svbool, fixed_vector a, fixed_vector b) { + // expected-error@+1 {{vector condition type '__SVBool_t' and result type 'fixed_vector' (vector of 1 'int' value) do not have the same number of elements}} + return svbool ? a : b; +} + +auto error_void_result(__SVBool_t svbool) { + // expected-error@+1 {{GNU vector conditional operand cannot be void}} + return svbool ? (void)0 : (void)1; +} + +auto error_sve_splat_result_unsupported(__SVBool_t svbool, long long a, long long b) { + // expected-error@+1 {{scalar type 'long long' not supported with vector condition type '__SVBool_t'}} + return svbool ? a : b; +} + +auto error_sve_vector_result_matched_element_count(__SVBool_t svbool, __SVUint32_t a, __SVUint32_t b) { + // expected-error@+1 {{vector condition type '__SVBool_t' and result type '__SVUint32_t' do not have the same number of elements}} + return svbool ? a : b; +} + +// The following cases should be supported: + +__SVBool_t cond_svbool(__SVBool_t a, __SVBool_t b) { + return a < b ? a : b; +} + +__SVFloat32_t cond_svf32(__SVFloat32_t a, __SVFloat32_t b) { + return a < b ? 
a : b; +} + +__SVUint64_t cond_u64_splat(__SVUint64_t a) { + return a < 1ul ? a : 1ul; +} diff --git a/clang/test/SemaCXX/return.cpp b/clang/test/SemaCXX/return.cpp index 796c9ae91dedc..92be66c24489e 100644 --- a/clang/test/SemaCXX/return.cpp +++ b/clang/test/SemaCXX/return.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 %s -std=c++11 -fcxx-exceptions -fexceptions -fsyntax-only -Wignored-qualifiers -verify +// RUN: %clang_cc1 %s -std=c++14 -fcxx-exceptions -fexceptions -fsyntax-only -Wignored-qualifiers -verify int test1() { throw; @@ -132,3 +133,27 @@ void cxx_unresolved_expr() { // expr doesn't assert. return int(undeclared, 4; // expected-error {{use of undeclared identifier 'undeclared'}} } + +#if __cplusplus >= 201402L +namespace GH43054 { +struct S{}; +const auto foo() { return 0; } // expected-warning {{'const' type qualifier on return type has no effect}} +const auto bar() { return S{}; } +template +const auto baz() { return T{}; } + +void test() { + baz(); + baz(); + + []() -> const auto { // expected-warning {{'const' type qualifier on return type has no effect}} + return 0; + }(); + + []() -> const auto { + return S{}; + }(); +} +} + +#endif diff --git a/clang/test/SemaHIP/amdgpu-gfx950-load-to-lds.hip b/clang/test/SemaHIP/amdgpu-gfx950-load-to-lds.hip index 366278f648939..b49c1866caa1c 100644 --- a/clang/test/SemaHIP/amdgpu-gfx950-load-to-lds.hip +++ b/clang/test/SemaHIP/amdgpu-gfx950-load-to-lds.hip @@ -1,7 +1,7 @@ // REQUIRES: amdgpu-registered-target -// RUN: %clang_cc1 -fsyntax-only -triple amdgcn -target-cpu gfx950 -verify=device %s -fcuda-is-device -// RUN: %clang_cc1 -fsyntax-only -triple x86_64 -aux-triple amdgcn -verify=host %s -// device-no-diagnostics +// RUN: %clang_cc1 -fsyntax-only -triple amdgcn -target-cpu gfx950 -verify %s -fcuda-is-device +// RUN: %clang_cc1 -fsyntax-only -triple x86_64 -aux-triple amdgcn -verify %s +// expected-no-diagnostics #define __device__ __attribute__((device)) #define __global__ __attribute__((global)) @@ -20,11 +20,11 
@@ __device__ void i_am_device(void* src, __amdgpu_buffer_rsrc_t rsrc, __shared__ v __builtin_amdgcn_struct_ptr_buffer_load_lds(rsrc, dst, 12, vindex, voffset, soffset, 0, 0); __builtin_amdgcn_struct_ptr_buffer_load_lds(rsrc, dst, 16, vindex, voffset, soffset, 0, 0); - __builtin_amdgcn_load_to_lds(src, dst, 1, 0, 0); // host-error{{cannot initialize a parameter of type '__attribute__((address_space(3))) void *' with an lvalue of type 'void *'}} - __builtin_amdgcn_load_to_lds(src, dst, 2, 0, 0); // host-error{{cannot initialize a parameter of type '__attribute__((address_space(3))) void *' with an lvalue of type 'void *'}} - __builtin_amdgcn_load_to_lds(src, dst, 4, 0, 0); // host-error{{cannot initialize a parameter of type '__attribute__((address_space(3))) void *' with an lvalue of type 'void *'}} - __builtin_amdgcn_load_to_lds(src, dst, 12, 0, 0); // host-error{{cannot initialize a parameter of type '__attribute__((address_space(3))) void *' with an lvalue of type 'void *'}} - __builtin_amdgcn_load_to_lds(src, dst, 16, 0, 0); // host-error{{cannot initialize a parameter of type '__attribute__((address_space(3))) void *' with an lvalue of type 'void *'}} + __builtin_amdgcn_load_to_lds(src, dst, 1, 0, 0); + __builtin_amdgcn_load_to_lds(src, dst, 2, 0, 0); + __builtin_amdgcn_load_to_lds(src, dst, 4, 0, 0); + __builtin_amdgcn_load_to_lds(src, dst, 12, 0, 0); + __builtin_amdgcn_load_to_lds(src, dst, 16, 0, 0); __builtin_amdgcn_global_load_lds(src, dst, 1, 0 , 0); __builtin_amdgcn_global_load_lds(src, dst, 2, 0 , 0); @@ -46,11 +46,11 @@ __global__ void i_am_kernel(void* src, __amdgpu_buffer_rsrc_t rsrc, __shared__ v __builtin_amdgcn_struct_ptr_buffer_load_lds(rsrc, dst, 12, vindex, voffset, soffset, 0, 0); __builtin_amdgcn_struct_ptr_buffer_load_lds(rsrc, dst, 16, vindex, voffset, soffset, 0, 0); - __builtin_amdgcn_load_to_lds(src, dst, 1, 0, 0); // host-error{{cannot initialize a parameter of type '__attribute__((address_space(3))) void *' with an lvalue of type 
'void *'}} - __builtin_amdgcn_load_to_lds(src, dst, 2, 0, 0); // host-error{{cannot initialize a parameter of type '__attribute__((address_space(3))) void *' with an lvalue of type 'void *'}} - __builtin_amdgcn_load_to_lds(src, dst, 4, 0, 0); // host-error{{cannot initialize a parameter of type '__attribute__((address_space(3))) void *' with an lvalue of type 'void *'}} - __builtin_amdgcn_load_to_lds(src, dst, 12, 0, 0); // host-error{{cannot initialize a parameter of type '__attribute__((address_space(3))) void *' with an lvalue of type 'void *'}} - __builtin_amdgcn_load_to_lds(src, dst, 16, 0, 0); // host-error{{cannot initialize a parameter of type '__attribute__((address_space(3))) void *' with an lvalue of type 'void *'}} + __builtin_amdgcn_load_to_lds(src, dst, 1, 0, 0); + __builtin_amdgcn_load_to_lds(src, dst, 2, 0, 0); + __builtin_amdgcn_load_to_lds(src, dst, 4, 0, 0); + __builtin_amdgcn_load_to_lds(src, dst, 12, 0, 0); + __builtin_amdgcn_load_to_lds(src, dst, 16, 0, 0); __builtin_amdgcn_global_load_lds(src, dst, 1, 0 , 0); __builtin_amdgcn_global_load_lds(src, dst, 2, 0 , 0); diff --git a/clang/tools/c-index-test/CMakeLists.txt b/clang/tools/c-index-test/CMakeLists.txt index 24e7c9692ca56..41e80e66ffa7a 100644 --- a/clang/tools/c-index-test/CMakeLists.txt +++ b/clang/tools/c-index-test/CMakeLists.txt @@ -27,6 +27,7 @@ else() libclang clangAST clangBasic + clangDriver clangFrontend clangIndex clangSerialization diff --git a/clang/tools/c-index-test/core_main.cpp b/clang/tools/c-index-test/core_main.cpp index 5a3086a7fc08f..c67479fd130ca 100644 --- a/clang/tools/c-index-test/core_main.cpp +++ b/clang/tools/c-index-test/core_main.cpp @@ -8,6 +8,7 @@ #include "clang/AST/Mangle.h" #include "clang/Basic/LangOptions.h" +#include "clang/Driver/CreateInvocationFromArgs.h" #include "clang/Frontend/ASTUnit.h" #include "clang/Frontend/CompilerInstance.h" #include "clang/Frontend/CompilerInvocation.h" diff --git a/clang/tools/diagtool/CMakeLists.txt 
b/clang/tools/diagtool/CMakeLists.txt index b49619c075c73..09b2a81790f87 100644 --- a/clang/tools/diagtool/CMakeLists.txt +++ b/clang/tools/diagtool/CMakeLists.txt @@ -15,5 +15,6 @@ add_clang_tool(diagtool clang_target_link_libraries(diagtool PRIVATE clangBasic + clangDriver clangFrontend ) diff --git a/clang/tools/diagtool/ShowEnabledWarnings.cpp b/clang/tools/diagtool/ShowEnabledWarnings.cpp index bea0288c09358..5b25e656dafa4 100644 --- a/clang/tools/diagtool/ShowEnabledWarnings.cpp +++ b/clang/tools/diagtool/ShowEnabledWarnings.cpp @@ -9,6 +9,7 @@ #include "DiagTool.h" #include "DiagnosticNames.h" #include "clang/Basic/LLVM.h" +#include "clang/Driver/CreateInvocationFromArgs.h" #include "clang/Frontend/CompilerInstance.h" #include "clang/Frontend/TextDiagnosticBuffer.h" #include "clang/Frontend/TextDiagnosticPrinter.h" diff --git a/clang/tools/driver/cc1_main.cpp b/clang/tools/driver/cc1_main.cpp index 300d59df1bf7b..cc757039cafd0 100644 --- a/clang/tools/driver/cc1_main.cpp +++ b/clang/tools/driver/cc1_main.cpp @@ -17,6 +17,7 @@ #include "clang/Basic/TargetOptions.h" #include "clang/CodeGen/ObjectFilePCHContainerWriter.h" #include "clang/Config/config.h" +#include "clang/Driver/Driver.h" #include "clang/Driver/DriverDiagnostic.h" #include "clang/Frontend/CompilerInstance.h" #include "clang/Frontend/CompilerInvocation.h" @@ -269,7 +270,7 @@ int cc1_main(ArrayRef Argv, const char *Argv0, void *MainAddr) { if (Clang->getHeaderSearchOpts().UseBuiltinIncludes && Clang->getHeaderSearchOpts().ResourceDir.empty()) Clang->getHeaderSearchOpts().ResourceDir = - CompilerInvocation::GetResourcesPath(Argv0, MainAddr); + GetResourcesPath(Argv0, MainAddr); /// Create the actual file system. 
Clang->createVirtualFileSystem(llvm::vfs::getRealFileSystem(), DiagsBuffer); diff --git a/clang/tools/libclang/CIndex.cpp b/clang/tools/libclang/CIndex.cpp index f4d6fa72a1dfe..32e84248c1b27 100644 --- a/clang/tools/libclang/CIndex.cpp +++ b/clang/tools/libclang/CIndex.cpp @@ -38,6 +38,7 @@ #include "clang/Basic/Stack.h" #include "clang/Basic/TargetInfo.h" #include "clang/Basic/Version.h" +#include "clang/Driver/CreateASTUnitFromArgs.h" #include "clang/Frontend/ASTUnit.h" #include "clang/Frontend/CompilerInstance.h" #include "clang/Index/CommentToXML.h" @@ -4361,7 +4362,7 @@ clang_parseTranslationUnit_Impl(CXIndex CIdx, const char *source_filename, LibclangInvocationReporter InvocationReporter( *CXXIdx, LibclangInvocationReporter::OperationKind::ParseOperation, options, llvm::ArrayRef(*Args), /*InvocationArgs=*/{}, unsaved_files); - std::unique_ptr Unit = ASTUnit::LoadFromCommandLine( + std::unique_ptr Unit = CreateASTUnitFromCommandLine( Args->data(), Args->data() + Args->size(), CXXIdx->getPCHContainerOperations(), DiagOpts, Diags, CXXIdx->getClangResourcesPath(), CXXIdx->getStorePreamblesInMemory(), diff --git a/clang/tools/libclang/CIndexer.cpp b/clang/tools/libclang/CIndexer.cpp index 11d9312b64849..853a936b43e37 100644 --- a/clang/tools/libclang/CIndexer.cpp +++ b/clang/tools/libclang/CIndexer.cpp @@ -16,6 +16,7 @@ #include "clang/Basic/Version.h" #include "clang/Config/config.h" #include "clang/Driver/Driver.h" +#include "clang/Options/OptionUtils.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallString.h" #include "llvm/Support/FileSystem.h" @@ -137,7 +138,7 @@ const std::string &CIndexer::getClangResourcesPath() { #endif // Cache our result. 
- ResourcesPath = driver::Driver::GetResourcesPath(LibClangPath); + ResourcesPath = GetResourcesPath(LibClangPath); return ResourcesPath; } diff --git a/clang/tools/libclang/CMakeLists.txt b/clang/tools/libclang/CMakeLists.txt index e0ff7605b68b8..b0105f5a5f79f 100644 --- a/clang/tools/libclang/CMakeLists.txt +++ b/clang/tools/libclang/CMakeLists.txt @@ -65,6 +65,7 @@ set(LIBS clangFrontend clangIndex clangLex + clangOptions clangRewrite clangSema clangSerialization diff --git a/clang/tools/libclang/Indexing.cpp b/clang/tools/libclang/Indexing.cpp index c142f142d5071..75323d70afcfe 100644 --- a/clang/tools/libclang/Indexing.cpp +++ b/clang/tools/libclang/Indexing.cpp @@ -15,6 +15,7 @@ #include "CXString.h" #include "CXTranslationUnit.h" #include "clang/AST/ASTConsumer.h" +#include "clang/Driver/CreateInvocationFromArgs.h" #include "clang/Frontend/ASTUnit.h" #include "clang/Frontend/CompilerInstance.h" #include "clang/Frontend/CompilerInvocation.h" diff --git a/clang/unittests/Driver/DXCModeTest.cpp b/clang/unittests/Driver/DXCModeTest.cpp index e0454f190b35a..130da620b40b5 100644 --- a/clang/unittests/Driver/DXCModeTest.cpp +++ b/clang/unittests/Driver/DXCModeTest.cpp @@ -15,6 +15,7 @@ #include "clang/Basic/LLVM.h" #include "clang/Basic/TargetOptions.h" #include "clang/Driver/Compilation.h" +#include "clang/Driver/CreateInvocationFromArgs.h" #include "clang/Driver/Driver.h" #include "clang/Driver/ToolChain.h" #include "clang/Frontend/CompilerInstance.h" diff --git a/clang/unittests/Driver/ToolChainTest.cpp b/clang/unittests/Driver/ToolChainTest.cpp index afa17ff219be2..8f533790ec501 100644 --- a/clang/unittests/Driver/ToolChainTest.cpp +++ b/clang/unittests/Driver/ToolChainTest.cpp @@ -17,6 +17,7 @@ #include "clang/Basic/TargetInfo.h" #include "clang/Basic/TargetOptions.h" #include "clang/Driver/Compilation.h" +#include "clang/Driver/CreateInvocationFromArgs.h" #include "clang/Driver/Driver.h" #include "clang/Frontend/CompilerInstance.h" #include 
"llvm/ADT/ArrayRef.h" diff --git a/clang/unittests/Format/ConfigParseTest.cpp b/clang/unittests/Format/ConfigParseTest.cpp index d578fa7a1a1e8..fec1c48c448d2 100644 --- a/clang/unittests/Format/ConfigParseTest.cpp +++ b/clang/unittests/Format/ConfigParseTest.cpp @@ -1164,6 +1164,36 @@ TEST(ConfigParseTest, ParsesConfiguration) { FormatStyle::BLS_Block); CHECK_PARSE("Cpp11BracedListStyle: true", Cpp11BracedListStyle, FormatStyle::BLS_AlignFirstComment); + + constexpr FormatStyle::IntegerLiteralSeparatorStyle + ExpectedIntegerLiteralSeparatorStyle{/*Binary=*/2, + /*BinaryMinDigitInsert=*/5, + /*BinaryMaxDigitRemove=*/2, + /*Decimal=*/6, + /*DecimalMinDigitInsert=*/6, + /*DecimalMaxDigitRemove=*/3, + /*Hex=*/4, + /*HexMinDigitInsert=*/2, + /*HexMaxDigitRemove=*/1}; + CHECK_PARSE("IntegerLiteralSeparator:\n" + " Binary: 2\n" + " BinaryMinDigitsInsert: 5\n" + " BinaryMaxDigitsRemove: 2\n" + " Decimal: 6\n" + " DecimalMinDigitsInsert: 6\n" + " DecimalMaxDigitsRemove: 3\n" + " Hex: 4\n" + " HexMinDigitsInsert: 2\n" + " HexMaxDigitsRemove: 1", + IntegerLiteralSeparator, ExpectedIntegerLiteralSeparatorStyle); + + // Backward compatibility: + CHECK_PARSE_NESTED_VALUE("BinaryMinDigits: 6", IntegerLiteralSeparator, + BinaryMinDigitsInsert, 6); + CHECK_PARSE_NESTED_VALUE("DecimalMinDigits: 5", IntegerLiteralSeparator, + DecimalMinDigitsInsert, 5); + CHECK_PARSE_NESTED_VALUE("HexMinDigits: 5", IntegerLiteralSeparator, + HexMinDigitsInsert, 5); } TEST(ConfigParseTest, ParsesConfigurationWithLanguages) { diff --git a/clang/unittests/Format/IntegerLiteralSeparatorTest.cpp b/clang/unittests/Format/IntegerLiteralSeparatorTest.cpp index 53b6dd8efadff..21cdab2187d90 100644 --- a/clang/unittests/Format/IntegerLiteralSeparatorTest.cpp +++ b/clang/unittests/Format/IntegerLiteralSeparatorTest.cpp @@ -137,34 +137,34 @@ TEST_F(IntegerLiteralSeparatorTest, UnderscoreAsSeparator) { verifyFormat("o = 0o400000000000000003n;", Style); } -TEST_F(IntegerLiteralSeparatorTest, MinDigits) { 
+TEST_F(IntegerLiteralSeparatorTest, MinDigitsInsert) { FormatStyle Style = getLLVMStyle(); Style.IntegerLiteralSeparator.Binary = 3; Style.IntegerLiteralSeparator.Decimal = 3; Style.IntegerLiteralSeparator.Hex = 2; - Style.IntegerLiteralSeparator.BinaryMinDigits = 7; + Style.IntegerLiteralSeparator.BinaryMinDigitsInsert = 7; verifyFormat("b1 = 0b101101;\n" "b2 = 0b1'101'101;", "b1 = 0b101'101;\n" "b2 = 0b1101101;", Style); - Style.IntegerLiteralSeparator.DecimalMinDigits = 5; + Style.IntegerLiteralSeparator.DecimalMinDigitsInsert = 5; verifyFormat("d1 = 2023;\n" "d2 = 10'000;", "d1 = 2'023;\n" "d2 = 100'00;", Style); - Style.IntegerLiteralSeparator.DecimalMinDigits = 3; + Style.IntegerLiteralSeparator.DecimalMinDigitsInsert = 3; verifyFormat("d1 = 123;\n" "d2 = 1'234;", "d1 = 12'3;\n" "d2 = 12'34;", Style); - Style.IntegerLiteralSeparator.HexMinDigits = 6; + Style.IntegerLiteralSeparator.HexMinDigitsInsert = 6; verifyFormat("h1 = 0xABCDE;\n" "h2 = 0xAB'CD'EF;", "h1 = 0xA'BC'DE;\n" @@ -243,6 +243,23 @@ TEST_F(IntegerLiteralSeparatorTest, FloatingPoint) { Style); } +TEST_F(IntegerLiteralSeparatorTest, MaxDigitsRemove) { + auto Style = getLLVMStyle(); + Style.IntegerLiteralSeparator.Decimal = 3; + Style.IntegerLiteralSeparator.DecimalMaxDigitsRemove = 4; + Style.IntegerLiteralSeparator.DecimalMinDigitsInsert = 7; + + verifyFormat("d1 = 123456;\n" + "d2 = 1234'56;", + Style); + + verifyFormat("d0 = 2023;\n" + "d3 = 5'000'000;", + "d0 = 20'2'3;\n" + "d3 = 5000000;", + Style); +} + } // namespace } // namespace test } // namespace format diff --git a/clang/unittests/Frontend/ASTUnitTest.cpp b/clang/unittests/Frontend/ASTUnitTest.cpp index dfdbe90e72f1f..bf9e4e184b5db 100644 --- a/clang/unittests/Frontend/ASTUnitTest.cpp +++ b/clang/unittests/Frontend/ASTUnitTest.cpp @@ -9,6 +9,8 @@ #include #include "clang/Basic/FileManager.h" +#include "clang/Driver/CreateASTUnitFromArgs.h" +#include "clang/Driver/CreateInvocationFromArgs.h" #include "clang/Frontend/ASTUnit.h" #include 
"clang/Frontend/CompilerInstance.h" #include "clang/Frontend/CompilerInvocation.h" @@ -173,7 +175,7 @@ TEST_F(ASTUnitTest, LoadFromCommandLineEarlyError) { auto PCHContainerOps = std::make_shared(); std::unique_ptr ErrUnit; - std::unique_ptr AST = ASTUnit::LoadFromCommandLine( + std::unique_ptr AST = CreateASTUnitFromCommandLine( &Args[0], &Args[4], PCHContainerOps, DiagOpts, Diags, "", false, "", false, CaptureDiagsKind::All, {}, true, 0, TU_Complete, false, false, false, SkipFunctionBodiesScope::None, false, true, false, false, @@ -201,7 +203,7 @@ TEST_F(ASTUnitTest, LoadFromCommandLineWorkingDirectory) { auto PCHContainerOps = std::make_shared(); std::unique_ptr ErrUnit; - std::unique_ptr AST = ASTUnit::LoadFromCommandLine( + std::unique_ptr AST = CreateASTUnitFromCommandLine( &Args[0], &Args[4], PCHContainerOps, DiagOpts, Diags, "", false, "", false, CaptureDiagsKind::All, {}, true, 0, TU_Complete, false, false, false, SkipFunctionBodiesScope::None, false, true, false, false, diff --git a/clang/unittests/Frontend/CompilerInstanceTest.cpp b/clang/unittests/Frontend/CompilerInstanceTest.cpp index cd3fefa1ea994..39d35b48f394a 100644 --- a/clang/unittests/Frontend/CompilerInstanceTest.cpp +++ b/clang/unittests/Frontend/CompilerInstanceTest.cpp @@ -8,6 +8,7 @@ #include "clang/Frontend/CompilerInstance.h" #include "clang/Basic/FileManager.h" +#include "clang/Driver/CreateInvocationFromArgs.h" #include "clang/Frontend/CompilerInvocation.h" #include "clang/Frontend/FrontendActions.h" #include "clang/Frontend/TextDiagnosticPrinter.h" diff --git a/clang/unittests/Frontend/UtilsTest.cpp b/clang/unittests/Frontend/UtilsTest.cpp index fc411e4af705f..a82733d57714a 100644 --- a/clang/unittests/Frontend/UtilsTest.cpp +++ b/clang/unittests/Frontend/UtilsTest.cpp @@ -9,6 +9,7 @@ #include "clang/Frontend/Utils.h" #include "clang/Basic/Diagnostic.h" #include "clang/Basic/TargetOptions.h" +#include "clang/Driver/CreateInvocationFromArgs.h" #include 
"clang/Frontend/CompilerInstance.h" #include "clang/Frontend/CompilerInvocation.h" #include "clang/Lex/PreprocessorOptions.h" diff --git a/clang/unittests/Sema/CMakeLists.txt b/clang/unittests/Sema/CMakeLists.txt index b61ed8c457635..188f6135a60ac 100644 --- a/clang/unittests/Sema/CMakeLists.txt +++ b/clang/unittests/Sema/CMakeLists.txt @@ -13,6 +13,7 @@ add_distinct_clang_unittest(SemaTests clangAST clangASTMatchers clangBasic + clangDriver clangFrontend clangParse clangSema diff --git a/clang/unittests/Sema/SemaNoloadLookupTest.cpp b/clang/unittests/Sema/SemaNoloadLookupTest.cpp index e565372698e5e..3944269eff502 100644 --- a/clang/unittests/Sema/SemaNoloadLookupTest.cpp +++ b/clang/unittests/Sema/SemaNoloadLookupTest.cpp @@ -10,6 +10,7 @@ #include "clang/AST/DeclarationName.h" #include "clang/ASTMatchers/ASTMatchFinder.h" #include "clang/ASTMatchers/ASTMatchers.h" +#include "clang/Driver/CreateInvocationFromArgs.h" #include "clang/Frontend/CompilerInstance.h" #include "clang/Frontend/FrontendAction.h" #include "clang/Frontend/FrontendActions.h" diff --git a/clang/unittests/Serialization/ForceCheckFileInputTest.cpp b/clang/unittests/Serialization/ForceCheckFileInputTest.cpp index edf33ae04230b..b76dcfec96063 100644 --- a/clang/unittests/Serialization/ForceCheckFileInputTest.cpp +++ b/clang/unittests/Serialization/ForceCheckFileInputTest.cpp @@ -9,6 +9,7 @@ #include "clang/ASTMatchers/ASTMatchFinder.h" #include "clang/ASTMatchers/ASTMatchers.h" #include "clang/Basic/FileManager.h" +#include "clang/Driver/CreateInvocationFromArgs.h" #include "clang/Frontend/CompilerInstance.h" #include "clang/Frontend/CompilerInvocation.h" #include "clang/Frontend/FrontendActions.h" diff --git a/clang/unittests/Serialization/LoadSpecLazilyTest.cpp b/clang/unittests/Serialization/LoadSpecLazilyTest.cpp index d7b55491fddac..f55925aeae1f2 100644 --- a/clang/unittests/Serialization/LoadSpecLazilyTest.cpp +++ b/clang/unittests/Serialization/LoadSpecLazilyTest.cpp @@ -6,6 +6,7 @@ // 
//===----------------------------------------------------------------------===// +#include "clang/Driver/CreateInvocationFromArgs.h" #include "clang/Frontend/CompilerInstance.h" #include "clang/Frontend/FrontendAction.h" #include "clang/Frontend/FrontendActions.h" diff --git a/clang/unittests/Serialization/ModuleCacheTest.cpp b/clang/unittests/Serialization/ModuleCacheTest.cpp index e9b8da3dba6af..df26e54588b9e 100644 --- a/clang/unittests/Serialization/ModuleCacheTest.cpp +++ b/clang/unittests/Serialization/ModuleCacheTest.cpp @@ -7,6 +7,7 @@ //===----------------------------------------------------------------------===// #include "clang/Basic/FileManager.h" +#include "clang/Driver/CreateInvocationFromArgs.h" #include "clang/Frontend/CompilerInstance.h" #include "clang/Frontend/CompilerInvocation.h" #include "clang/Frontend/FrontendActions.h" diff --git a/clang/unittests/Serialization/NoCommentsTest.cpp b/clang/unittests/Serialization/NoCommentsTest.cpp index 01bb6999a7c90..444a082bba907 100644 --- a/clang/unittests/Serialization/NoCommentsTest.cpp +++ b/clang/unittests/Serialization/NoCommentsTest.cpp @@ -9,6 +9,7 @@ #include "clang/ASTMatchers/ASTMatchFinder.h" #include "clang/ASTMatchers/ASTMatchers.h" #include "clang/Basic/FileManager.h" +#include "clang/Driver/CreateInvocationFromArgs.h" #include "clang/Frontend/CompilerInstance.h" #include "clang/Frontend/CompilerInvocation.h" #include "clang/Frontend/FrontendActions.h" diff --git a/clang/unittests/Serialization/PreambleInNamedModulesTest.cpp b/clang/unittests/Serialization/PreambleInNamedModulesTest.cpp index 55ee72875ead2..b826f20ce4d70 100644 --- a/clang/unittests/Serialization/PreambleInNamedModulesTest.cpp +++ b/clang/unittests/Serialization/PreambleInNamedModulesTest.cpp @@ -6,6 +6,7 @@ // //===----------------------------------------------------------------------===// +#include "clang/Driver/CreateInvocationFromArgs.h" #include "clang/Frontend/CompilerInstance.h" #include 
"clang/Frontend/CompilerInvocation.h" #include "clang/Frontend/FrontendActions.h" diff --git a/clang/unittests/Serialization/VarDeclConstantInitTest.cpp b/clang/unittests/Serialization/VarDeclConstantInitTest.cpp index 743f851fc5fe1..2be01def49809 100644 --- a/clang/unittests/Serialization/VarDeclConstantInitTest.cpp +++ b/clang/unittests/Serialization/VarDeclConstantInitTest.cpp @@ -9,6 +9,7 @@ #include "clang/ASTMatchers/ASTMatchFinder.h" #include "clang/ASTMatchers/ASTMatchers.h" #include "clang/Basic/FileManager.h" +#include "clang/Driver/CreateInvocationFromArgs.h" #include "clang/Frontend/CompilerInstance.h" #include "clang/Frontend/CompilerInvocation.h" #include "clang/Frontend/FrontendActions.h" diff --git a/clang/unittests/Tooling/SourceCodeTest.cpp b/clang/unittests/Tooling/SourceCodeTest.cpp index 549b77752f1c2..2f59ced0ebc83 100644 --- a/clang/unittests/Tooling/SourceCodeTest.cpp +++ b/clang/unittests/Tooling/SourceCodeTest.cpp @@ -510,10 +510,12 @@ TEST(SourceCodeTest, EditInvolvingExpansionIgnoringExpansionShouldFail) { #define M1(x) x(1) #define M2(x, y) x ## y #define M3(x) foobar(x) +#define M4(x, y) x y int foobar(int); int a = M1(foobar); int b = M2(foo, bar(2)); int c = M3(3); +int d = M4(foobar, (4)); )cpp"); CallsVisitor Visitor; diff --git a/clang/unittests/Tooling/Syntax/TokensTest.cpp b/clang/unittests/Tooling/Syntax/TokensTest.cpp index 47184cbf5d768..468ca5ddd2c75 100644 --- a/clang/unittests/Tooling/Syntax/TokensTest.cpp +++ b/clang/unittests/Tooling/Syntax/TokensTest.cpp @@ -20,6 +20,7 @@ #include "clang/Basic/SourceManager.h" #include "clang/Basic/TokenKinds.def" #include "clang/Basic/TokenKinds.h" +#include "clang/Driver/CreateInvocationFromArgs.h" #include "clang/Frontend/CompilerInstance.h" #include "clang/Frontend/FrontendAction.h" #include "clang/Frontend/Utils.h" diff --git a/clang/unittests/Tooling/Syntax/TreeTestBase.cpp b/clang/unittests/Tooling/Syntax/TreeTestBase.cpp index b2be64fc08f3d..dad75854240ef 100644 --- 
a/clang/unittests/Tooling/Syntax/TreeTestBase.cpp +++ b/clang/unittests/Tooling/Syntax/TreeTestBase.cpp @@ -13,6 +13,7 @@ #include "TreeTestBase.h" #include "clang/AST/ASTConsumer.h" #include "clang/Basic/LLVM.h" +#include "clang/Driver/CreateInvocationFromArgs.h" #include "clang/Frontend/CompilerInstance.h" #include "clang/Frontend/CompilerInvocation.h" #include "clang/Frontend/FrontendAction.h" diff --git a/compiler-rt/cmake/Modules/AllSupportedArchDefs.cmake b/compiler-rt/cmake/Modules/AllSupportedArchDefs.cmake index ca45d7bd2af7f..c10367715396e 100644 --- a/compiler-rt/cmake/Modules/AllSupportedArchDefs.cmake +++ b/compiler-rt/cmake/Modules/AllSupportedArchDefs.cmake @@ -102,7 +102,7 @@ endif() set(ALL_CFI_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} ${MIPS64} ${HEXAGON} ${LOONGARCH64}) set(ALL_SCUDO_STANDALONE_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} - ${MIPS32} ${MIPS64} ${PPC64} ${HEXAGON} ${LOONGARCH64} ${RISCV64}) + ${MIPS32} ${MIPS64} ${PPC64} ${HEXAGON} ${LOONGARCH64} ${RISCV64} ${S390X}) if(APPLE) set(ALL_XRAY_SUPPORTED_ARCH ${X86_64} ${ARM64}) else() diff --git a/compiler-rt/test/ubsan_minimal/TestCases/override-callback.c b/compiler-rt/test/ubsan_minimal/TestCases/override-callback.c index aaed134b3ae81..8c04a0091cb11 100644 --- a/compiler-rt/test/ubsan_minimal/TestCases/override-callback.c +++ b/compiler-rt/test/ubsan_minimal/TestCases/override-callback.c @@ -1,6 +1,7 @@ -// RUN: %clang_min_runtime -fsanitize=implicit-integer-sign-change %s -o %t && %run %t 2>&1 | FileCheck %s -// RUN: %clang_min_runtime -fsanitize=implicit-integer-sign-change -fno-sanitize-recover=all %s -o %t && not --crash %run %t 2>&1 | FileCheck %s -// RUN: %clang_min_runtime -fsanitize=implicit-integer-sign-change -fno-sanitize-recover=all -DOVERRIDE=1 %s -o %t && not --crash %run %t 2>&1 | FileCheck %s --check-prefixes=FATAL +// RUN: %clang_min_runtime -fsanitize=implicit-integer-sign-change %s -o %t && %run %t 2>&1 | FileCheck %s +// RUN: %clang_min_runtime 
-fsanitize=implicit-integer-sign-change -fsanitize-handler-preserve-all-regs -DPRESERVE %s -o %t && %run %t 2>&1 | FileCheck %s --check-prefixes=PRESERVE +// RUN: %clang_min_runtime -fsanitize=implicit-integer-sign-change -fno-sanitize-recover=all %s -o %t && not --crash %run %t 2>&1 | FileCheck %s +// RUN: %clang_min_runtime -fsanitize=implicit-integer-sign-change -fno-sanitize-recover=all -DOVERRIDE=1 %s -o %t && not --crash %run %t 2>&1 | FileCheck %s --check-prefixes=FATAL #include #include @@ -9,8 +10,21 @@ static int Result; void __ubsan_report_error(const char *kind, uintptr_t caller) { +// -fsanitize-handler-preserve-all-regs is ignored on other architectures. +// Pretend we called the other handler on those. +#if defined(PRESERVE) && !defined(__aarch64__) && !defined(__x86_64__) + fprintf(stderr, "CUSTOM_CALLBACK_PRESERVE: %s\n", kind); +#else fprintf(stderr, "CUSTOM_CALLBACK: %s\n", kind); +#endif +} + +#if defined(__aarch64__) || defined(__x86_64__) +[[clang::preserve_all]] void __ubsan_report_error_preserve(const char *kind, + uintptr_t caller) { + fprintf(stderr, "CUSTOM_CALLBACK_PRESERVE: %s\n", kind); } +#endif #if OVERRIDE void __ubsan_report_error_fatal(const char *kind, uintptr_t caller) { @@ -21,5 +35,6 @@ void __ubsan_report_error_fatal(const char *kind, uintptr_t caller) { int main(int argc, const char **argv) { int32_t t0 = (~((uint32_t)0)); // CHECK: CUSTOM_CALLBACK: implicit-conversion + // PRESERVE: CUSTOM_CALLBACK_PRESERVE: implicit-conversion // FATAL: FATAL_CALLBACK: implicit-conversion } diff --git a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/DAP.py b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/DAP.py index 792e0be629fc4..68ca50a5e81db 100644 --- a/cross-project-tests/debuginfo-tests/dexter/dex/debugger/DAP.py +++ b/cross-project-tests/debuginfo-tests/dexter/dex/debugger/DAP.py @@ -763,20 +763,27 @@ def launch(self, cmdline): launch_request = self._get_launch_params(cmdline) - # For some reason, we *must* submit
in the order launch->configurationDone, and then we will receive responses - # in the order configurationDone->launch. - self._flush_breakpoints() + # Per DAP protocol, the correct sequence is: + # 1. Send launch request + # 2. Wait for launch response and "initialized" event + # 3. Set breakpoints + # 4. Send configurationDone to start the process launch_req_id = self.send_message(self.make_request("launch", launch_request)) - config_done_req_id = self.send_message(self.make_request("configurationDone")) - config_done_response = self._await_response(config_done_req_id) - assert config_done_response["success"], "Should simply receive an affirmative?" launch_response = self._await_response(launch_req_id) if not launch_response["success"]: raise DebuggerException( f"failure launching debugger: \"{launch_response['body']['error']['format']}\"" ) - # We can't interact meaningfully with the process until we have the thread ID and confirmation that the process - # has finished launching. + + # Set breakpoints after receiving launch response but before configurationDone. + self._flush_breakpoints() + + # Send configurationDone to allow the process to start running. + config_done_req_id = self.send_message(self.make_request("configurationDone")) + config_done_response = self._await_response(config_done_req_id) + assert config_done_response["success"] + + # Wait for the process to launch and obtain a thread ID. 
while self._debugger_state.thread is None or not self._debugger_state.launched: time.sleep(0.001) diff --git a/flang-rt/cmake/modules/HandleLibs.cmake b/flang-rt/cmake/modules/HandleLibs.cmake index a193045fc0bfa..9987d6f668978 100644 --- a/flang-rt/cmake/modules/HandleLibs.cmake +++ b/flang-rt/cmake/modules/HandleLibs.cmake @@ -45,8 +45,6 @@ elseif (FLANG_RT_LIBCXX_PROVIDER STREQUAL "llvm") endif () if (FLANG_RT_HAS_STDLIB_FLAG) - target_compile_options(flang-rt-libc-headers INTERFACE - $<$:$> - ) + target_compile_options(flang-rt-libc-headers INTERFACE $<$:-stdlib=libc++>) endif () endif () diff --git a/flang-rt/lib/cuda/allocator.cpp b/flang-rt/lib/cuda/allocator.cpp index 5436051002265..d2aa832883e65 100644 --- a/flang-rt/lib/cuda/allocator.cpp +++ b/flang-rt/lib/cuda/allocator.cpp @@ -19,8 +19,6 @@ #include "flang/Runtime/CUDA/common.h" #include "flang/Support/Fortran.h" -#include "cuda_runtime.h" - namespace Fortran::runtime::cuda { struct DeviceAllocation { @@ -133,6 +131,15 @@ void RTDEF(CUFRegisterAllocator)() { allocatorRegistry.Register( kUnifiedAllocatorPos, {&CUFAllocUnified, CUFFreeUnified}); } + +cudaStream_t RTDECL(CUFAssociatedGetStream)(void *p) { + int pos = findAllocation(p); + if (pos >= 0) { + cudaStream_t stream = deviceAllocations[pos].stream; + return stream; + } + return nullptr; +} } void *CUFAllocPinned( diff --git a/flang-rt/unittests/Runtime/CUDA/Allocatable.cpp b/flang-rt/unittests/Runtime/CUDA/Allocatable.cpp index 9935ae0eaac2f..4e65326b31a62 100644 --- a/flang-rt/unittests/Runtime/CUDA/Allocatable.cpp +++ b/flang-rt/unittests/Runtime/CUDA/Allocatable.cpp @@ -121,3 +121,54 @@ TEST(AllocatableCUFTest, StreamDeviceAllocatable) { cudaDeviceSynchronize(); EXPECT_EQ(cudaSuccess, cudaGetLastError()); } + +TEST(AllocatableAsyncTest, StreamDeviceAllocatable) { + using Fortran::common::TypeCategory; + RTNAME(CUFRegisterAllocator)(); + // REAL(4), DEVICE, ALLOCATABLE :: a(:) + auto a{createAllocatable(TypeCategory::Real, 4)}; + 
a->SetAllocIdx(kDeviceAllocatorPos); + EXPECT_EQ((int)kDeviceAllocatorPos, a->GetAllocIdx()); + EXPECT_FALSE(a->HasAddendum()); + RTNAME(AllocatableSetBounds)(*a, 0, 1, 10); + + cudaStream_t stream; + cudaStreamCreate(&stream); + EXPECT_EQ(cudaSuccess, cudaGetLastError()); + + RTNAME(AllocatableAllocate) + (*a, /*asyncObject=*/(int64_t *)&stream, /*hasStat=*/false, + /*errMsg=*/nullptr, __FILE__, __LINE__); + EXPECT_TRUE(a->IsAllocated()); + cudaDeviceSynchronize(); + EXPECT_EQ(cudaSuccess, cudaGetLastError()); + cudaStream_t s = RTDECL(CUFAssociatedGetStream)(a->raw().base_addr); + EXPECT_EQ(s, stream); + RTNAME(AllocatableDeallocate) + (*a, /*hasStat=*/false, /*errMsg=*/nullptr, __FILE__, __LINE__); + EXPECT_FALSE(a->IsAllocated()); + cudaDeviceSynchronize(); + + cudaStream_t defaultStream = 0; + RTNAME(AllocatableAllocate) + (*a, /*asyncObject=*/(int64_t *)&defaultStream, /*hasStat=*/false, + /*errMsg=*/nullptr, __FILE__, __LINE__); + EXPECT_TRUE(a->IsAllocated()); + cudaDeviceSynchronize(); + EXPECT_EQ(cudaSuccess, cudaGetLastError()); + cudaStream_t d = RTDECL(CUFAssociatedGetStream)(a->raw().base_addr); + EXPECT_EQ(d, defaultStream); + RTNAME(AllocatableDeallocate) + (*a, /*hasStat=*/false, /*errMsg=*/nullptr, __FILE__, __LINE__); + EXPECT_FALSE(a->IsAllocated()); + cudaDeviceSynchronize(); + + RTNAME(AllocatableAllocate) + (*a, /*asyncObject=*/nullptr, /*hasStat=*/false, /*errMsg=*/nullptr, __FILE__, + __LINE__); + EXPECT_TRUE(a->IsAllocated()); + cudaDeviceSynchronize(); + EXPECT_EQ(cudaSuccess, cudaGetLastError()); + cudaStream_t empty = RTDECL(CUFAssociatedGetStream)(a->raw().base_addr); + EXPECT_EQ(empty, nullptr); +} diff --git a/flang/include/flang/Optimizer/Dialect/FIROps.td b/flang/include/flang/Optimizer/Dialect/FIROps.td index d416d6c61f178..5d16b9816e318 100644 --- a/flang/include/flang/Optimizer/Dialect/FIROps.td +++ b/flang/include/flang/Optimizer/Dialect/FIROps.td @@ -3753,7 +3753,7 @@ def fir_DeclareReductionOp : fir_Op<"declare_reduction", 
[IsolatedFromAbove, duplication at the moment. TODO Combine both ops into one. See: https://discourse.llvm.org/t/dialect-for-data-locality-sharing-specifiers-clauses-in-openmp-openacc-and-do-concurrent/86108. - Declares a `do concurrent` reduction. This requires two mandatory and three + Declares a `do concurrent` reduction. This requires two mandatory and four optional regions. 1. The optional alloc region specifies how to allocate the thread-local @@ -3782,6 +3782,9 @@ def fir_DeclareReductionOp : fir_Op<"declare_reduction", [IsolatedFromAbove, allocated by the initializer region. The region has an argument that contains the value of the thread-local reduction accumulator. This will be executed after the reduction has completed. + 6. The DataPtrPtr region specifies how to access the base address of a + boxed-value. This is used, in particular, for GPU reductions in order + know where partial reduction results are stored in remote lanes. Note that the MLIR type system does not allow for type-polymorphic reductions. Separate reduction declarations should be created for different @@ -3789,23 +3792,30 @@ def fir_DeclareReductionOp : fir_Op<"declare_reduction", [IsolatedFromAbove, For initializer and reduction regions, the operand to `fir.yield` must match the parent operation's results. + + * `$byref_element_type`: For by-ref reductions, we want to keep track of the + boxed/allocated type. For example, for a `real, allocatable` variable, + `real` should be stored in this attribute. }]; let arguments = (ins SymbolNameAttr:$sym_name, - TypeAttr:$type); + TypeAttr:$type, + OptionalAttr:$byref_element_type); let regions = (region MaxSizedRegion<1>:$allocRegion, AnyRegion:$initializerRegion, AnyRegion:$reductionRegion, AnyRegion:$atomicReductionRegion, - AnyRegion:$cleanupRegion); + AnyRegion:$cleanupRegion, + AnyRegion:$dataPtrPtrRegion); let assemblyFormat = "$sym_name `:` $type attr-dict-with-keyword " "( `alloc` $allocRegion^ )? 
" "`init` $initializerRegion " "`combiner` $reductionRegion " "( `atomic` $atomicReductionRegion^ )? " - "( `cleanup` $cleanupRegion^ )? "; + "( `cleanup` $cleanupRegion^ )? " + "( `data_ptr_ptr` $dataPtrPtrRegion^ )? "; let extraClassDeclaration = [{ mlir::BlockArgument getAllocMoldArg() { diff --git a/flang/include/flang/Optimizer/Transforms/Passes.h b/flang/include/flang/Optimizer/Transforms/Passes.h index f83a1559fa016..4dcdddaac8ee5 100644 --- a/flang/include/flang/Optimizer/Transforms/Passes.h +++ b/flang/include/flang/Optimizer/Transforms/Passes.h @@ -40,7 +40,6 @@ std::unique_ptr createArrayValueCopyPass(fir::ArrayValueCopyOptions options = {}); std::unique_ptr createMemDataFlowOptPass(); std::unique_ptr createPromoteToAffinePass(); -std::unique_ptr createFIRToSCFPass(); std::unique_ptr createAddDebugInfoPass(fir::AddDebugInfoOptions options = {}); diff --git a/flang/include/flang/Optimizer/Transforms/Passes.td b/flang/include/flang/Optimizer/Transforms/Passes.td index 0f613584c6e17..f5403ab6ff503 100644 --- a/flang/include/flang/Optimizer/Transforms/Passes.td +++ b/flang/include/flang/Optimizer/Transforms/Passes.td @@ -81,7 +81,6 @@ def FIRToSCFPass : Pass<"fir-to-scf"> { let description = [{ Convert FIR structured control flow ops to SCF dialect. 
}]; - let constructor = "::fir::createFIRToSCFPass()"; let dependentDialects = [ "fir::FIROpsDialect", "mlir::scf::SCFDialect" ]; diff --git a/flang/include/flang/Runtime/CUDA/allocator.h b/flang/include/flang/Runtime/CUDA/allocator.h index 59fdb22b6e663..4e38482a7de30 100644 --- a/flang/include/flang/Runtime/CUDA/allocator.h +++ b/flang/include/flang/Runtime/CUDA/allocator.h @@ -13,11 +13,14 @@ #include "flang/Runtime/descriptor-consts.h" #include "flang/Runtime/entry-names.h" +#include "cuda_runtime.h" + namespace Fortran::runtime::cuda { extern "C" { void RTDECL(CUFRegisterAllocator)(); +cudaStream_t RTDECL(CUFAssociatedGetStream)(void *); } void *CUFAllocPinned(std::size_t, std::int64_t *); diff --git a/flang/lib/Frontend/CMakeLists.txt b/flang/lib/Frontend/CMakeLists.txt index bb0b4a39cec9b..fb74b3dcb280e 100644 --- a/flang/lib/Frontend/CMakeLists.txt +++ b/flang/lib/Frontend/CMakeLists.txt @@ -75,7 +75,6 @@ add_flang_library(flangFrontend CLANG_LIBS clangBasic - clangDriver clangOptions ) diff --git a/flang/lib/Frontend/CompilerInvocation.cpp b/flang/lib/Frontend/CompilerInvocation.cpp index 0c32f3914e04b..b6c4e6303cdac 100644 --- a/flang/lib/Frontend/CompilerInvocation.cpp +++ b/flang/lib/Frontend/CompilerInvocation.cpp @@ -325,10 +325,9 @@ static void parseCodeGenArgs(Fortran::frontend::CodeGenOptions &opts, for (auto *a : args.filtered(clang::options::OPT_fpass_plugin_EQ)) opts.LLVMPassPlugins.push_back(a->getValue()); - opts.Reciprocals = clang::driver::tools::parseMRecipOption(diags, args); + opts.Reciprocals = clang::parseMRecipOption(diags, args); - opts.PreferVectorWidth = - clang::driver::tools::parseMPreferVectorWidthOption(diags, args); + opts.PreferVectorWidth = clang::parseMPreferVectorWidthOption(diags, args); // -fembed-offload-object option for (auto *a : args.filtered(clang::options::OPT_fembed_offload_object_EQ)) diff --git a/flang/lib/Lower/OpenMP/ClauseProcessor.h b/flang/lib/Lower/OpenMP/ClauseProcessor.h index 
529b871330052..54ec9c5f0d752 100644 --- a/flang/lib/Lower/OpenMP/ClauseProcessor.h +++ b/flang/lib/Lower/OpenMP/ClauseProcessor.h @@ -20,7 +20,6 @@ #include "flang/Lower/OpenMP/Clauses.h" #include "flang/Lower/Support/ReductionProcessor.h" #include "flang/Optimizer/Builder/Todo.h" -#include "flang/Parser/dump-parse-tree.h" #include "flang/Parser/parse-tree.h" #include "mlir/Dialect/OpenMP/OpenMPDialect.h" diff --git a/flang/lib/Lower/Support/ReductionProcessor.cpp b/flang/lib/Lower/Support/ReductionProcessor.cpp index 721cb45cd7d24..db8ad909b1d2f 100644 --- a/flang/lib/Lower/Support/ReductionProcessor.cpp +++ b/flang/lib/Lower/Support/ReductionProcessor.cpp @@ -572,10 +572,21 @@ DeclareRedType ReductionProcessor::createDeclareReductionHelper( mlir::OpBuilder modBuilder(module.getBodyRegion()); mlir::Type valTy = fir::unwrapRefType(type); - if (!isByRef) + + // For by-ref reductions, we want to keep track of the + // boxed/referenced/allocated type. For example, for a `real, allocatable` + // variable, `real` should be stored. 
+ mlir::TypeAttr boxedTyAttr{}; + mlir::Type boxedTy; + + if (isByRef) { + boxedTy = fir::unwrapPassByRefType(valTy); + boxedTyAttr = mlir::TypeAttr::get(boxedTy); + } else type = valTy; - decl = DeclareRedType::create(modBuilder, loc, reductionOpName, type); + decl = DeclareRedType::create(modBuilder, loc, reductionOpName, type, + boxedTyAttr); createReductionAllocAndInitRegions(converter, loc, decl, genInitValueCB, type, isByRef); builder.createBlock(&decl.getReductionRegion(), @@ -585,6 +596,38 @@ DeclareRedType ReductionProcessor::createDeclareReductionHelper( mlir::Value op1 = decl.getReductionRegion().front().getArgument(0); mlir::Value op2 = decl.getReductionRegion().front().getArgument(1); genCombinerCB(builder, loc, type, op1, op2, isByRef); + + if (isByRef && fir::isa_box_type(valTy)) { + bool isBoxReductionSupported = [&]() { + auto offloadMod = llvm::dyn_cast( + *builder.getModule()); + + // This check tests the implementation status on the GPU. Box reductions + // are fully supported on the CPU. + if (!offloadMod.getIsGPU()) + return true; + + auto seqTy = mlir::dyn_cast(boxedTy); + + // Dynamically-shaped arrays are not supported yet on the GPU. 
+ return !seqTy || !fir::sequenceWithNonConstantShape(seqTy); + }(); + + if (!isBoxReductionSupported) { + TODO(loc, "Reduction of dynamically-shaped arrays are not supported yet " + "on the GPU."); + } + + mlir::Region &dataPtrPtrRegion = decl.getDataPtrPtrRegion(); + mlir::Block &dataAddrBlock = *builder.createBlock( + &dataPtrPtrRegion, dataPtrPtrRegion.end(), {type}, {loc}); + builder.setInsertionPointToEnd(&dataAddrBlock); + mlir::Value boxRefOperand = dataAddrBlock.getArgument(0); + mlir::Value baseAddrOffset = fir::BoxOffsetOp::create( + builder, loc, boxRefOperand, fir::BoxFieldAttr::base_addr); + genYield(builder, loc, baseAddrOffset); + } + return decl; } diff --git a/flang/lib/Optimizer/OpenMP/DoConcurrentConversion.cpp b/flang/lib/Optimizer/OpenMP/DoConcurrentConversion.cpp index 9aad8cddc60a1..1012a9608aa27 100644 --- a/flang/lib/Optimizer/OpenMP/DoConcurrentConversion.cpp +++ b/flang/lib/Optimizer/OpenMP/DoConcurrentConversion.cpp @@ -848,7 +848,8 @@ class DoConcurrentConversion if (!ompReducer) { ompReducer = mlir::omp::DeclareReductionOp::create( rewriter, firReducer.getLoc(), ompReducerName, - firReducer.getTypeAttr().getValue()); + firReducer.getTypeAttr().getValue(), + firReducer.getByrefElementTypeAttr()); cloneFIRRegionToOMP(rewriter, firReducer.getAllocRegion(), ompReducer.getAllocRegion()); diff --git a/flang/lib/Optimizer/Transforms/FIRToSCF.cpp b/flang/lib/Optimizer/Transforms/FIRToSCF.cpp index e72ee333101f5..187caa6043ac8 100644 --- a/flang/lib/Optimizer/Transforms/FIRToSCF.cpp +++ b/flang/lib/Optimizer/Transforms/FIRToSCF.cpp @@ -18,6 +18,8 @@ namespace fir { namespace { class FIRToSCFPass : public fir::impl::FIRToSCFPassBase { + using FIRToSCFPassBase::FIRToSCFPassBase; + public: void runOnOperation() override; }; @@ -230,7 +232,3 @@ void FIRToSCFPass::runOnOperation() { fir::populateFIRToSCFRewrites(patterns, parallelUnordered); walkAndApplyPatterns(getOperation(), std::move(patterns)); } - -std::unique_ptr fir::createFIRToSCFPass() { 
- return std::make_unique(); -} diff --git a/flang/test/Lower/OpenMP/delayed-privatization-reduction-byref.f90 b/flang/test/Lower/OpenMP/delayed-privatization-reduction-byref.f90 index 4b6a643f94059..4c7b6ac5f5f9b 100644 --- a/flang/test/Lower/OpenMP/delayed-privatization-reduction-byref.f90 +++ b/flang/test/Lower/OpenMP/delayed-privatization-reduction-byref.f90 @@ -22,7 +22,7 @@ subroutine red_and_delayed_private ! CHECK-SAME: @[[PRIVATIZER_SYM:.*]] : i32 ! CHECK-LABEL: omp.declare_reduction -! CHECK-SAME: @[[REDUCTION_SYM:.*]] : !fir.ref alloc +! CHECK-SAME: @[[REDUCTION_SYM:.*]] : !fir.ref attributes {byref_element_type = i32} alloc ! CHECK-LABEL: _QPred_and_delayed_private ! CHECK: omp.parallel diff --git a/flang/test/Lower/OpenMP/parallel-reduction-allocatable-array.f90 b/flang/test/Lower/OpenMP/parallel-reduction-allocatable-array.f90 index 41c7d69ebb3ba..f56875dcb518b 100644 --- a/flang/test/Lower/OpenMP/parallel-reduction-allocatable-array.f90 +++ b/flang/test/Lower/OpenMP/parallel-reduction-allocatable-array.f90 @@ -18,7 +18,7 @@ program reduce end program -! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_heap_Uxi32 : !fir.ref>>> alloc { +! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_heap_Uxi32 : !fir.ref>>> attributes {byref_element_type = !fir.array} alloc { ! CHECK: %[[VAL_10:.*]] = fir.alloca !fir.box>> ! CHECK: omp.yield(%[[VAL_10]] : !fir.ref>>>) ! CHECK-LABEL: } init { diff --git a/flang/test/Lower/OpenMP/parallel-reduction-array-lb.f90 b/flang/test/Lower/OpenMP/parallel-reduction-array-lb.f90 index aa91e1e0e8b15..d9ba3bed464f8 100644 --- a/flang/test/Lower/OpenMP/parallel-reduction-array-lb.f90 +++ b/flang/test/Lower/OpenMP/parallel-reduction-array-lb.f90 @@ -12,7 +12,7 @@ program reduce end program -! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_3x2xi32 : !fir.ref>> alloc { +! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_3x2xi32 : !fir.ref>> {{.*}} alloc { ! 
CHECK: %[[VAL_15:.*]] = fir.alloca !fir.box> ! CHECK: omp.yield(%[[VAL_15]] : !fir.ref>>) ! CHECK-LABEL: } init { diff --git a/flang/test/Lower/OpenMP/parallel-reduction-array.f90 b/flang/test/Lower/OpenMP/parallel-reduction-array.f90 index 59595de338d50..636660f279e85 100644 --- a/flang/test/Lower/OpenMP/parallel-reduction-array.f90 +++ b/flang/test/Lower/OpenMP/parallel-reduction-array.f90 @@ -17,7 +17,7 @@ program reduce print *,i end program -! CPU-LABEL: omp.declare_reduction @add_reduction_byref_box_3xi32 : !fir.ref>> alloc { +! CPU-LABEL: omp.declare_reduction @add_reduction_byref_box_3xi32 : !fir.ref>> attributes {byref_element_type = !fir.array<3xi32>} alloc { ! CPU: %[[VAL_8:.*]] = fir.alloca !fir.box> ! CPU: omp.yield(%[[VAL_8]] : !fir.ref>>) ! CPU-LABEL: } init { diff --git a/flang/test/Lower/OpenMP/parallel-reduction-array2.f90 b/flang/test/Lower/OpenMP/parallel-reduction-array2.f90 index 14338c6f50817..9cf8a63427ed1 100644 --- a/flang/test/Lower/OpenMP/parallel-reduction-array2.f90 +++ b/flang/test/Lower/OpenMP/parallel-reduction-array2.f90 @@ -13,7 +13,7 @@ program reduce print *,i end program -! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_3xi32 : !fir.ref>> alloc { +! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_3xi32 : !fir.ref>> {{.*}} alloc { ! CHECK: %[[VAL_8:.*]] = fir.alloca !fir.box> ! CHECK: omp.yield(%[[VAL_8]] : !fir.ref>>) ! CHECK-LABEL: } init { diff --git a/flang/test/Lower/OpenMP/parallel-reduction-pointer-array.f90 b/flang/test/Lower/OpenMP/parallel-reduction-pointer-array.f90 index 36344458d1cae..3de2ba8f61f8e 100644 --- a/flang/test/Lower/OpenMP/parallel-reduction-pointer-array.f90 +++ b/flang/test/Lower/OpenMP/parallel-reduction-pointer-array.f90 @@ -19,7 +19,7 @@ program reduce end program -! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_ptr_Uxi32 : !fir.ref>>> alloc { +! 
CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_ptr_Uxi32 : !fir.ref>>> attributes {byref_element_type = !fir.array} alloc { ! CHECK: %[[VAL_3:.*]] = fir.alloca !fir.box>> ! CHECK: omp.yield(%[[VAL_3]] : !fir.ref>>>) ! CHECK-LABEL: } init { diff --git a/flang/test/Lower/OpenMP/parallel-reduction3.f90 b/flang/test/Lower/OpenMP/parallel-reduction3.f90 index 6ff7f96b2b9bf..7437e1d35a624 100644 --- a/flang/test/Lower/OpenMP/parallel-reduction3.f90 +++ b/flang/test/Lower/OpenMP/parallel-reduction3.f90 @@ -1,7 +1,7 @@ ! RUN: bbc -emit-hlfir -fopenmp -o - %s 2>&1 | FileCheck %s ! RUN: %flang_fc1 -emit-hlfir -fopenmp -o - %s 2>&1 | FileCheck %s -! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_Uxi32 : !fir.ref>> alloc { +! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_Uxi32 : !fir.ref>> {{.*}} alloc { ! CHECK: %[[VAL_8:.*]] = fir.alloca !fir.box> ! CHECK: omp.yield(%[[VAL_8]] : !fir.ref>>) ! CHECK-LABEL: } init { diff --git a/flang/test/Lower/OpenMP/reduction-array-intrinsic.f90 b/flang/test/Lower/OpenMP/reduction-array-intrinsic.f90 index bd91fa51a6988..779322712dbfe 100644 --- a/flang/test/Lower/OpenMP/reduction-array-intrinsic.f90 +++ b/flang/test/Lower/OpenMP/reduction-array-intrinsic.f90 @@ -9,7 +9,7 @@ subroutine max_array_reduction(l, r) !$omp end parallel end subroutine -! CHECK-LABEL: omp.declare_reduction @max_byref_box_Uxi32 : !fir.ref>> alloc { +! CHECK-LABEL: omp.declare_reduction @max_byref_box_Uxi32 : !fir.ref>> {{.*}} alloc { ! CHECK: %[[VAL_3:.*]] = fir.alloca !fir.box> ! CHECK: omp.yield(%[[VAL_3]] : !fir.ref>>) ! CHECK-LABEL: } init { diff --git a/flang/test/Lower/OpenMP/sections-array-reduction.f90 b/flang/test/Lower/OpenMP/sections-array-reduction.f90 index 1d286008a11f3..57e46c7bc8cae 100644 --- a/flang/test/Lower/OpenMP/sections-array-reduction.f90 +++ b/flang/test/Lower/OpenMP/sections-array-reduction.f90 @@ -14,7 +14,7 @@ subroutine sectionsReduction(x) end subroutine -! 
CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_Uxf32 : !fir.ref>> alloc { +! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_Uxf32 : !fir.ref>> {{.*}} alloc { ! [...] ! CHECK: omp.yield ! CHECK-LABEL: } init { diff --git a/flang/test/Lower/OpenMP/taskgroup-task-array-reduction.f90 b/flang/test/Lower/OpenMP/taskgroup-task-array-reduction.f90 index 18a4f75b86309..3a63bb09c59de 100644 --- a/flang/test/Lower/OpenMP/taskgroup-task-array-reduction.f90 +++ b/flang/test/Lower/OpenMP/taskgroup-task-array-reduction.f90 @@ -1,7 +1,7 @@ ! RUN: bbc -emit-hlfir -fopenmp -fopenmp-version=50 -o - %s 2>&1 | FileCheck %s ! RUN: %flang_fc1 -emit-hlfir -fopenmp -fopenmp-version=50 -o - %s 2>&1 | FileCheck %s -! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_Uxf32 : !fir.ref>> alloc { +! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_Uxf32 : !fir.ref>> {{.*}} alloc { ! [...] ! CHECK: omp.yield ! CHECK-LABEL: } init { diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-allocatable-array-minmax.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-allocatable-array-minmax.f90 index 2cd953de0dffa..ed81577ecce16 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-allocatable-array-minmax.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-allocatable-array-minmax.f90 @@ -32,7 +32,7 @@ program reduce15 print *,"min: ", mins end program -! CHECK-LABEL: omp.declare_reduction @min_byref_box_heap_Uxi32 : !fir.ref>>> alloc { +! CHECK-LABEL: omp.declare_reduction @min_byref_box_heap_Uxi32 : !fir.ref>>> {{.*}} alloc { ! CHECK: %[[VAL_3:.*]] = fir.alloca !fir.box>> ! CHECK: omp.yield(%[[VAL_3]] : !fir.ref>>>) ! CHECK-LABEL: } init { @@ -93,7 +93,7 @@ program reduce15 ! CHECK: omp.yield ! CHECK: } -! CHECK-LABEL: omp.declare_reduction @max_byref_box_heap_Uxi32 : !fir.ref>>> alloc { +! CHECK-LABEL: omp.declare_reduction @max_byref_box_heap_Uxi32 : !fir.ref>>> {{.*}} alloc { ! CHECK: %[[VAL_3:.*]] = fir.alloca !fir.box>> ! 
CHECK: omp.yield(%[[VAL_3]] : !fir.ref>>>) ! CHECK-LABEL: } init { diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-allocatable.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-allocatable.f90 index 663851cba46c6..d8c0a36db126e 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-allocatable.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-allocatable.f90 @@ -18,7 +18,7 @@ program reduce end program -! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_heap_i32 : !fir.ref>> alloc { +! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_heap_i32 : !fir.ref>> attributes {byref_element_type = i32} alloc { ! CHECK: %[[VAL_2:.*]] = fir.alloca !fir.box> ! CHECK: omp.yield(%[[VAL_2]] : !fir.ref>>) ! CHECK-LABEL: } init { diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-array-assumed-shape.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-array-assumed-shape.f90 index 7184b3b102fd8..7ce1be03682b4 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-array-assumed-shape.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-array-assumed-shape.f90 @@ -22,7 +22,7 @@ subroutine reduce(r) end subroutine end program -! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_Uxf64 : !fir.ref>> alloc { +! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_Uxf64 : !fir.ref>> {{.*}} alloc { ! CHECK: %[[VAL_8:.*]] = fir.alloca !fir.box> ! CHECK: omp.yield(%[[VAL_8]] : !fir.ref>>) ! CHECK-LABEL: } init { diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-array-lb.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-array-lb.f90 index 2233a74600948..ec448cf20f111 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-array-lb.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-array-lb.f90 @@ -11,7 +11,7 @@ program reduce !$omp end parallel do end program -! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_2xi32 : !fir.ref>> alloc { +! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_2xi32 : !fir.ref>> {{.*}} alloc { ! 
CHECK: } combiner { ! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref>>, %[[ARG1:.*]]: !fir.ref>>): ! CHECK: %[[ARR0:.*]] = fir.load %[[ARG0]] : !fir.ref>> diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-array-lb2.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-array-lb2.f90 index 211bde19da8db..9da05a290ec21 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-array-lb2.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-array-lb2.f90 @@ -19,7 +19,7 @@ subroutine sub(a, lb, ub) end program -! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_Uxi32 : !fir.ref>> alloc { +! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_Uxi32 : !fir.ref>> {{.*}} alloc { ! CHECK: } combiner { ! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref>>, %[[ARG1:.*]]: !fir.ref>>): ! CHECK: %[[ARR0:.*]] = fir.load %[[ARG0]] : !fir.ref>> diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-array.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-array.f90 index afaeba27c5eae..14b657c8e180d 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-array.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-array.f90 @@ -14,7 +14,7 @@ program reduce print *,r end program -! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_2xi32 : !fir.ref>> alloc { +! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_2xi32 : !fir.ref>> attributes {byref_element_type = !fir.array<2xi32>} alloc { ! CHECK: %[[VAL_8:.*]] = fir.alloca !fir.box> ! CHECK: omp.yield(%[[VAL_8]] : !fir.ref>>) ! CHECK-LABEL: } init { diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-array2.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-array2.f90 index 25b2e97a1b7f7..d0a0c38e4ccb1 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-array2.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-array2.f90 @@ -14,7 +14,7 @@ program reduce print *,r end program -! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_2xi32 : !fir.ref>> alloc { +! 
CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_2xi32 : !fir.ref>> {{.*}} alloc { ! CHECK: %[[VAL_8:.*]] = fir.alloca !fir.box> ! CHECK: omp.yield(%[[VAL_8]] : !fir.ref>>) ! CHECK-LABEL: } init { diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-multiple-clauses.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-multiple-clauses.f90 index edd2bcb1d6be8..60a162d8f8002 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-multiple-clauses.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-multiple-clauses.f90 @@ -24,7 +24,7 @@ program main endprogram -! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_3x3xf64 : !fir.ref>> alloc { +! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_3x3xf64 : !fir.ref>> {{.*}} alloc { ! CHECK: %[[VAL_3:.*]] = fir.alloca !fir.box> ! CHECK: omp.yield(%[[VAL_3]] : !fir.ref>>) ! CHECK-LABEL: } init { diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-pointer.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-pointer.f90 index 27b726376fbeb..f640f5caddf76 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-pointer.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-pointer.f90 @@ -18,7 +18,7 @@ program reduce_pointer deallocate(v) end program -! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_ptr_i32 : !fir.ref>> alloc { +! CHECK-LABEL: omp.declare_reduction @add_reduction_byref_box_ptr_i32 : !fir.ref>> {{.*}} alloc { ! CHECK: %[[VAL_3:.*]] = fir.alloca !fir.box> ! CHECK: omp.yield(%[[VAL_3]] : !fir.ref>>) ! CHECK-LABEL: } init { diff --git a/flang/test/Lower/do_concurrent_reduce_allocatable.f90 b/flang/test/Lower/do_concurrent_reduce_allocatable.f90 index 873fd10dd1b97..4fb67c094b594 100644 --- a/flang/test/Lower/do_concurrent_reduce_allocatable.f90 +++ b/flang/test/Lower/do_concurrent_reduce_allocatable.f90 @@ -8,7 +8,7 @@ subroutine do_concurrent_allocatable end do end subroutine -! CHECK: fir.declare_reduction @[[RED_OP:.*]] : ![[RED_TYPE:.*]] alloc { +! 
CHECK: fir.declare_reduction @[[RED_OP:.*]] : ![[RED_TYPE:.*]] attributes {byref_element_type = !fir.array} alloc { ! CHECK: %[[ALLOC:.*]] = fir.alloca ! CHECK: fir.yield(%[[ALLOC]] : ![[RED_TYPE]]) ! CHECK: } init { diff --git a/libcxx/include/__compare/is_eq.h b/libcxx/include/__compare/is_eq.h index 9a82df1ebe88b..ee4d11bc7c792 100644 --- a/libcxx/include/__compare/is_eq.h +++ b/libcxx/include/__compare/is_eq.h @@ -20,12 +20,12 @@ _LIBCPP_BEGIN_NAMESPACE_STD #if _LIBCPP_STD_VER >= 20 -_LIBCPP_HIDE_FROM_ABI inline constexpr bool is_eq(partial_ordering __c) noexcept { return __c == 0; } -_LIBCPP_HIDE_FROM_ABI inline constexpr bool is_neq(partial_ordering __c) noexcept { return __c != 0; } -_LIBCPP_HIDE_FROM_ABI inline constexpr bool is_lt(partial_ordering __c) noexcept { return __c < 0; } -_LIBCPP_HIDE_FROM_ABI inline constexpr bool is_lteq(partial_ordering __c) noexcept { return __c <= 0; } -_LIBCPP_HIDE_FROM_ABI inline constexpr bool is_gt(partial_ordering __c) noexcept { return __c > 0; } -_LIBCPP_HIDE_FROM_ABI inline constexpr bool is_gteq(partial_ordering __c) noexcept { return __c >= 0; } +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline constexpr bool is_eq(partial_ordering __c) noexcept { return __c == 0; } +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline constexpr bool is_neq(partial_ordering __c) noexcept { return __c != 0; } +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline constexpr bool is_lt(partial_ordering __c) noexcept { return __c < 0; } +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline constexpr bool is_lteq(partial_ordering __c) noexcept { return __c <= 0; } +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline constexpr bool is_gt(partial_ordering __c) noexcept { return __c > 0; } +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline constexpr bool is_gteq(partial_ordering __c) noexcept { return __c >= 0; } #endif // _LIBCPP_STD_VER >= 20 diff --git a/libcxx/include/__condition_variable/condition_variable.h b/libcxx/include/__condition_variable/condition_variable.h index 
1e8edd5dcb009..b7151930e9226 100644 --- a/libcxx/include/__condition_variable/condition_variable.h +++ b/libcxx/include/__condition_variable/condition_variable.h @@ -170,7 +170,7 @@ class _LIBCPP_EXPORTED_FROM_ABI condition_variable { wait_for(unique_lock& __lk, const chrono::duration<_Rep, _Period>& __d, _Predicate __pred); typedef __libcpp_condvar_t* native_handle_type; - _LIBCPP_HIDE_FROM_ABI native_handle_type native_handle() { return &__cv_; } + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI native_handle_type native_handle() { return &__cv_; } private: void diff --git a/libcxx/include/__coroutine/coroutine_handle.h b/libcxx/include/__coroutine/coroutine_handle.h index b7add258510eb..b26a650748832 100644 --- a/libcxx/include/__coroutine/coroutine_handle.h +++ b/libcxx/include/__coroutine/coroutine_handle.h @@ -44,9 +44,9 @@ struct coroutine_handle { } // [coroutine.handle.export.import], export/import - _LIBCPP_HIDE_FROM_ABI constexpr void* address() const noexcept { return __handle_; } + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr void* address() const noexcept { return __handle_; } - _LIBCPP_HIDE_FROM_ABI static constexpr coroutine_handle from_address(void* __addr) noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI static constexpr coroutine_handle from_address(void* __addr) noexcept { coroutine_handle __tmp; __tmp.__handle_ = __addr; return __tmp; @@ -55,7 +55,7 @@ struct coroutine_handle { // [coroutine.handle.observers], observers _LIBCPP_HIDE_FROM_ABI constexpr explicit operator bool() const noexcept { return __handle_ != nullptr; } - _LIBCPP_HIDE_FROM_ABI bool done() const { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI bool done() const { _LIBCPP_ASSERT_VALID_EXTERNAL_API_CALL(__is_suspended(), "done() can be called only on suspended coroutines"); return __builtin_coro_done(__handle_); } @@ -100,7 +100,7 @@ struct coroutine_handle { _LIBCPP_HIDE_FROM_ABI constexpr coroutine_handle(nullptr_t) noexcept {} - _LIBCPP_HIDE_FROM_ABI static coroutine_handle 
from_promise(_Promise& __promise) { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI static coroutine_handle from_promise(_Promise& __promise) { using _RawPromise = __remove_cv_t<_Promise>; coroutine_handle __tmp; __tmp.__handle_ = @@ -114,9 +114,9 @@ struct coroutine_handle { } // [coroutine.handle.export.import], export/import - _LIBCPP_HIDE_FROM_ABI constexpr void* address() const noexcept { return __handle_; } + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr void* address() const noexcept { return __handle_; } - _LIBCPP_HIDE_FROM_ABI static constexpr coroutine_handle from_address(void* __addr) noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI static constexpr coroutine_handle from_address(void* __addr) noexcept { coroutine_handle __tmp; __tmp.__handle_ = __addr; return __tmp; @@ -130,7 +130,7 @@ struct coroutine_handle { // [coroutine.handle.observers], observers _LIBCPP_HIDE_FROM_ABI constexpr explicit operator bool() const noexcept { return __handle_ != nullptr; } - _LIBCPP_HIDE_FROM_ABI bool done() const { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI bool done() const { _LIBCPP_ASSERT_VALID_EXTERNAL_API_CALL(__is_suspended(), "done() can be called only on suspended coroutines"); return __builtin_coro_done(__handle_); } @@ -150,7 +150,7 @@ struct coroutine_handle { } // [coroutine.handle.promise], promise access - _LIBCPP_HIDE_FROM_ABI _Promise& promise() const { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _Promise& promise() const { return *static_cast<_Promise*>(__builtin_coro_promise(this->__handle_, alignof(_Promise), false)); } @@ -165,7 +165,7 @@ struct coroutine_handle { // [coroutine.handle.hash] template struct hash> { - _LIBCPP_HIDE_FROM_ABI size_t operator()(const coroutine_handle<_Tp>& __v) const noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI size_t operator()(const coroutine_handle<_Tp>& __v) const noexcept { return hash()(__v.address()); } }; diff --git a/libcxx/include/__coroutine/noop_coroutine_handle.h b/libcxx/include/__coroutine/noop_coroutine_handle.h index 
692398a8a8431..b9c54d3b42bef 100644 --- a/libcxx/include/__coroutine/noop_coroutine_handle.h +++ b/libcxx/include/__coroutine/noop_coroutine_handle.h @@ -35,7 +35,7 @@ struct coroutine_handle { // [coroutine.handle.noop.observers], observers _LIBCPP_HIDE_FROM_ABI constexpr explicit operator bool() const noexcept { return true; } - _LIBCPP_HIDE_FROM_ABI constexpr bool done() const noexcept { return false; } + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool done() const noexcept { return false; } // [coroutine.handle.noop.resumption], resumption _LIBCPP_HIDE_FROM_ABI constexpr void operator()() const noexcept {} @@ -43,13 +43,13 @@ struct coroutine_handle { _LIBCPP_HIDE_FROM_ABI constexpr void destroy() const noexcept {} // [coroutine.handle.noop.promise], promise access - _LIBCPP_HIDE_FROM_ABI noop_coroutine_promise& promise() const noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI noop_coroutine_promise& promise() const noexcept { return *static_cast( __builtin_coro_promise(this->__handle_, alignof(noop_coroutine_promise), false)); } // [coroutine.handle.noop.address], address - _LIBCPP_HIDE_FROM_ABI constexpr void* address() const noexcept { return __handle_; } + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr void* address() const noexcept { return __handle_; } private: _LIBCPP_HIDE_FROM_ABI friend coroutine_handle noop_coroutine() noexcept; @@ -86,7 +86,9 @@ inline noop_coroutine_handle::__noop_coroutine_frame_ty_ noop_coroutine_handle:: # endif // [coroutine.noop.coroutine] -inline _LIBCPP_HIDE_FROM_ABI noop_coroutine_handle noop_coroutine() noexcept { return noop_coroutine_handle(); } +[[nodiscard]] inline _LIBCPP_HIDE_FROM_ABI noop_coroutine_handle noop_coroutine() noexcept { + return noop_coroutine_handle(); +} _LIBCPP_END_NAMESPACE_STD diff --git a/libcxx/include/__flat_map/flat_map.h b/libcxx/include/__flat_map/flat_map.h index 159e652e1a326..84b60cdc9ae27 100644 --- a/libcxx/include/__flat_map/flat_map.h +++ b/libcxx/include/__flat_map/flat_map.h @@ 
-409,41 +409,45 @@ class flat_map { } // iterators - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 iterator begin() noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 iterator begin() noexcept { return iterator(__containers_.keys.begin(), __containers_.values.begin()); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator begin() const noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator begin() const noexcept { return const_iterator(__containers_.keys.begin(), __containers_.values.begin()); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 iterator end() noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 iterator end() noexcept { return iterator(__containers_.keys.end(), __containers_.values.end()); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator end() const noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator end() const noexcept { return const_iterator(__containers_.keys.end(), __containers_.values.end()); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 reverse_iterator rbegin() noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 reverse_iterator rbegin() noexcept { return reverse_iterator(end()); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_reverse_iterator rbegin() const noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_reverse_iterator rbegin() const noexcept { return const_reverse_iterator(end()); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 reverse_iterator rend() noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 reverse_iterator rend() noexcept { return reverse_iterator(begin()); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_reverse_iterator rend() const noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI 
_LIBCPP_CONSTEXPR_SINCE_CXX26 const_reverse_iterator rend() const noexcept { return const_reverse_iterator(begin()); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator cbegin() const noexcept { return begin(); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator cend() const noexcept { return end(); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_reverse_iterator crbegin() const noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator cbegin() const noexcept { + return begin(); + } + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator cend() const noexcept { + return end(); + } + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_reverse_iterator crbegin() const noexcept { return const_reverse_iterator(end()); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_reverse_iterator crend() const noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_reverse_iterator crend() const noexcept { return const_reverse_iterator(begin()); } @@ -452,22 +456,22 @@ class flat_map { return __containers_.keys.empty(); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 size_type size() const noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 size_type size() const noexcept { return __containers_.keys.size(); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 size_type max_size() const noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 size_type max_size() const noexcept { return std::min(__containers_.keys.max_size(), __containers_.values.max_size()); } // [flat.map.access], element access - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 mapped_type& operator[](const key_type& __x) + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 mapped_type& operator[](const key_type& __x) requires is_constructible_v { return 
try_emplace(__x).first->second; } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 mapped_type& operator[](key_type&& __x) + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 mapped_type& operator[](key_type&& __x) requires is_constructible_v { return try_emplace(std::move(__x)).first->second; @@ -476,11 +480,11 @@ class flat_map { template requires(__is_compare_transparent && is_constructible_v && is_constructible_v && !is_convertible_v<_Kp &&, const_iterator> && !is_convertible_v<_Kp &&, iterator>) - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 mapped_type& operator[](_Kp&& __x) { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 mapped_type& operator[](_Kp&& __x) { return try_emplace(std::forward<_Kp>(__x)).first->second; } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 mapped_type& at(const key_type& __x) { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 mapped_type& at(const key_type& __x) { auto __it = find(__x); if (__it == end()) { std::__throw_out_of_range("flat_map::at(const key_type&): Key does not exist"); @@ -488,7 +492,7 @@ class flat_map { return __it->second; } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const mapped_type& at(const key_type& __x) const { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const mapped_type& at(const key_type& __x) const { auto __it = find(__x); if (__it == end()) { std::__throw_out_of_range("flat_map::at(const key_type&) const: Key does not exist"); @@ -498,7 +502,7 @@ class flat_map { template requires __is_compare_transparent - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 mapped_type& at(const _Kp& __x) { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 mapped_type& at(const _Kp& __x) { auto __it = find(__x); if (__it == end()) { std::__throw_out_of_range("flat_map::at(const K&): Key does not exist"); @@ -508,7 +512,7 @@ class flat_map { template requires __is_compare_transparent - 
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const mapped_type& at(const _Kp& __x) const { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const mapped_type& at(const _Kp& __x) const { auto __it = find(__x); if (__it == end()) { std::__throw_out_of_range("flat_map::at(const K&) const: Key does not exist"); @@ -596,7 +600,7 @@ class flat_map { insert(sorted_unique, __il.begin(), __il.end()); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 containers extract() && { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 containers extract() && { auto __guard = std::__make_scope_guard([&]() noexcept { clear() /* noexcept */; }); auto __ret = std::move(__containers_); return __ret; @@ -753,116 +757,121 @@ class flat_map { } // observers - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 key_compare key_comp() const { return __compare_; } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 value_compare value_comp() const { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 key_compare key_comp() const { return __compare_; } + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 value_compare value_comp() const { return value_compare(__compare_); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const key_container_type& keys() const noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const key_container_type& keys() const noexcept { return __containers_.keys; } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const mapped_container_type& values() const noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const mapped_container_type& + values() const noexcept { return __containers_.values; } // map operations - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 iterator find(const key_type& __x) { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 iterator find(const key_type& __x) { return __find_impl(*this, 
__x); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator find(const key_type& __x) const { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator find(const key_type& __x) const { return __find_impl(*this, __x); } template requires __is_compare_transparent - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 iterator find(const _Kp& __x) { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 iterator find(const _Kp& __x) { return __find_impl(*this, __x); } template requires __is_compare_transparent - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator find(const _Kp& __x) const { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator find(const _Kp& __x) const { return __find_impl(*this, __x); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 size_type count(const key_type& __x) const { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 size_type count(const key_type& __x) const { return contains(__x) ? 1 : 0; } template requires __is_compare_transparent - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 size_type count(const _Kp& __x) const { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 size_type count(const _Kp& __x) const { return contains(__x) ? 
1 : 0; } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 bool contains(const key_type& __x) const { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 bool contains(const key_type& __x) const { return find(__x) != end(); } template requires __is_compare_transparent - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 bool contains(const _Kp& __x) const { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 bool contains(const _Kp& __x) const { return find(__x) != end(); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 iterator lower_bound(const key_type& __x) { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 iterator lower_bound(const key_type& __x) { return __lower_bound(*this, __x); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator lower_bound(const key_type& __x) const { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator + lower_bound(const key_type& __x) const { return __lower_bound(*this, __x); } template requires __is_compare_transparent - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 iterator lower_bound(const _Kp& __x) { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 iterator lower_bound(const _Kp& __x) { return __lower_bound(*this, __x); } template requires __is_compare_transparent - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator lower_bound(const _Kp& __x) const { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator lower_bound(const _Kp& __x) const { return __lower_bound(*this, __x); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 iterator upper_bound(const key_type& __x) { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 iterator upper_bound(const key_type& __x) { return __upper_bound(*this, __x); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator upper_bound(const key_type& __x) const { + [[nodiscard]] 
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator + upper_bound(const key_type& __x) const { return __upper_bound(*this, __x); } template requires __is_compare_transparent - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 iterator upper_bound(const _Kp& __x) { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 iterator upper_bound(const _Kp& __x) { return __upper_bound(*this, __x); } template requires __is_compare_transparent - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator upper_bound(const _Kp& __x) const { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 const_iterator upper_bound(const _Kp& __x) const { return __upper_bound(*this, __x); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 pair equal_range(const key_type& __x) { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 pair + equal_range(const key_type& __x) { return __equal_range_impl(*this, __x); } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 pair + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 pair equal_range(const key_type& __x) const { return __equal_range_impl(*this, __x); } template requires __is_compare_transparent - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 pair equal_range(const _Kp& __x) { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 pair + equal_range(const _Kp& __x) { return __equal_range_impl(*this, __x); } template requires __is_compare_transparent - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 pair + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 pair equal_range(const _Kp& __x) const { return __equal_range_impl(*this, __x); } diff --git a/libcxx/include/__flat_map/utils.h b/libcxx/include/__flat_map/utils.h index 3a05c715660dc..4b07e388d0255 100644 --- a/libcxx/include/__flat_map/utils.h +++ b/libcxx/include/__flat_map/utils.h @@ -16,6 +16,7 @@ #include <__utility/exception_guard.h> #include 
<__utility/forward.h> #include <__utility/move.h> +#include <__vector/container_traits.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) # pragma GCC system_header diff --git a/libcxx/include/__mdspan/extents.h b/libcxx/include/__mdspan/extents.h index 26219557dbae9..d16bbd2af44f1 100644 --- a/libcxx/include/__mdspan/extents.h +++ b/libcxx/include/__mdspan/extents.h @@ -299,11 +299,13 @@ class extents { public: // [mdspan.extents.obs], observers of multidimensional index space - _LIBCPP_HIDE_FROM_ABI static constexpr rank_type rank() noexcept { return __rank_; } - _LIBCPP_HIDE_FROM_ABI static constexpr rank_type rank_dynamic() noexcept { return __rank_dynamic_; } + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI static constexpr rank_type rank() noexcept { return __rank_; } + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI static constexpr rank_type rank_dynamic() noexcept { return __rank_dynamic_; } - _LIBCPP_HIDE_FROM_ABI constexpr index_type extent(rank_type __r) const noexcept { return __vals_.__value(__r); } - _LIBCPP_HIDE_FROM_ABI static constexpr size_t static_extent(rank_type __r) noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr index_type extent(rank_type __r) const noexcept { + return __vals_.__value(__r); + } + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI static constexpr size_t static_extent(rank_type __r) noexcept { return _Values::__static_value(__r); } diff --git a/libcxx/include/__mdspan/mdspan.h b/libcxx/include/__mdspan/mdspan.h index c0f27678197ce..9f3139a874ff9 100644 --- a/libcxx/include/__mdspan/mdspan.h +++ b/libcxx/include/__mdspan/mdspan.h @@ -87,12 +87,14 @@ class mdspan { using data_handle_type = typename accessor_type::data_handle_type; using reference = typename accessor_type::reference; - _LIBCPP_HIDE_FROM_ABI static constexpr rank_type rank() noexcept { return extents_type::rank(); } - _LIBCPP_HIDE_FROM_ABI static constexpr rank_type rank_dynamic() noexcept { return extents_type::rank_dynamic(); } - _LIBCPP_HIDE_FROM_ABI static constexpr size_t 
static_extent(rank_type __r) noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI static constexpr rank_type rank() noexcept { return extents_type::rank(); } + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI static constexpr rank_type rank_dynamic() noexcept { + return extents_type::rank_dynamic(); + } + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI static constexpr size_t static_extent(rank_type __r) noexcept { return extents_type::static_extent(__r); } - _LIBCPP_HIDE_FROM_ABI constexpr index_type extent(rank_type __r) const noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr index_type extent(rank_type __r) const noexcept { return __map_.extents().extent(__r); }; @@ -185,7 +187,7 @@ class mdspan { requires((is_convertible_v<_OtherIndexTypes, index_type> && ...) && (is_nothrow_constructible_v && ...) && (sizeof...(_OtherIndexTypes) == rank())) - _LIBCPP_HIDE_FROM_ABI constexpr reference operator[](_OtherIndexTypes... __indices) const { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr reference operator[](_OtherIndexTypes... 
__indices) const { // Note the standard layouts would also check this, but user provided ones may not, so we // check the precondition here _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(__mdspan_detail::__is_multidimensional_index_in(extents(), __indices...), @@ -196,7 +198,8 @@ class mdspan { template requires(is_convertible_v && is_nothrow_constructible_v) - _LIBCPP_HIDE_FROM_ABI constexpr reference operator[](const array< _OtherIndexType, rank()>& __indices) const { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr reference + operator[](const array< _OtherIndexType, rank()>& __indices) const { return __acc_.access(__ptr_, [&](index_sequence<_Idxs...>) { return __map_(__indices[_Idxs]...); }(make_index_sequence())); @@ -205,7 +208,7 @@ class mdspan { template requires(is_convertible_v && is_nothrow_constructible_v) - _LIBCPP_HIDE_FROM_ABI constexpr reference operator[](span<_OtherIndexType, rank()> __indices) const { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr reference operator[](span<_OtherIndexType, rank()> __indices) const { return __acc_.access(__ptr_, [&](index_sequence<_Idxs...>) { return __map_(__indices[_Idxs]...); }(make_index_sequence())); @@ -237,24 +240,28 @@ class mdspan { swap(__x.__acc_, __y.__acc_); } - _LIBCPP_HIDE_FROM_ABI constexpr const extents_type& extents() const noexcept { return __map_.extents(); }; - _LIBCPP_HIDE_FROM_ABI constexpr const data_handle_type& data_handle() const noexcept { return __ptr_; }; - _LIBCPP_HIDE_FROM_ABI constexpr const mapping_type& mapping() const noexcept { return __map_; }; - _LIBCPP_HIDE_FROM_ABI constexpr const accessor_type& accessor() const noexcept { return __acc_; }; + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr const extents_type& extents() const noexcept { + return __map_.extents(); + }; + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr const data_handle_type& data_handle() const noexcept { return __ptr_; }; + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr const mapping_type& mapping() const noexcept { return 
__map_; }; + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr const accessor_type& accessor() const noexcept { return __acc_; }; // per LWG-4021 "mdspan::is_always_meow() should be noexcept" - _LIBCPP_HIDE_FROM_ABI static constexpr bool is_always_unique() noexcept { return mapping_type::is_always_unique(); }; - _LIBCPP_HIDE_FROM_ABI static constexpr bool is_always_exhaustive() noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI static constexpr bool is_always_unique() noexcept { + return mapping_type::is_always_unique(); + }; + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI static constexpr bool is_always_exhaustive() noexcept { return mapping_type::is_always_exhaustive(); }; - _LIBCPP_HIDE_FROM_ABI static constexpr bool is_always_strided() noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI static constexpr bool is_always_strided() noexcept { return mapping_type::is_always_strided(); }; - _LIBCPP_HIDE_FROM_ABI constexpr bool is_unique() const { return __map_.is_unique(); }; - _LIBCPP_HIDE_FROM_ABI constexpr bool is_exhaustive() const { return __map_.is_exhaustive(); }; - _LIBCPP_HIDE_FROM_ABI constexpr bool is_strided() const { return __map_.is_strided(); }; - _LIBCPP_HIDE_FROM_ABI constexpr index_type stride(rank_type __r) const { return __map_.stride(__r); }; + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool is_unique() const { return __map_.is_unique(); }; + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool is_exhaustive() const { return __map_.is_exhaustive(); }; + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool is_strided() const { return __map_.is_strided(); }; + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr index_type stride(rank_type __r) const { return __map_.stride(__r); }; private: _LIBCPP_NO_UNIQUE_ADDRESS data_handle_type __ptr_{}; diff --git a/libcxx/include/__mutex/mutex.h b/libcxx/include/__mutex/mutex.h index 68c8842b35eda..e9cedf8db1cca 100644 --- a/libcxx/include/__mutex/mutex.h +++ b/libcxx/include/__mutex/mutex.h @@ -37,11 +37,11 @@ class 
_LIBCPP_EXPORTED_FROM_ABI _LIBCPP_CAPABILITY("mutex") mutex { # endif _LIBCPP_ACQUIRE_CAPABILITY() void lock(); - _LIBCPP_TRY_ACQUIRE_CAPABILITY(true) bool try_lock() _NOEXCEPT; + [[__nodiscard__]] _LIBCPP_TRY_ACQUIRE_CAPABILITY(true) bool try_lock() _NOEXCEPT; _LIBCPP_RELEASE_CAPABILITY void unlock() _NOEXCEPT; typedef __libcpp_mutex_t* native_handle_type; - _LIBCPP_HIDE_FROM_ABI native_handle_type native_handle() { return &__m_; } + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI native_handle_type native_handle() { return &__m_; } }; static_assert(is_nothrow_default_constructible::value, "the default constructor for std::mutex must be nothrow"); diff --git a/libcxx/include/__thread/thread.h b/libcxx/include/__thread/thread.h index a3b672bc0f0e7..561f092ddb7c0 100644 --- a/libcxx/include/__thread/thread.h +++ b/libcxx/include/__thread/thread.h @@ -242,13 +242,13 @@ class _LIBCPP_EXPORTED_FROM_ABI thread { _LIBCPP_HIDE_FROM_ABI void swap(thread& __t) _NOEXCEPT { std::swap(__t_, __t.__t_); } - _LIBCPP_HIDE_FROM_ABI bool joinable() const _NOEXCEPT { return !__libcpp_thread_isnull(&__t_); } + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI bool joinable() const _NOEXCEPT { return !__libcpp_thread_isnull(&__t_); } void join(); void detach(); - _LIBCPP_HIDE_FROM_ABI id get_id() const _NOEXCEPT { return __libcpp_thread_get_id(&__t_); } - _LIBCPP_HIDE_FROM_ABI native_handle_type native_handle() _NOEXCEPT { return __t_; } + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI id get_id() const _NOEXCEPT { return __libcpp_thread_get_id(&__t_); } + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI native_handle_type native_handle() _NOEXCEPT { return __t_; } - static unsigned hardware_concurrency() _NOEXCEPT; + [[__nodiscard__]] static unsigned hardware_concurrency() _NOEXCEPT; }; inline _LIBCPP_HIDE_FROM_ABI void swap(thread& __x, thread& __y) _NOEXCEPT { __x.swap(__y); } diff --git a/libcxx/include/__utility/cmp.h b/libcxx/include/__utility/cmp.h index 68864e23e0397..7cfe640ceb423 100644 --- 
a/libcxx/include/__utility/cmp.h +++ b/libcxx/include/__utility/cmp.h @@ -31,7 +31,7 @@ concept __comparison_can_promote_to = sizeof(_Tp) < sizeof(_Ip) || (sizeof(_Tp) == sizeof(_Ip) && __signed_integer<_Tp>); template <__signed_or_unsigned_integer _Tp, __signed_or_unsigned_integer _Up> -_LIBCPP_HIDE_FROM_ABI constexpr bool cmp_equal(_Tp __t, _Up __u) noexcept { +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool cmp_equal(_Tp __t, _Up __u) noexcept { if constexpr (is_signed_v<_Tp> == is_signed_v<_Up>) return __t == __u; else if constexpr (__comparison_can_promote_to<_Tp, int> && __comparison_can_promote_to<_Up, int>) @@ -45,12 +45,12 @@ _LIBCPP_HIDE_FROM_ABI constexpr bool cmp_equal(_Tp __t, _Up __u) noexcept { } template <__signed_or_unsigned_integer _Tp, __signed_or_unsigned_integer _Up> -_LIBCPP_HIDE_FROM_ABI constexpr bool cmp_not_equal(_Tp __t, _Up __u) noexcept { +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool cmp_not_equal(_Tp __t, _Up __u) noexcept { return !std::cmp_equal(__t, __u); } template <__signed_or_unsigned_integer _Tp, __signed_or_unsigned_integer _Up> -_LIBCPP_HIDE_FROM_ABI constexpr bool cmp_less(_Tp __t, _Up __u) noexcept { +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool cmp_less(_Tp __t, _Up __u) noexcept { if constexpr (is_signed_v<_Tp> == is_signed_v<_Up>) return __t < __u; else if constexpr (__comparison_can_promote_to<_Tp, int> && __comparison_can_promote_to<_Up, int>) @@ -64,22 +64,22 @@ _LIBCPP_HIDE_FROM_ABI constexpr bool cmp_less(_Tp __t, _Up __u) noexcept { } template <__signed_or_unsigned_integer _Tp, __signed_or_unsigned_integer _Up> -_LIBCPP_HIDE_FROM_ABI constexpr bool cmp_greater(_Tp __t, _Up __u) noexcept { +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool cmp_greater(_Tp __t, _Up __u) noexcept { return std::cmp_less(__u, __t); } template <__signed_or_unsigned_integer _Tp, __signed_or_unsigned_integer _Up> -_LIBCPP_HIDE_FROM_ABI constexpr bool cmp_less_equal(_Tp __t, _Up __u) noexcept { +[[nodiscard]] 
_LIBCPP_HIDE_FROM_ABI constexpr bool cmp_less_equal(_Tp __t, _Up __u) noexcept { return !std::cmp_greater(__t, __u); } template <__signed_or_unsigned_integer _Tp, __signed_or_unsigned_integer _Up> -_LIBCPP_HIDE_FROM_ABI constexpr bool cmp_greater_equal(_Tp __t, _Up __u) noexcept { +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool cmp_greater_equal(_Tp __t, _Up __u) noexcept { return !std::cmp_less(__t, __u); } template <__signed_or_unsigned_integer _Tp, __signed_or_unsigned_integer _Up> -_LIBCPP_HIDE_FROM_ABI constexpr bool in_range(_Up __u) noexcept { +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool in_range(_Up __u) noexcept { return std::cmp_less_equal(__u, numeric_limits<_Tp>::max()) && std::cmp_greater_equal(__u, numeric_limits<_Tp>::min()); } diff --git a/libcxx/include/barrier b/libcxx/include/barrier index 41fbfb3e8fb7b..5f9b471f01741 100644 --- a/libcxx/include/barrier +++ b/libcxx/include/barrier @@ -158,7 +158,9 @@ class barrier { public: using arrival_token = typename __barrier_base<_CompletionF>::arrival_token; - static _LIBCPP_HIDE_FROM_ABI constexpr ptrdiff_t max() noexcept { return __barrier_base<_CompletionF>::max(); } + [[nodiscard]] static _LIBCPP_HIDE_FROM_ABI constexpr ptrdiff_t max() noexcept { + return __barrier_base<_CompletionF>::max(); + } _LIBCPP_HIDE_FROM_ABI explicit barrier(ptrdiff_t __count, _CompletionF __completion = _CompletionF()) : __b_(__count, std::move(__completion)) { diff --git a/libcxx/include/initializer_list b/libcxx/include/initializer_list index 00e0d4ea4a2df..44cd45668388b 100644 --- a/libcxx/include/initializer_list +++ b/libcxx/include/initializer_list @@ -78,11 +78,17 @@ public: _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 initializer_list() _NOEXCEPT : __begin_(nullptr), __size_(0) {} - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 size_t size() const _NOEXCEPT { return __size_; } + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 size_t size() const _NOEXCEPT { + return 
__size_; + } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 const _Ep* begin() const _NOEXCEPT { return __begin_; } + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 const _Ep* begin() const _NOEXCEPT { + return __begin_; + } - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 const _Ep* end() const _NOEXCEPT { return __begin_ + __size_; } + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 const _Ep* end() const _NOEXCEPT { + return __begin_ + __size_; + } }; template diff --git a/libcxx/include/latch b/libcxx/include/latch index c3b8f62e9b50e..33268d9655f25 100644 --- a/libcxx/include/latch +++ b/libcxx/include/latch @@ -70,7 +70,9 @@ class latch { atomic __a_; public: - static _LIBCPP_HIDE_FROM_ABI constexpr ptrdiff_t max() noexcept { return numeric_limits::max(); } + [[nodiscard]] static _LIBCPP_HIDE_FROM_ABI constexpr ptrdiff_t max() noexcept { + return numeric_limits::max(); + } inline _LIBCPP_HIDE_FROM_ABI constexpr explicit latch(ptrdiff_t __expected) : __a_(__expected) { _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN( @@ -97,7 +99,7 @@ public: if (__old == __update) __a_.notify_all(); } - inline _LIBCPP_HIDE_FROM_ABI bool try_wait() const noexcept { + [[nodiscard]] inline _LIBCPP_HIDE_FROM_ABI bool try_wait() const noexcept { auto __value = __a_.load(memory_order_acquire); return try_wait_impl(__value); } diff --git a/libcxx/include/mutex b/libcxx/include/mutex index 0b81f1bb1c8a6..bec0185ede21a 100644 --- a/libcxx/include/mutex +++ b/libcxx/include/mutex @@ -229,12 +229,12 @@ public: recursive_mutex& operator=(const recursive_mutex&) = delete; void lock(); - bool try_lock() _NOEXCEPT; + [[__nodiscard__]] bool try_lock() _NOEXCEPT; void unlock() _NOEXCEPT; typedef __libcpp_recursive_mutex_t* native_handle_type; - _LIBCPP_HIDE_FROM_ABI native_handle_type native_handle() { return &__m_; } + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI native_handle_type native_handle() { return &__m_; } }; class _LIBCPP_EXPORTED_FROM_ABI 
timed_mutex { @@ -251,14 +251,14 @@ public: public: void lock(); - bool try_lock() _NOEXCEPT; + [[__nodiscard__]] bool try_lock() _NOEXCEPT; template - _LIBCPP_HIDE_FROM_ABI bool try_lock_for(const chrono::duration<_Rep, _Period>& __d) { + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI bool try_lock_for(const chrono::duration<_Rep, _Period>& __d) { return try_lock_until(chrono::steady_clock::now() + __d); } template - _LIBCPP_HIDE_FROM_ABI bool try_lock_until(const chrono::time_point<_Clock, _Duration>& __t) { + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI bool try_lock_until(const chrono::time_point<_Clock, _Duration>& __t) { using namespace chrono; unique_lock __lk(__m_); bool __no_timeout = _Clock::now() < __t; @@ -288,14 +288,14 @@ public: recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete; void lock(); - bool try_lock() _NOEXCEPT; + [[__nodiscard__]] bool try_lock() _NOEXCEPT; template - _LIBCPP_HIDE_FROM_ABI bool try_lock_for(const chrono::duration<_Rep, _Period>& __d) { + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI bool try_lock_for(const chrono::duration<_Rep, _Period>& __d) { return try_lock_until(chrono::steady_clock::now() + __d); } template - _LIBCPP_HIDE_FROM_ABI bool try_lock_until(const chrono::time_point<_Clock, _Duration>& __t) { + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI bool try_lock_until(const chrono::time_point<_Clock, _Duration>& __t) { using namespace chrono; __thread_id __id = this_thread::get_id(); unique_lock __lk(__m_); @@ -320,7 +320,7 @@ public: }; template -_LIBCPP_NO_THREAD_SAFETY_ANALYSIS _LIBCPP_HIDE_FROM_ABI int try_lock(_L0& __l0, _L1& __l1) { +[[__nodiscard__]] _LIBCPP_NO_THREAD_SAFETY_ANALYSIS _LIBCPP_HIDE_FROM_ABI int try_lock(_L0& __l0, _L1& __l1) { unique_lock<_L0> __u0(__l0, try_to_lock_t()); if (__u0.owns_lock()) { if (__l1.try_lock()) { @@ -335,7 +335,8 @@ _LIBCPP_NO_THREAD_SAFETY_ANALYSIS _LIBCPP_HIDE_FROM_ABI int try_lock(_L0& __l0, # ifndef _LIBCPP_CXX03_LANG template -_LIBCPP_NO_THREAD_SAFETY_ANALYSIS 
_LIBCPP_HIDE_FROM_ABI int try_lock(_L0& __l0, _L1& __l1, _L2& __l2, _L3&... __l3) { +[[__nodiscard__]] _LIBCPP_NO_THREAD_SAFETY_ANALYSIS + _LIBCPP_HIDE_FROM_ABI int try_lock(_L0& __l0, _L1& __l1, _L2& __l2, _L3&... __l3) { int __r = 0; unique_lock<_L0> __u0(__l0, try_to_lock); if (__u0.owns_lock()) { diff --git a/libcxx/include/queue b/libcxx/include/queue index b4b79fb25a35f..a1686bc7c502e 100644 --- a/libcxx/include/queue +++ b/libcxx/include/queue @@ -376,12 +376,12 @@ public: # endif // _LIBCPP_CXX03_LANG [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI bool empty() const { return c.empty(); } - _LIBCPP_HIDE_FROM_ABI size_type size() const { return c.size(); } + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI size_type size() const { return c.size(); } - _LIBCPP_HIDE_FROM_ABI reference front() { return c.front(); } - _LIBCPP_HIDE_FROM_ABI const_reference front() const { return c.front(); } - _LIBCPP_HIDE_FROM_ABI reference back() { return c.back(); } - _LIBCPP_HIDE_FROM_ABI const_reference back() const { return c.back(); } + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI reference front() { return c.front(); } + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI const_reference front() const { return c.front(); } + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI reference back() { return c.back(); } + [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI const_reference back() const { return c.back(); } _LIBCPP_HIDE_FROM_ABI void push(const value_type& __v) { c.push_back(__v); } # ifndef _LIBCPP_CXX03_LANG @@ -664,8 +664,10 @@ public: # endif [[__nodiscard__]] _LIBCPP_CONSTEXPR_SINCE_CXX26 _LIBCPP_HIDE_FROM_ABI bool empty() const { return c.empty(); } - _LIBCPP_CONSTEXPR_SINCE_CXX26 _LIBCPP_HIDE_FROM_ABI size_type size() const { return c.size(); } - _LIBCPP_CONSTEXPR_SINCE_CXX26 _LIBCPP_HIDE_FROM_ABI const_reference top() const { return c.front(); } + [[__nodiscard__]] _LIBCPP_CONSTEXPR_SINCE_CXX26 _LIBCPP_HIDE_FROM_ABI size_type size() const { return c.size(); } + [[__nodiscard__]] _LIBCPP_CONSTEXPR_SINCE_CXX26 
_LIBCPP_HIDE_FROM_ABI const_reference top() const { + return c.front(); + } _LIBCPP_CONSTEXPR_SINCE_CXX26 _LIBCPP_HIDE_FROM_ABI void push(const value_type& __v); # ifndef _LIBCPP_CXX03_LANG diff --git a/libcxx/include/semaphore b/libcxx/include/semaphore index 99c4ad24b35ec..1f19d50e32af7 100644 --- a/libcxx/include/semaphore +++ b/libcxx/include/semaphore @@ -133,7 +133,7 @@ class counting_semaphore { public: static_assert(__least_max_value >= 0, "The least maximum value must be a positive number"); - static constexpr ptrdiff_t max() noexcept { return __least_max_value; } + [[nodiscard]] static constexpr ptrdiff_t max() noexcept { return __least_max_value; } _LIBCPP_HIDE_FROM_ABI constexpr explicit counting_semaphore(ptrdiff_t __count) : __semaphore_(__count) { _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN( @@ -156,12 +156,12 @@ public: } _LIBCPP_HIDE_FROM_ABI void acquire() { __semaphore_.acquire(); } template - _LIBCPP_HIDE_FROM_ABI bool try_acquire_for(chrono::duration<_Rep, _Period> const& __rel_time) { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI bool try_acquire_for(chrono::duration<_Rep, _Period> const& __rel_time) { return __semaphore_.try_acquire_for(chrono::duration_cast(__rel_time)); } - _LIBCPP_HIDE_FROM_ABI bool try_acquire() { return __semaphore_.try_acquire(); } + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI bool try_acquire() { return __semaphore_.try_acquire(); } template - _LIBCPP_HIDE_FROM_ABI bool try_acquire_until(chrono::time_point<_Clock, _Duration> const& __abs_time) { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI bool try_acquire_until(chrono::time_point<_Clock, _Duration> const& __abs_time) { auto const __current = _Clock::now(); if (__current >= __abs_time) return try_acquire(); diff --git a/libcxx/test/libcxx/containers/views/mdspan/extents/assert.obs.pass.cpp b/libcxx/test/libcxx/containers/views/mdspan/extents/assert.obs.pass.cpp index c473879d87b71..e32c0a96c1261 100644 --- a/libcxx/test/libcxx/containers/views/mdspan/extents/assert.obs.pass.cpp +++ 
b/libcxx/test/libcxx/containers/views/mdspan/extents/assert.obs.pass.cpp @@ -35,28 +35,28 @@ int main(int, char**) { // mismatch of static extent { std::extents e; - TEST_LIBCPP_ASSERT_FAILURE(([=] { e.extent(0); }()), "extents access: index must be less than rank"); - TEST_LIBCPP_ASSERT_FAILURE(([=] { e.static_extent(0); }()), "extents access: index must be less than rank"); + TEST_LIBCPP_ASSERT_FAILURE(([=] { (void)e.extent(0); }()), "extents access: index must be less than rank"); + TEST_LIBCPP_ASSERT_FAILURE(([=] { (void)e.static_extent(0); }()), "extents access: index must be less than rank"); } { std::extents e; - TEST_LIBCPP_ASSERT_FAILURE(([=] { e.extent(2); }()), "extents access: index must be less than rank"); - TEST_LIBCPP_ASSERT_FAILURE(([=] { e.static_extent(2); }()), "extents access: index must be less than rank"); + TEST_LIBCPP_ASSERT_FAILURE(([=] { (void)e.extent(2); }()), "extents access: index must be less than rank"); + TEST_LIBCPP_ASSERT_FAILURE(([=] { (void)e.static_extent(2); }()), "extents access: index must be less than rank"); } { std::extents e; - TEST_LIBCPP_ASSERT_FAILURE(([=] { e.extent(2); }()), "extents access: index must be less than rank"); - TEST_LIBCPP_ASSERT_FAILURE(([=] { e.static_extent(2); }()), "extents access: index must be less than rank"); + TEST_LIBCPP_ASSERT_FAILURE(([=] { (void)e.extent(2); }()), "extents access: index must be less than rank"); + TEST_LIBCPP_ASSERT_FAILURE(([=] { (void)e.static_extent(2); }()), "extents access: index must be less than rank"); } { std::extents e; - TEST_LIBCPP_ASSERT_FAILURE(([=] { e.extent(2); }()), "extents access: index must be less than rank"); - TEST_LIBCPP_ASSERT_FAILURE(([=] { e.static_extent(2); }()), "extents access: index must be less than rank"); + TEST_LIBCPP_ASSERT_FAILURE(([=] { (void)e.extent(2); }()), "extents access: index must be less than rank"); + TEST_LIBCPP_ASSERT_FAILURE(([=] { (void)e.static_extent(2); }()), "extents access: index must be less than rank"); } { 
std::extents e; - TEST_LIBCPP_ASSERT_FAILURE(([=] { e.extent(9); }()), "extents access: index must be less than rank"); - TEST_LIBCPP_ASSERT_FAILURE(([=] { e.static_extent(9); }()), "extents access: index must be less than rank"); + TEST_LIBCPP_ASSERT_FAILURE(([=] { (void)e.extent(9); }()), "extents access: index must be less than rank"); + TEST_LIBCPP_ASSERT_FAILURE(([=] { (void)e.static_extent(9); }()), "extents access: index must be less than rank"); } // check that static_extent works in constant expression with assertions enabled diff --git a/libcxx/test/libcxx/containers/views/mdspan/nodiscard.verify.cpp b/libcxx/test/libcxx/containers/views/mdspan/nodiscard.verify.cpp new file mode 100644 index 0000000000000..71f53f8f1f737 --- /dev/null +++ b/libcxx/test/libcxx/containers/views/mdspan/nodiscard.verify.cpp @@ -0,0 +1,62 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +// REQUIRES: std-at-least-c++23 + +// + +// Check that functions are marked [[nodiscard]] + +#include +#include +#include + +void test() { + // mdspan<> + + std::array data; + std::mdspan> mdsp{data.data(), 2, 2}; + + mdsp[0, 1]; // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + std::array arr{0, 1}; + mdsp[arr]; // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + std::span sp{arr}; + mdsp[sp]; // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + + mdsp.rank(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + mdsp.rank_dynamic(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + mdsp.static_extent(0); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + mdsp.extent(0); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + + mdsp.extents(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + mdsp.data_handle(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + mdsp.mapping(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + mdsp.accessor(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + + mdsp.is_always_unique(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + mdsp.is_always_exhaustive(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + mdsp.is_always_strided(); // expected-warning {{ignoring return value of function declared with 
'nodiscard' attribute}} + mdsp.is_unique(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + mdsp.is_exhaustive(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + mdsp.is_strided(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + mdsp.stride(0); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + + // Helpers + + std::extents ex; + ex.rank(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + ex.rank_dynamic(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + ex.static_extent(0); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + ex.extent(0); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + + std::dextents dex; + dex.rank(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + dex.rank_dynamic(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + dex.static_extent(0); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + dex.extent(0); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} +} diff --git a/libcxx/test/libcxx/diagnostics/flat_map.nodiscard.verify.cpp b/libcxx/test/libcxx/diagnostics/flat_map.nodiscard.verify.cpp index 79b943b790d04..d569616b99bf4 100644 --- a/libcxx/test/libcxx/diagnostics/flat_map.nodiscard.verify.cpp +++ b/libcxx/test/libcxx/diagnostics/flat_map.nodiscard.verify.cpp @@ -6,15 +6,107 @@ // //===----------------------------------------------------------------------===// -// UNSUPPORTED: c++03, c++11, c++14, c++17, c++20 +// REQUIRES: std-at-least-c++23 // // [[nodiscard]] bool empty() const noexcept; 
#include +#include -void f() { - std::flat_map c; - c.empty(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} +template +struct TransparentKey { + T t; + + constexpr explicit operator T() const { return t; } +}; + +struct TransparentCompare { + using is_transparent = void; // This makes the comparator transparent + + template + constexpr bool operator()(const T& t, const TransparentKey& transparent) const { + return t < transparent.t; + } + + template + constexpr bool operator()(const TransparentKey& transparent, const T& t) const { + return transparent.t < t; + } + + template + constexpr bool operator()(const T& t1, const T& t2) const { + return t1 < t2; + } +}; + +void test() { + std::flat_map fm; + const std::flat_map cfm{}; + + fm.begin(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfm.begin(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + fm.end(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfm.end(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + fm.rbegin(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfm.rbegin(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + fm.rend(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfm.rend(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfm.cbegin(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfm.cend(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfm.crbegin(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfm.crend(); // 
expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + + fm.empty(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + fm.size(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + fm.max_size(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + + int key = 0; + TransparentKey tkey; + + std::flat_map nfm; + nfm[key]; // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + fm[std::move(key)]; // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + fm[std::move(tkey)]; // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + + fm.at(key); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfm.at(key); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + fm.at(tkey); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfm.at(tkey); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + + std::move(fm).extract(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + + fm.key_comp(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + fm.value_comp(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + fm.keys(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + fm.values(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + + fm.find(key); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfm.find(key); // expected-warning {{ignoring return value of 
function declared with 'nodiscard' attribute}} + fm.find(tkey); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfm.find(tkey); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + + fm.count(key); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + fm.count(tkey); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + + fm.contains(key); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfm.contains(key); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + fm.contains(tkey); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfm.contains(tkey); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + + fm.lower_bound(key); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfm.lower_bound(key); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + fm.lower_bound(tkey); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfm.lower_bound(tkey); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + + fm.upper_bound(key); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfm.upper_bound(key); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + fm.upper_bound(tkey); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfm.upper_bound(tkey); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + + fm.equal_range(key); // expected-warning {{ignoring return value of function declared with 
'nodiscard' attribute}} + cfm.equal_range(key); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + fm.equal_range(tkey); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cfm.equal_range(tkey); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} } diff --git a/libcxx/test/libcxx/diagnostics/queue.nodiscard.verify.cpp b/libcxx/test/libcxx/diagnostics/queue.nodiscard.verify.cpp index 77d3367cc2f4a..da1f9ff3f01f6 100644 --- a/libcxx/test/libcxx/diagnostics/queue.nodiscard.verify.cpp +++ b/libcxx/test/libcxx/diagnostics/queue.nodiscard.verify.cpp @@ -12,12 +12,24 @@ #include -void test_queue() { - std::queue queue; - queue.empty(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} -} +void test() { + { + std::queue q; + const std::queue cq{}; + + q.empty(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + q.size(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + q.front(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cq.front(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + q.back(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + cq.back(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + } + + { + std::priority_queue pq; -void test_priority_queue() { - std::priority_queue priority_queue; - priority_queue.empty(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + pq.empty(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + pq.size(); // expected-warning {{ignoring return value of function declared with 'nodiscard' 
attribute}} + pq.top(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + } } diff --git a/libcxx/test/libcxx/diagnostics/utility.nodiscard.verify.cpp b/libcxx/test/libcxx/diagnostics/utility.nodiscard.verify.cpp index 524be96736bad..2f5b3ba0fc642 100644 --- a/libcxx/test/libcxx/diagnostics/utility.nodiscard.verify.cpp +++ b/libcxx/test/libcxx/diagnostics/utility.nodiscard.verify.cpp @@ -10,8 +10,6 @@ // check that functions are marked [[nodiscard]] -// clang-format off - #include #include "test_macros.h" @@ -19,15 +17,33 @@ void test() { int i = 0; - std::forward(i); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} - std::forward(1); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} - std::move(i); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} - std::move_if_noexcept(i); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + std::forward(i); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + std::forward(1); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + std::move(i); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + std::move_if_noexcept(i); #if TEST_STD_VER >= 17 std::as_const(i); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} #endif +#if TEST_STD_VER >= 20 + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + std::cmp_equal(94, 82); + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + std::cmp_not_equal(94, 82); + // expected-warning@+1 {{ignoring return value of 
function declared with 'nodiscard' attribute}} + std::cmp_less(94, 82); + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + std::cmp_greater(94, 82); + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + std::cmp_less_equal(94, 82); + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + std::cmp_greater_equal(94, 82); + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + std::in_range(49); +#endif + #if TEST_STD_VER >= 23 enum E { Apple, Orange } e = Apple; std::to_underlying(e); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} diff --git a/libcxx/test/libcxx/language.support/nodiscard.verify.cpp b/libcxx/test/libcxx/language.support/nodiscard.verify.cpp new file mode 100644 index 0000000000000..b87b04ad9f1ef --- /dev/null +++ b/libcxx/test/libcxx/language.support/nodiscard.verify.cpp @@ -0,0 +1,91 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++03 + +// Check that functions are marked [[nodiscard]] + +#include +#include +#include +#include + +#include "test_macros.h" + +void test() { +#if TEST_STD_VER >= 20 + { // + int x = 94; + int y = 82; + auto oRes = x <=> y; + + std::is_eq(oRes); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + std::is_neq(oRes); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + std::is_lt(oRes); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + std::is_lteq(oRes); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + std::is_gt(oRes); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + std::is_gteq(oRes); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + } +#endif + +#if TEST_STD_VER >= 20 + { // + struct EmptyPromise { + } promise; + + { + std::coroutine_handle cr{}; + + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + cr.address(); + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + std::coroutine_handle::from_address(&promise); + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + cr.done(); + + std::hash> hash; + hash(cr); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + } + { + std::coroutine_handle cr; + + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + std::coroutine_handle::from_promise(promise); + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + 
cr.address(); + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + std::coroutine_handle::from_address(&promise); + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + cr.done(); + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + cr.promise(); + } + { + std::coroutine_handle cr = std::noop_coroutine(); + + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + cr.done(); + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + cr.promise(); + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + cr.address(); + + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + std::noop_coroutine(); + } + } +#endif + + { // + std::initializer_list il{94, 82, 49}; + + il.size(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + il.begin(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + il.end(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + } +} diff --git a/libcxx/test/libcxx/thread/nodiscard.verify.cpp b/libcxx/test/libcxx/thread/nodiscard.verify.cpp new file mode 100644 index 0000000000000..19e43f88db700 --- /dev/null +++ b/libcxx/test/libcxx/thread/nodiscard.verify.cpp @@ -0,0 +1,144 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++03 +// UNSUPPORTED: no-threads + +// Check that functions are marked [[nodiscard]] + +#include +#include +#include +#include +#include +#include + +#include "test_macros.h" + +const auto timePoint = std::chrono::steady_clock::now(); + +void test() { + // Threads + { + std::thread th; + + th.joinable(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + th.get_id(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + th.native_handle(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + th.hardware_concurrency(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + } +#if TEST_STD_VER >= 20 + { + std::jthread jt; + + jt.joinable(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + jt.get_id(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + jt.native_handle(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + jt.get_stop_source(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + jt.get_stop_token(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + jt.hardware_concurrency(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + } +#endif + + // Mutual exclusion + + { // + std::mutex m; + + m.try_lock(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + m.native_handle(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + } + { + std::recursive_mutex m; + + 
m.try_lock(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + m.native_handle(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + } + { + std::timed_mutex m; + + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + m.try_lock(); + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + m.try_lock_for(std::chrono::nanoseconds{82}); + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + m.try_lock_until(timePoint); + } + { + std::recursive_timed_mutex m; + + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + m.try_lock(); + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + m.try_lock_for(std::chrono::nanoseconds{82}); + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + m.try_lock_until(timePoint); + } + { + std::mutex m1; + std::mutex m2; + std::mutex m3; + + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + std::try_lock(m1, m2); + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + std::try_lock(m1, m2, m3); + } + + // Condition variables + + { // + std::condition_variable cv; + + cv.native_handle(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + } + +#if TEST_STD_VER >= 20 + + // Semaphores + + { // + std::counting_semaphore<> cs{0}; + + cs.max(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + cs.try_acquire_for(std::chrono::nanoseconds{82}); + // expected-warning@+1 {{ignoring return value of function 
declared with 'nodiscard' attribute}} + cs.try_acquire(); + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + cs.try_acquire_until(timePoint); + + std::binary_semaphore bs{0}; + + bs.max(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + bs.try_acquire_for(std::chrono::nanoseconds{82}); + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + bs.try_acquire(); + // expected-warning@+1 {{ignoring return value of function declared with 'nodiscard' attribute}} + bs.try_acquire_until(timePoint); + } + + // Latches and barriers + + { // + std::barrier<> b{94}; + + b.max(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + } + { // + std::latch l{94}; + + l.max(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + l.try_wait(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} + } + +#endif +} diff --git a/libcxx/test/std/containers/container.adaptors/flat.map/flat.map.access/index_transparent.pass.cpp b/libcxx/test/std/containers/container.adaptors/flat.map/flat.map.access/index_transparent.pass.cpp index e8ea20b345e34..98629364654b6 100644 --- a/libcxx/test/std/containers/container.adaptors/flat.map/flat.map.access/index_transparent.pass.cpp +++ b/libcxx/test/std/containers/container.adaptors/flat.map/flat.map.access/index_transparent.pass.cpp @@ -97,7 +97,7 @@ constexpr bool test() { TransparentComparator c(transparent_used); std::flat_map m(std::sorted_unique, {{1, 1}, {2, 2}, {3, 3}}, c); assert(!transparent_used); - m[ConvertibleTransparent{3}]; + (void)m[ConvertibleTransparent{3}]; assert(transparent_used); } { diff --git a/libcxx/test/std/thread/thread.jthread/nodiscard.verify.cpp 
b/libcxx/test/std/thread/thread.jthread/nodiscard.verify.cpp deleted file mode 100644 index 2ef5cf874da90..0000000000000 --- a/libcxx/test/std/thread/thread.jthread/nodiscard.verify.cpp +++ /dev/null @@ -1,29 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// UNSUPPORTED: no-threads -// UNSUPPORTED: c++03, c++11, c++14, c++17 - -// [[nodiscard]] bool joinable() const noexcept; -// [[nodiscard]] id get_id() const noexcept; -// [[nodiscard]] native_handle_type native_handle(); -// [[nodiscard]] stop_source get_stop_source() noexcept; -// [[nodiscard]] stop_token get_stop_token() const noexcept; -// [[nodiscard]] static unsigned int hardware_concurrency() noexcept; - -#include - -void test() { - std::jthread jt; - jt.joinable(); // expected-warning {{ignoring return value of function}} - jt.get_id(); // expected-warning {{ignoring return value of function}} - jt.native_handle(); // expected-warning {{ignoring return value of function}} - jt.get_stop_source(); // expected-warning {{ignoring return value of function}} - jt.get_stop_token(); // expected-warning {{ignoring return value of function}} - jt.hardware_concurrency(); // expected-warning {{ignoring return value of function}} -} diff --git a/lld/MachO/Driver.cpp b/lld/MachO/Driver.cpp index 32b20993af67c..28c817c54c85d 100644 --- a/lld/MachO/Driver.cpp +++ b/lld/MachO/Driver.cpp @@ -41,6 +41,7 @@ #include "llvm/Object/Archive.h" #include "llvm/Option/ArgList.h" #include "llvm/Support/CommandLine.h" +#include "llvm/Support/Debug.h" #include "llvm/Support/FileSystem.h" #include "llvm/Support/Parallel.h" #include "llvm/Support/Path.h" @@ -53,6 +54,10 @@ #include 
"llvm/TextAPI/Architecture.h" #include "llvm/TextAPI/PackedVersion.h" +#if !_WIN32 +#include +#endif + using namespace llvm; using namespace llvm::MachO; using namespace llvm::object; @@ -292,12 +297,13 @@ struct DeferredFile { using DeferredFiles = std::vector; #if LLVM_ENABLE_THREADS -class SerialBackgroundQueue { +class SerialBackgroundWorkQueue { std::deque> queue; std::thread *running; std::mutex mutex; public: + std::atomic_bool stopAllWork = false; void queueWork(std::function work) { mutex.lock(); if (running && queue.empty()) { @@ -312,7 +318,7 @@ class SerialBackgroundQueue { queue.emplace_back(std::move(work)); if (!running) running = new std::thread([&]() { - while (true) { + while (!stopAllWork) { mutex.lock(); if (queue.empty()) { mutex.unlock(); @@ -331,6 +337,8 @@ class SerialBackgroundQueue { } }; +static SerialBackgroundWorkQueue pageInQueue; + // Most input files have been mapped but not yet paged in. // This code forces the page-ins on multiple threads so // the process is not stalled waiting on disk buffer i/o. @@ -339,8 +347,8 @@ void multiThreadedPageInBackground(DeferredFiles &deferred) { static const size_t largeArchive = 10 * 1024 * 1024; #ifndef NDEBUG using namespace std::chrono; - std::atomic_int numDeferedFilesTouched = 0; static std::atomic_uint64_t totalBytes = 0; + std::atomic_int numDeferedFilesAdvised = 0; auto t0 = high_resolution_clock::now(); #endif @@ -348,24 +356,34 @@ void multiThreadedPageInBackground(DeferredFiles &deferred) { const StringRef &buff = deferredFile.buffer.getBuffer(); if (buff.size() > largeArchive) return; + #ifndef NDEBUG totalBytes += buff.size(); - numDeferedFilesTouched += 1; + numDeferedFilesAdvised += 1; #endif - +#if _WIN32 // Reference all file's mmap'd pages to load them into memory. 
- for (const char *page = buff.data(), *end = page + buff.size(); page < end; - page += pageSize) { + for (const char *page = buff.data(), *end = page + buff.size(); + page < end && !pageInQueue.stopAllWork; page += pageSize) { [[maybe_unused]] volatile char t = *page; (void)t; } +#else +#define DEBUG_TYPE "lld-madvise" + auto aligned = + llvm::alignDown(reinterpret_cast(buff.data()), pageSize); + if (madvise((void *)aligned, buff.size(), MADV_WILLNEED) < 0) + LLVM_DEBUG(llvm::dbgs() << "madvise error: " << strerror(errno) << "\n"); +#undef DEBUG_TYPE +#endif }; + { // Create scope for waiting for the taskGroup std::atomic_size_t index = 0; llvm::parallel::TaskGroup taskGroup; for (int w = 0; w < config->readWorkers; w++) taskGroup.spawn([&index, &preloadDeferredFile, &deferred]() { - while (true) { + while (!pageInQueue.stopAllWork) { size_t localIndex = index.fetch_add(1); if (localIndex >= deferred.size()) break; @@ -373,17 +391,17 @@ void multiThreadedPageInBackground(DeferredFiles &deferred) { } }); } + #ifndef NDEBUG auto dt = high_resolution_clock::now() - t0; if (Process::GetEnv("LLD_MULTI_THREAD_PAGE")) llvm::dbgs() << "multiThreadedPageIn " << totalBytes << "/" - << numDeferedFilesTouched << "/" << deferred.size() << "/" + << numDeferedFilesAdvised << "/" << deferred.size() << "/" << duration_cast(dt).count() / 1000. 
<< "\n"; #endif } static void multiThreadedPageIn(const DeferredFiles &deferred) { - static SerialBackgroundQueue pageInQueue; pageInQueue.queueWork([=]() { DeferredFiles files = deferred; multiThreadedPageInBackground(files); @@ -489,7 +507,7 @@ static InputFile *processFile(std::optional buffer, continue; } - if (archiveContents) + if (config->readWorkers && archiveContents) archiveContents->push_back({path, isLazy, *mb}); if (!hasObjCSection(*mb)) continue; @@ -1447,6 +1465,8 @@ static void createFiles(const InputArgList &args) { multiThreadedPageIn(archiveContents); for (auto *archive : archives) archive->addLazySymbols(); + + pageInQueue.stopAllWork = true; } #endif } @@ -1845,8 +1865,8 @@ bool link(ArrayRef argsArr, llvm::raw_ostream &stdoutOS, "'"); config->readWorkers = workers; #else - error(arg->getSpelling() + - ": option unavailable because lld was not built with thread support"); + warn(arg->getSpelling() + + ": option unavailable because lld was not built with thread support"); #endif } if (auto *arg = args.getLastArg(OPT_threads_eq)) { diff --git a/lld/MachO/InputFiles.cpp b/lld/MachO/InputFiles.cpp index efcffc9c53383..81caef5f15ae1 100644 --- a/lld/MachO/InputFiles.cpp +++ b/lld/MachO/InputFiles.cpp @@ -217,7 +217,8 @@ std::optional macho::readFile(StringRef path) { if (entry != cachedReads.end()) return entry->second; - ErrorOr> mbOrErr = MemoryBuffer::getFile(path); + ErrorOr> mbOrErr = + MemoryBuffer::getFile(path, false, /*RequiresNullTerminator=*/false); if (std::error_code ec = mbOrErr.getError()) { error("cannot open " + path + ": " + ec.message()); return std::nullopt; diff --git a/lld/test/MachO/read-workers.s b/lld/test/MachO/read-workers.s index 294106ba0b084..4d2f88c2a757c 100644 --- a/lld/test/MachO/read-workers.s +++ b/lld/test/MachO/read-workers.s @@ -1,7 +1,4 @@ # REQUIRES: x86 && thread_support -## Sometimes fails, particularly in an ASAN build, do not run until -## https://github.com/llvm/llvm-project/pull/157917 addresses the 
cause. -# UNSUPPORTED: target={{.*}} # RUN: llvm-mc -filetype=obj -triple=x86_64-apple-darwin %s -o %t.o ## A non-negative integer is allowed. diff --git a/lldb/bindings/interface/SBTargetExtensions.i b/lldb/bindings/interface/SBTargetExtensions.i index 43125d8970615..ef1093b03ced9 100644 --- a/lldb/bindings/interface/SBTargetExtensions.i +++ b/lldb/bindings/interface/SBTargetExtensions.i @@ -190,6 +190,7 @@ STRING_EXTENSION_LEVEL_OUTSIDE(SBTarget, lldb::eDescriptionLevelBrief) byte_order = property(GetByteOrder, None, doc='''A read only property that returns an lldb enumeration value (lldb.eByteOrderLittle, lldb.eByteOrderBig, lldb.eByteOrderInvalid) that represents the byte order for this target.''') addr_size = property(GetAddressByteSize, None, doc='''A read only property that returns the size in bytes of an address for this target.''') triple = property(GetTriple, None, doc='''A read only property that returns the target triple (arch-vendor-os) for this target as a string.''') + arch_name = property(GetArchName, None, doc='''A read only property that returns the architecture name for this target as a string.''') data_byte_size = property(GetDataByteSize, None, doc='''A read only property that returns the size in host bytes of a byte in the data address space for this target.''') code_byte_size = property(GetCodeByteSize, None, doc='''A read only property that returns the size in host bytes of a byte in the code address space for this target.''') platform = property(GetPlatform, None, doc='''A read only property that returns the platform associated with with this target.''') diff --git a/lldb/examples/python/templates/scripted_process.py b/lldb/examples/python/templates/scripted_process.py index 49059d533f38a..b4232f632a30a 100644 --- a/lldb/examples/python/templates/scripted_process.py +++ b/lldb/examples/python/templates/scripted_process.py @@ -35,9 +35,7 @@ def __init__(self, exe_ctx, args): target = exe_ctx.target if isinstance(target, lldb.SBTarget) and 
target.IsValid(): self.target = target - triple = self.target.triple - if triple: - self.arch = triple.split("-")[0] + self.arch = target.arch_name self.dbg = target.GetDebugger() if isinstance(args, lldb.SBStructuredData) and args.IsValid(): self.args = args diff --git a/lldb/include/lldb/API/SBTarget.h b/lldb/include/lldb/API/SBTarget.h index 379a0bb7e9513..ce81ae46a0905 100644 --- a/lldb/include/lldb/API/SBTarget.h +++ b/lldb/include/lldb/API/SBTarget.h @@ -44,6 +44,7 @@ class LLDB_API SBTarget { eBroadcastBitWatchpointChanged = (1 << 3), eBroadcastBitSymbolsLoaded = (1 << 4), eBroadcastBitSymbolsChanged = (1 << 5), + eBroadcastBitNewTargetCreated = (1 << 6), }; // Constructors @@ -64,6 +65,10 @@ class LLDB_API SBTarget { static lldb::SBTarget GetTargetFromEvent(const lldb::SBEvent &event); + /// For eBroadcastBitNewTargetCreated events, returns the newly created + /// target. For other event types, returns an invalid SBTarget. + static lldb::SBTarget GetCreatedTargetFromEvent(const lldb::SBEvent &event); + static uint32_t GetNumModulesFromEvent(const lldb::SBEvent &event); static lldb::SBModule GetModuleAtIndexFromEvent(const uint32_t idx, @@ -353,6 +358,8 @@ class LLDB_API SBTarget { const char *GetTriple(); + const char *GetArchName(); + const char *GetABIName(); const char *GetLabel() const; @@ -365,6 +372,16 @@ class LLDB_API SBTarget { /// LLDB_INVALID_GLOBALLY_UNIQUE_TARGET_ID if the target is invalid. lldb::user_id_t GetGloballyUniqueID() const; + /// Get the target session name for this target. + /// + /// The target session name provides a meaningful name for IDEs or tools to + /// display to help the user identify the origin and purpose of the target. + /// + /// \return + /// The target session name for this target, or nullptr if the target is + /// invalid or has no target session name. 
+ const char *GetTargetSessionName() const; + SBError SetLabel(const char *label); /// Architecture opcode byte size width accessor diff --git a/lldb/include/lldb/Target/Target.h b/lldb/include/lldb/Target/Target.h index 908094bfd888d..c0fcda7c0d960 100644 --- a/lldb/include/lldb/Target/Target.h +++ b/lldb/include/lldb/Target/Target.h @@ -537,6 +537,7 @@ class Target : public std::enable_shared_from_this, eBroadcastBitWatchpointChanged = (1 << 3), eBroadcastBitSymbolsLoaded = (1 << 4), eBroadcastBitSymbolsChanged = (1 << 5), + eBroadcastBitNewTargetCreated = (1 << 6), }; // These two functions fill out the Broadcaster interface: @@ -556,6 +557,13 @@ class Target : public std::enable_shared_from_this, TargetEventData(const lldb::TargetSP &target_sp, const ModuleList &module_list); + // Constructor for eBroadcastBitNewTargetCreated events. For this event + // type: + // - target_sp is the parent target (the subject/broadcaster of the event) + // - created_target_sp is the newly created target + TargetEventData(const lldb::TargetSP &target_sp, + const lldb::TargetSP &created_target_sp); + ~TargetEventData() override; static llvm::StringRef GetFlavorString(); @@ -570,14 +578,23 @@ class Target : public std::enable_shared_from_this, static lldb::TargetSP GetTargetFromEvent(const Event *event_ptr); + // For eBroadcastBitNewTargetCreated events, returns the newly created + // target. For other event types, returns an invalid target. 
+ static lldb::TargetSP GetCreatedTargetFromEvent(const Event *event_ptr); + static ModuleList GetModuleListFromEvent(const Event *event_ptr); const lldb::TargetSP &GetTarget() const { return m_target_sp; } + const lldb::TargetSP &GetCreatedTarget() const { + return m_created_target_sp; + } + const ModuleList &GetModuleList() const { return m_module_list; } private: lldb::TargetSP m_target_sp; + lldb::TargetSP m_created_target_sp; ModuleList m_module_list; TargetEventData(const TargetEventData &) = delete; @@ -622,6 +639,30 @@ class Target : public std::enable_shared_from_this, /// requirements. llvm::Error SetLabel(llvm::StringRef label); + /// Get the target session name for this target. + /// + /// Provides a meaningful name for IDEs or tools to display for dynamically + /// created targets. Defaults to "Session {ID}" based on the globally unique + /// ID. + /// + /// \return + /// The target session name for this target. + llvm::StringRef GetTargetSessionName() { return m_target_session_name; } + + /// Set the target session name for this target. + /// + /// This should typically be set along with the event + /// eBroadcastBitNewTargetCreated. Useful for scripts or triggers that + /// automatically create targets and want to provide meaningful names that + /// IDEs or other tools can display to help users identify the origin and + /// purpose of each target. + /// + /// \param[in] target_session_name + /// The target session name to set for this target. + void SetTargetSessionName(llvm::StringRef target_session_name) { + m_target_session_name = target_session_name.str(); + } + /// Find a binary on the system and return its Module, /// or return an existing Module that is already in the Target. 
/// @@ -1719,8 +1760,11 @@ class Target : public std::enable_shared_from_this, bool m_is_dummy_target; unsigned m_next_persistent_variable_index = 0; lldb::user_id_t m_target_unique_id = - LLDB_INVALID_GLOBALLY_UNIQUE_TARGET_ID; /// The globally unique ID + LLDB_INVALID_GLOBALLY_UNIQUE_TARGET_ID; ///< The globally unique ID /// assigned to this target + std::string m_target_session_name; ///< The target session name for this + /// target, used to name debugging + /// sessions in DAP. /// An optional \a lldb_private::Trace object containing processor trace /// information of this target. lldb::TraceSP m_trace_sp; diff --git a/lldb/packages/Python/lldbsuite/test/builders/darwin.py b/lldb/packages/Python/lldbsuite/test/builders/darwin.py index a023bda3ad801..eebe0ef47fd85 100644 --- a/lldb/packages/Python/lldbsuite/test/builders/darwin.py +++ b/lldb/packages/Python/lldbsuite/test/builders/darwin.py @@ -60,7 +60,7 @@ def get_triple_str(arch, vendor, os, version, env): component = [arch, vendor, os + version] if env: - components.append(env) + component.append(env) return "-".join(component) diff --git a/lldb/packages/Python/lldbsuite/test/tools/lldb-dap/dap_server.py b/lldb/packages/Python/lldbsuite/test/tools/lldb-dap/dap_server.py index 459b7ab89dbef..35a4f8934e961 100644 --- a/lldb/packages/Python/lldbsuite/test/tools/lldb-dap/dap_server.py +++ b/lldb/packages/Python/lldbsuite/test/tools/lldb-dap/dap_server.py @@ -785,6 +785,8 @@ def request_attach( *, program: Optional[str] = None, pid: Optional[int] = None, + debuggerId: Optional[int] = None, + targetId: Optional[int] = None, waitFor=False, initCommands: Optional[list[str]] = None, preRunCommands: Optional[list[str]] = None, @@ -804,6 +806,10 @@ def request_attach( args_dict["pid"] = pid if program is not None: args_dict["program"] = program + if debuggerId is not None: + args_dict["debuggerId"] = debuggerId + if targetId is not None: + args_dict["targetId"] = targetId if waitFor: args_dict["waitFor"] = waitFor 
args_dict["initCommands"] = self.init_commands diff --git a/lldb/source/API/SBTarget.cpp b/lldb/source/API/SBTarget.cpp index 98d10aa07c53f..578a7bdf7433d 100644 --- a/lldb/source/API/SBTarget.cpp +++ b/lldb/source/API/SBTarget.cpp @@ -128,6 +128,12 @@ SBTarget SBTarget::GetTargetFromEvent(const SBEvent &event) { return Target::TargetEventData::GetTargetFromEvent(event.get()); } +SBTarget SBTarget::GetCreatedTargetFromEvent(const SBEvent &event) { + LLDB_INSTRUMENT_VA(event); + + return Target::TargetEventData::GetCreatedTargetFromEvent(event.get()); +} + uint32_t SBTarget::GetNumModulesFromEvent(const SBEvent &event) { LLDB_INSTRUMENT_VA(event); @@ -1614,6 +1620,19 @@ const char *SBTarget::GetTriple() { return nullptr; } +const char *SBTarget::GetArchName() { + LLDB_INSTRUMENT_VA(this); + + if (TargetSP target_sp = GetSP()) { + llvm::StringRef arch_name = + target_sp->GetArchitecture().GetTriple().getArchName(); + ConstString const_arch_name(arch_name); + + return const_arch_name.GetCString(); + } + return nullptr; +} + const char *SBTarget::GetABIName() { LLDB_INSTRUMENT_VA(this); @@ -1641,6 +1660,14 @@ lldb::user_id_t SBTarget::GetGloballyUniqueID() const { return LLDB_INVALID_GLOBALLY_UNIQUE_TARGET_ID; } +const char *SBTarget::GetTargetSessionName() const { + LLDB_INSTRUMENT_VA(this); + + if (TargetSP target_sp = GetSP()) + return ConstString(target_sp->GetTargetSessionName()).AsCString(); + return nullptr; +} + SBError SBTarget::SetLabel(const char *label) { LLDB_INSTRUMENT_VA(this, label); diff --git a/lldb/source/Commands/CommandObjectTarget.cpp b/lldb/source/Commands/CommandObjectTarget.cpp index 30bca639060e6..7f880d223d6c3 100644 --- a/lldb/source/Commands/CommandObjectTarget.cpp +++ b/lldb/source/Commands/CommandObjectTarget.cpp @@ -60,6 +60,7 @@ #include "lldb/lldb-forward.h" #include "lldb/lldb-private-enumerations.h" +#include "clang/Driver/CreateInvocationFromArgs.h" #include "clang/Frontend/CompilerInstance.h" #include 
"clang/Frontend/CompilerInvocation.h" #include "clang/Frontend/FrontendActions.h" diff --git a/lldb/source/Plugins/ExpressionParser/Clang/CMakeLists.txt b/lldb/source/Plugins/ExpressionParser/Clang/CMakeLists.txt index 01d588ff6a78b..759a7c4dd14fb 100644 --- a/lldb/source/Plugins/ExpressionParser/Clang/CMakeLists.txt +++ b/lldb/source/Plugins/ExpressionParser/Clang/CMakeLists.txt @@ -51,10 +51,10 @@ add_lldb_library(lldbPluginExpressionParserClang CLANG_LIBS clangAST clangCodeGen - clangDriver clangEdit clangFrontend clangLex + clangOptions clangParse clangRewrite clangRewriteFrontend diff --git a/lldb/source/Plugins/ExpressionParser/Clang/ClangHost.cpp b/lldb/source/Plugins/ExpressionParser/Clang/ClangHost.cpp index 6de851081598f..660a21e3c6a8d 100644 --- a/lldb/source/Plugins/ExpressionParser/Clang/ClangHost.cpp +++ b/lldb/source/Plugins/ExpressionParser/Clang/ClangHost.cpp @@ -10,7 +10,7 @@ #include "clang/Basic/Version.h" #include "clang/Config/config.h" -#include "clang/Driver/Driver.h" +#include "clang/Options/OptionUtils.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/Twine.h" @@ -53,7 +53,7 @@ static bool DefaultComputeClangResourceDirectory(FileSpec &lldb_shlib_spec, std::string raw_path = lldb_shlib_spec.GetPath(); llvm::StringRef parent_dir = llvm::sys::path::parent_path(raw_path); static const std::string clang_resource_path = - clang::driver::Driver::GetResourcesPath("bin/lldb"); + clang::GetResourcesPath("bin/lldb"); static const llvm::StringRef kResourceDirSuffixes[] = { // LLVM.org's build of LLDB uses the clang resource directory placed diff --git a/lldb/source/Plugins/ExpressionParser/Clang/ClangModulesDeclVendor.cpp b/lldb/source/Plugins/ExpressionParser/Clang/ClangModulesDeclVendor.cpp index e37c84efefdc9..ce8dc50b84a31 100644 --- a/lldb/source/Plugins/ExpressionParser/Clang/ClangModulesDeclVendor.cpp +++ b/lldb/source/Plugins/ExpressionParser/Clang/ClangModulesDeclVendor.cpp @@ -10,6 +10,7 @@ #include "clang/Basic/DiagnosticFrontend.h" 
#include "clang/Basic/IdentifierTable.h" #include "clang/Basic/TargetInfo.h" +#include "clang/Driver/CreateInvocationFromArgs.h" #include "clang/Frontend/CompilerInstance.h" #include "clang/Frontend/FrontendActions.h" #include "clang/Frontend/TextDiagnosticPrinter.h" diff --git a/lldb/source/Target/Process.cpp b/lldb/source/Target/Process.cpp index 69edea503002e..5879b8f4795ab 100644 --- a/lldb/source/Target/Process.cpp +++ b/lldb/source/Target/Process.cpp @@ -2452,8 +2452,10 @@ size_t Process::ReadScalarIntegerFromMemory(addr_t addr, uint32_t byte_size, scalar = data.GetMaxU32(&offset, byte_size); else scalar = data.GetMaxU64(&offset, byte_size); - if (is_signed) + if (is_signed) { + scalar.MakeSigned(); scalar.SignExtend(byte_size * 8); + } return bytes_read; } } else { diff --git a/lldb/source/Target/Target.cpp b/lldb/source/Target/Target.cpp index 5f2e7af54044a..3a936b85f6339 100644 --- a/lldb/source/Target/Target.cpp +++ b/lldb/source/Target/Target.cpp @@ -185,6 +185,8 @@ Target::Target(Debugger &debugger, const ArchSpec &target_arch, m_internal_stop_hooks(), m_latest_stop_hook_id(0), m_valid(true), m_suppress_stop_hooks(false), m_is_dummy_target(is_dummy_target), m_target_unique_id(g_target_unique_id++), + m_target_session_name( + llvm::formatv("Session {0}", m_target_unique_id).str()), m_frame_recognizer_manager_up( std::make_unique()) { SetEventName(eBroadcastBitBreakpointChanged, "breakpoint-changed"); @@ -192,6 +194,7 @@ Target::Target(Debugger &debugger, const ArchSpec &target_arch, SetEventName(eBroadcastBitModulesUnloaded, "modules-unloaded"); SetEventName(eBroadcastBitWatchpointChanged, "watchpoint-changed"); SetEventName(eBroadcastBitSymbolsLoaded, "symbols-loaded"); + SetEventName(eBroadcastBitNewTargetCreated, "new-target-created"); CheckInWithManager(); @@ -2280,8 +2283,10 @@ size_t Target::ReadScalarIntegerFromMemory(const Address &addr, uint32_t byte_si else scalar = data.GetMaxU64(&offset, byte_size); - if (is_signed) + if (is_signed) { + 
scalar.MakeSigned(); scalar.SignExtend(byte_size * 8); + } return bytes_read; } } else { @@ -2296,7 +2301,7 @@ int64_t Target::ReadSignedIntegerFromMemory(const Address &addr, int64_t fail_value, Status &error, bool force_live_memory) { Scalar scalar; - if (ReadScalarIntegerFromMemory(addr, integer_byte_size, false, scalar, error, + if (ReadScalarIntegerFromMemory(addr, integer_byte_size, true, scalar, error, force_live_memory)) return scalar.SLongLong(fail_value); return fail_value; @@ -5198,6 +5203,11 @@ Target::TargetEventData::TargetEventData(const lldb::TargetSP &target_sp, const ModuleList &module_list) : EventData(), m_target_sp(target_sp), m_module_list(module_list) {} +Target::TargetEventData::TargetEventData( + const lldb::TargetSP &target_sp, const lldb::TargetSP &created_target_sp) + : EventData(), m_target_sp(target_sp), + m_created_target_sp(created_target_sp), m_module_list() {} + Target::TargetEventData::~TargetEventData() = default; llvm::StringRef Target::TargetEventData::GetFlavorString() { @@ -5232,6 +5242,15 @@ TargetSP Target::TargetEventData::GetTargetFromEvent(const Event *event_ptr) { return target_sp; } +TargetSP +Target::TargetEventData::GetCreatedTargetFromEvent(const Event *event_ptr) { + TargetSP created_target_sp; + const TargetEventData *event_data = GetEventDataFromEvent(event_ptr); + if (event_data) + created_target_sp = event_data->m_created_target_sp; + return created_target_sp; +} + ModuleList Target::TargetEventData::GetModuleListFromEvent(const Event *event_ptr) { ModuleList module_list; diff --git a/lldb/source/Utility/RegisterValue.cpp b/lldb/source/Utility/RegisterValue.cpp index c28c9e2d4d106..4d762dc80e7f5 100644 --- a/lldb/source/Utility/RegisterValue.cpp +++ b/lldb/source/Utility/RegisterValue.cpp @@ -196,9 +196,7 @@ Status RegisterValue::SetValueFromData(const RegisterInfo ®_info, SetUInt64(src.GetMaxU64(&src_offset, src_len)); else { std::vector native_endian_src(src_len, 0); - src.ExtractBytes(src_offset, src_len, - 
llvm::sys::IsLittleEndianHost ? eByteOrderLittle - : eByteOrderBig, + src.ExtractBytes(src_offset, src_len, endian::InlHostByteOrder(), native_endian_src.data()); llvm::APInt uint = llvm::APInt::getZero(src_len * 8); llvm::LoadIntFromMemory(uint, native_endian_src.data(), src_len); diff --git a/lldb/test/API/python_api/target/TestTargetAPI.py b/lldb/test/API/python_api/target/TestTargetAPI.py index d346563af18e2..d3c64d87375b4 100644 --- a/lldb/test/API/python_api/target/TestTargetAPI.py +++ b/lldb/test/API/python_api/target/TestTargetAPI.py @@ -105,6 +105,24 @@ def test_resolve_file_address(self): self.assertIsNotNone(data_section2) self.assertEqual(data_section.name, data_section2.name) + def test_get_arch_name(self): + d = {"EXE": "b.out"} + self.build(dictionary=d) + self.setTearDownCleanup(dictionary=d) + target = self.create_simple_target("b.out") + + arch_name = target.arch_name + self.assertTrue(len(arch_name) > 0, "Got an arch name") + + # Test consistency with triple. + triple = target.triple + self.assertTrue(len(triple) > 0, "Got a triple") + self.assertEqual( + triple.split("-")[0], + arch_name, + "Arch name is equal to the first item of the triple", + ) + def test_get_ABIName(self): d = {"EXE": "b.out"} self.build(dictionary=d) diff --git a/lldb/test/API/tools/lldb-dap/attach/TestDAP_attach.py b/lldb/test/API/tools/lldb-dap/attach/TestDAP_attach.py index 2db00a5ac3b6f..d6287397a93b0 100644 --- a/lldb/test/API/tools/lldb-dap/attach/TestDAP_attach.py +++ b/lldb/test/API/tools/lldb-dap/attach/TestDAP_attach.py @@ -75,3 +75,38 @@ def test_by_name_waitFor(self): self.spawn_thread.start() self.attach(program=program, waitFor=True) self.continue_and_verify_pid() + + def test_attach_with_missing_debuggerId_or_targetId(self): + """ + Test that attaching with only one of debuggerId/targetId specified + fails with the expected error message. 
+    """
+        self.build_and_create_debug_adapter()
+
+        # Test with only targetId specified (no debuggerId)
+        resp = self.attach(targetId=99999, expectFailure=True)
+        self.assertFalse(resp["success"])
+        self.assertIn(
+            "Both debuggerId and targetId must be specified together",
+            resp["body"]["error"]["format"],
+        )
+
+    def test_attach_with_invalid_debuggerId_and_targetId(self):
+        """
+        Test that attaching with both debuggerId and targetId specified but
+        invalid fails with an appropriate error message.
+        """
+        self.build_and_create_debug_adapter()
+
+        # Attach with both debuggerId=9999 and targetId=99999 (both invalid).
+        # Since debugger ID 9999 likely doesn't exist in the global registry,
+        # we expect a validation error.
+        resp = self.attach(debuggerId=9999, targetId=99999, expectFailure=True)
+        self.assertFalse(resp["success"])
+        error_msg = resp["body"]["error"]["format"]
+        # The debugger lookup must fail because debugger ID 9999 does not
+        # exist in the global registry.
+        self.assertIn(
+            "Unable to find existing debugger", error_msg,
+            f"Expected debugger/target not found error, got: {error_msg}",
+        )
diff --git a/lldb/test/API/tools/lldb-dap/startDebugging/TestDAP_startDebugging.py b/lldb/test/API/tools/lldb-dap/startDebugging/TestDAP_startDebugging.py
index b487257b6414d..7e60dd22f1084 100644
--- a/lldb/test/API/tools/lldb-dap/startDebugging/TestDAP_startDebugging.py
+++ b/lldb/test/API/tools/lldb-dap/startDebugging/TestDAP_startDebugging.py
@@ -36,3 +36,54 @@ def test_startDebugging(self):
         request = self.dap_server.reverse_requests[0]
         self.assertEqual(request["arguments"]["configuration"]["pid"], 321)
         self.assertEqual(request["arguments"]["request"], "attach")
+
+    def test_startDebugging_debugger_reuse(self):
+        """
+        Tests that debugger and target IDs can be passed through startDebugging
+        for debugger reuse. This verifies the infrastructure for child DAP
+        sessions to reuse the parent's debugger and attach to an existing target.
+ """ + program = self.getBuildArtifact("a.out") + source = "main.c" + self.build_and_launch(program) + + breakpoint_line = line_number(source, "// breakpoint") + self.set_source_breakpoints(source, [breakpoint_line]) + self.continue_to_next_stop() + + # Use mock IDs to test the infrastructure + # In a real scenario, these would come from the parent session + test_debugger_id = 1 + test_target_id = 100 + + # Send a startDebugging request with debuggerId and targetId + # This simulates creating a child DAP session that reuses the debugger + self.dap_server.request_evaluate( + f'`lldb-dap start-debugging attach \'{{"debuggerId":{test_debugger_id},"targetId":{test_target_id}}}\'', + context="repl", + ) + + self.continue_to_exit() + + # Verify the reverse request was sent with the correct IDs + self.assertEqual( + len(self.dap_server.reverse_requests), + 1, + "Should have received one startDebugging reverse request", + ) + + request = self.dap_server.reverse_requests[0] + self.assertEqual(request["command"], "startDebugging") + self.assertEqual(request["arguments"]["request"], "attach") + + config = request["arguments"]["configuration"] + self.assertEqual( + config["debuggerId"], + test_debugger_id, + "Reverse request should include debugger ID", + ) + self.assertEqual( + config["targetId"], + test_target_id, + "Reverse request should include target ID", + ) diff --git a/lldb/tools/lldb-dap/CMakeLists.txt b/lldb/tools/lldb-dap/CMakeLists.txt index fa940b7b73943..237c3043dbbc7 100644 --- a/lldb/tools/lldb-dap/CMakeLists.txt +++ b/lldb/tools/lldb-dap/CMakeLists.txt @@ -10,6 +10,7 @@ add_lldb_library(lldbDAP DAP.cpp DAPError.cpp DAPLog.cpp + DAPSessionManager.cpp EventHelper.cpp ExceptionBreakpoint.cpp FifoFiles.cpp diff --git a/lldb/tools/lldb-dap/DAP.cpp b/lldb/tools/lldb-dap/DAP.cpp index d4203a2f00983..465d85a07bd34 100644 --- a/lldb/tools/lldb-dap/DAP.cpp +++ b/lldb/tools/lldb-dap/DAP.cpp @@ -7,6 +7,7 @@ 
//===----------------------------------------------------------------------===// #include "DAP.h" +#include "CommandPlugins.h" #include "DAPLog.h" #include "EventHelper.h" #include "ExceptionBreakpoint.h" @@ -242,10 +243,12 @@ llvm::Error DAP::ConfigureIO(std::FILE *overrideOut, std::FILE *overrideErr) { } void DAP::StopEventHandlers() { - if (event_thread.joinable()) { - broadcaster.BroadcastEventByType(eBroadcastBitStopEventThread); - event_thread.join(); - } + event_thread_sp.reset(); + + // Clean up expired event threads from the session manager. + DAPSessionManager::GetInstance().ReleaseExpiredEventThreads(); + + // Still handle the progress thread normally since it's per-DAP instance. if (progress_event_thread.joinable()) { broadcaster.BroadcastEventByType(eBroadcastBitStopProgressThread); progress_event_thread.join(); @@ -816,7 +819,8 @@ void DAP::SetTarget(const lldb::SBTarget target) { lldb::SBTarget::eBroadcastBitModulesLoaded | lldb::SBTarget::eBroadcastBitModulesUnloaded | lldb::SBTarget::eBroadcastBitSymbolsLoaded | - lldb::SBTarget::eBroadcastBitSymbolsChanged); + lldb::SBTarget::eBroadcastBitSymbolsChanged | + lldb::SBTarget::eBroadcastBitNewTargetCreated); listener.StartListeningForEvents(this->broadcaster, eBroadcastBitStopEventThread); } @@ -1303,13 +1307,99 @@ protocol::Capabilities DAP::GetCustomCapabilities() { } void DAP::StartEventThread() { - event_thread = std::thread(&DAP::EventThread, this); + // Get event thread for this debugger (creates it if it doesn't exist). 
+ event_thread_sp = DAPSessionManager::GetInstance().GetEventThreadForDebugger( + debugger, this); } void DAP::StartProgressEventThread() { progress_event_thread = std::thread(&DAP::ProgressEventThread, this); } +void DAP::StartEventThreads() { + if (clientFeatures.contains(eClientFeatureProgressReporting)) + StartProgressEventThread(); + + StartEventThread(); +} + +llvm::Error DAP::InitializeDebugger(int debugger_id, + lldb::user_id_t target_id) { + // Find the existing debugger by ID + debugger = lldb::SBDebugger::FindDebuggerWithID(debugger_id); + if (!debugger.IsValid()) { + return llvm::createStringError( + "Unable to find existing debugger for debugger ID"); + } + + // Find the target within the debugger by its globally unique ID + lldb::SBTarget target = debugger.FindTargetByGloballyUniqueID(target_id); + if (!target.IsValid()) { + return llvm::createStringError( + "Unable to find existing target for target ID"); + } + + // Set the target for this DAP session. + SetTarget(target); + StartEventThreads(); + return llvm::Error::success(); +} + +llvm::Error DAP::InitializeDebugger() { + debugger = lldb::SBDebugger::Create(/*argument_name=*/false); + + // Configure input/output/error file descriptors. + debugger.SetInputFile(in); + target = debugger.GetDummyTarget(); + + llvm::Expected out_fd = out.GetWriteFileDescriptor(); + if (!out_fd) + return out_fd.takeError(); + debugger.SetOutputFile(lldb::SBFile(*out_fd, "w", false)); + + llvm::Expected err_fd = err.GetWriteFileDescriptor(); + if (!err_fd) + return err_fd.takeError(); + debugger.SetErrorFile(lldb::SBFile(*err_fd, "w", false)); + + // The sourceInitFile option is not part of the DAP specification. It is an + // extension used by the test suite to prevent sourcing `.lldbinit` and + // changing its behavior. The CLI flag --no-lldbinit takes precedence over + // the DAP parameter. 
+ bool should_source_init_files = !no_lldbinit && sourceInitFile; + if (should_source_init_files) { + debugger.SkipLLDBInitFiles(false); + debugger.SkipAppInitFiles(false); + lldb::SBCommandReturnObject init; + auto interp = debugger.GetCommandInterpreter(); + interp.SourceInitFileInGlobalDirectory(init); + interp.SourceInitFileInHomeDirectory(init); + } + + // Run initialization commands. + if (llvm::Error err = RunPreInitCommands()) + return err; + + auto cmd = debugger.GetCommandInterpreter().AddMultiwordCommand( + "lldb-dap", "Commands for managing lldb-dap."); + + if (clientFeatures.contains(eClientFeatureStartDebuggingRequest)) { + cmd.AddCommand( + "start-debugging", new StartDebuggingCommand(*this), + "Sends a startDebugging request from the debug adapter to the client " + "to start a child debug session of the same type as the caller."); + } + + cmd.AddCommand( + "repl-mode", new ReplModeCommand(*this), + "Get or set the repl behavior of lldb-dap evaluation requests."); + cmd.AddCommand("send-event", new SendEventCommand(*this), + "Sends an DAP event to the client."); + + StartEventThreads(); + return llvm::Error::success(); +} + void DAP::ProgressEventThread() { lldb::SBListener listener("lldb-dap.progress.listener"); debugger.GetBroadcaster().AddListener( @@ -1370,213 +1460,6 @@ void DAP::ProgressEventThread() { } } -// All events from the debugger, target, process, thread and frames are -// received in this function that runs in its own thread. We are using a -// "FILE *" to output packets back to VS Code and they have mutexes in them -// them prevent multiple threads from writing simultaneously so no locking -// is required. -void DAP::EventThread() { - llvm::set_thread_name("lldb.DAP.client." 
+ m_client_name + ".event_handler"); - lldb::SBListener listener = debugger.GetListener(); - broadcaster.AddListener(listener, eBroadcastBitStopEventThread); - debugger.GetBroadcaster().AddListener( - listener, lldb::eBroadcastBitError | lldb::eBroadcastBitWarning); - - // listen for thread events. - listener.StartListeningForEventClass( - debugger, lldb::SBThread::GetBroadcasterClassName(), - lldb::SBThread::eBroadcastBitStackChanged); - - lldb::SBEvent event; - bool done = false; - while (!done) { - if (!listener.WaitForEvent(UINT32_MAX, event)) - continue; - - const uint32_t event_mask = event.GetType(); - if (lldb::SBProcess::EventIsProcessEvent(event)) { - HandleProcessEvent(event, /*&process_exited=*/done); - } else if (lldb::SBTarget::EventIsTargetEvent(event)) { - HandleTargetEvent(event); - } else if (lldb::SBBreakpoint::EventIsBreakpointEvent(event)) { - HandleBreakpointEvent(event); - } else if (lldb::SBThread::EventIsThreadEvent(event)) { - HandleThreadEvent(event); - } else if (event_mask & lldb::eBroadcastBitError || - event_mask & lldb::eBroadcastBitWarning) { - HandleDiagnosticEvent(event); - } else if (event.BroadcasterMatchesRef(broadcaster)) { - if (event_mask & eBroadcastBitStopEventThread) { - done = true; - } - } - } -} - -void DAP::HandleProcessEvent(const lldb::SBEvent &event, bool &process_exited) { - lldb::SBProcess process = lldb::SBProcess::GetProcessFromEvent(event); - const uint32_t event_mask = event.GetType(); - if (event_mask & lldb::SBProcess::eBroadcastBitStateChanged) { - auto state = lldb::SBProcess::GetStateFromEvent(event); - switch (state) { - case lldb::eStateConnected: - case lldb::eStateDetached: - case lldb::eStateInvalid: - case lldb::eStateUnloaded: - break; - case lldb::eStateAttaching: - case lldb::eStateCrashed: - case lldb::eStateLaunching: - case lldb::eStateStopped: - case lldb::eStateSuspended: - // Only report a stopped event if the process was not - // automatically restarted. 
- if (!lldb::SBProcess::GetRestartedFromEvent(event)) { - SendStdOutStdErr(*this, process); - if (llvm::Error err = SendThreadStoppedEvent(*this)) - DAP_LOG_ERROR(log, std::move(err), - "({1}) reporting thread stopped: {0}", m_client_name); - } - break; - case lldb::eStateRunning: - case lldb::eStateStepping: - WillContinue(); - SendContinuedEvent(*this); - break; - case lldb::eStateExited: - lldb::SBStream stream; - process.GetStatus(stream); - SendOutput(OutputType::Console, stream.GetData()); - - // When restarting, we can get an "exited" event for the process we - // just killed with the old PID, or even with no PID. In that case - // we don't have to terminate the session. - if (process.GetProcessID() == LLDB_INVALID_PROCESS_ID || - process.GetProcessID() == restarting_process_id) { - restarting_process_id = LLDB_INVALID_PROCESS_ID; - } else { - // Run any exit LLDB commands the user specified in the - // launch.json - RunExitCommands(); - SendProcessExitedEvent(*this, process); - SendTerminatedEvent(); - process_exited = true; - } - break; - } - } else if ((event_mask & lldb::SBProcess::eBroadcastBitSTDOUT) || - (event_mask & lldb::SBProcess::eBroadcastBitSTDERR)) { - SendStdOutStdErr(*this, process); - } -} - -void DAP::HandleTargetEvent(const lldb::SBEvent &event) { - const uint32_t event_mask = event.GetType(); - if (event_mask & lldb::SBTarget::eBroadcastBitModulesLoaded || - event_mask & lldb::SBTarget::eBroadcastBitModulesUnloaded || - event_mask & lldb::SBTarget::eBroadcastBitSymbolsLoaded || - event_mask & lldb::SBTarget::eBroadcastBitSymbolsChanged) { - const uint32_t num_modules = lldb::SBTarget::GetNumModulesFromEvent(event); - const bool remove_module = - event_mask & lldb::SBTarget::eBroadcastBitModulesUnloaded; - - // NOTE: Both mutexes must be acquired to prevent deadlock when - // handling `modules_request`, which also requires both locks. 
- lldb::SBMutex api_mutex = GetAPIMutex(); - const std::scoped_lock guard(api_mutex, - modules_mutex); - for (uint32_t i = 0; i < num_modules; ++i) { - lldb::SBModule module = - lldb::SBTarget::GetModuleAtIndexFromEvent(i, event); - - std::optional p_module = - CreateModule(target, module, remove_module); - if (!p_module) - continue; - - const llvm::StringRef module_id = p_module->id; - - const bool module_exists = modules.contains(module_id); - if (remove_module && module_exists) { - modules.erase(module_id); - Send(protocol::Event{"module", - ModuleEventBody{std::move(p_module).value(), - ModuleEventBody::eReasonRemoved}}); - } else if (module_exists) { - Send(protocol::Event{"module", - ModuleEventBody{std::move(p_module).value(), - ModuleEventBody::eReasonChanged}}); - } else if (!remove_module) { - modules.insert(module_id); - Send(protocol::Event{"module", - ModuleEventBody{std::move(p_module).value(), - ModuleEventBody::eReasonNew}}); - } - } - } -} - -void DAP::HandleBreakpointEvent(const lldb::SBEvent &event) { - const uint32_t event_mask = event.GetType(); - if (!(event_mask & lldb::SBTarget::eBroadcastBitBreakpointChanged)) - return; - - auto event_type = lldb::SBBreakpoint::GetBreakpointEventTypeFromEvent(event); - auto bp = - Breakpoint(*this, lldb::SBBreakpoint::GetBreakpointFromEvent(event)); - // If the breakpoint was set through DAP, it will have the - // BreakpointBase::kDAPBreakpointLabel. Regardless of whether - // locations were added, removed, or resolved, the breakpoint isn't - // going away and the reason is always "changed". - if ((event_type & lldb::eBreakpointEventTypeLocationsAdded || - event_type & lldb::eBreakpointEventTypeLocationsRemoved || - event_type & lldb::eBreakpointEventTypeLocationsResolved) && - bp.MatchesName(BreakpointBase::kDAPBreakpointLabel)) { - // As the DAP client already knows the path of this breakpoint, we - // don't need to send it back as part of the "changed" event. 
This - // avoids sending paths that should be source mapped. Note that - // CreateBreakpoint doesn't apply source mapping and certain - // implementation ignore the source part of this event anyway. - protocol::Breakpoint protocol_bp = bp.ToProtocolBreakpoint(); - - // "source" is not needed here, unless we add adapter data to be - // saved by the client. - if (protocol_bp.source && !protocol_bp.source->adapterData) - protocol_bp.source = std::nullopt; - - llvm::json::Object body; - body.try_emplace("breakpoint", protocol_bp); - body.try_emplace("reason", "changed"); - - llvm::json::Object bp_event = CreateEventObject("breakpoint"); - bp_event.try_emplace("body", std::move(body)); - - SendJSON(llvm::json::Value(std::move(bp_event))); - } -} - -void DAP::HandleThreadEvent(const lldb::SBEvent &event) { - const uint32_t event_type = event.GetType(); - - if (event_type & lldb::SBThread::eBroadcastBitStackChanged) { - const lldb::SBThread evt_thread = lldb::SBThread::GetThreadFromEvent(event); - SendInvalidatedEvent(*this, {InvalidatedEventBody::eAreaStacks}, - evt_thread.GetThreadID()); - } -} - -void DAP::HandleDiagnosticEvent(const lldb::SBEvent &event) { - const lldb::SBStructuredData data = - lldb::SBDebugger::GetDiagnosticFromEvent(event); - if (!data.IsValid()) - return; - - std::string type = GetStringValue(data.GetValueForKey("type")); - std::string message = GetStringValue(data.GetValueForKey("message")); - SendOutput(OutputType::Important, - llvm::formatv("{0}: {1}", type, message).str()); -} - std::vector DAP::SetSourceBreakpoints( const protocol::Source &source, const std::optional> &breakpoints) { diff --git a/lldb/tools/lldb-dap/DAP.h b/lldb/tools/lldb-dap/DAP.h index 5d40341329f34..b5f2a57d9dc5f 100644 --- a/lldb/tools/lldb-dap/DAP.h +++ b/lldb/tools/lldb-dap/DAP.h @@ -10,6 +10,7 @@ #define LLDB_TOOLS_LLDB_DAP_DAP_H #include "DAPForward.h" +#include "DAPSessionManager.h" #include "ExceptionBreakpoint.h" #include "FunctionBreakpoint.h" #include 
"InstructionBreakpoint.h" @@ -47,6 +48,7 @@ #include #include #include +#include #include #include #include @@ -81,6 +83,8 @@ enum class ReplMode { Variable = 0, Command, Auto }; using DAPTransport = lldb_private::transport::JSONTransport; struct DAP final : public DAPTransport::MessageHandler { + friend class DAPSessionManager; + /// Path to the lldb-dap binary itself. static llvm::StringRef debug_adapter_path; @@ -157,6 +161,11 @@ struct DAP final : public DAPTransport::MessageHandler { /// Whether to disable sourcing .lldbinit files. bool no_lldbinit; + /// Stores whether the initialize request specified a value for + /// lldbExtSourceInitFile. Used by the test suite to prevent sourcing + /// `.lldbinit` and changing its behavior. + bool sourceInitFile = true; + /// The initial thread list upon attaching. std::vector initial_thread_list; @@ -408,9 +417,33 @@ struct DAP final : public DAPTransport::MessageHandler { lldb::SBMutex GetAPIMutex() const { return target.GetAPIMutex(); } + /// Get the client name for this DAP session. + llvm::StringRef GetClientName() const { return m_client_name; } + void StartEventThread(); void StartProgressEventThread(); + /// DAP debugger initialization functions. + /// @{ + + /// Perform complete DAP initialization for a new debugger. + llvm::Error InitializeDebugger(); + + /// Perform complete DAP initialization by reusing an existing debugger and + /// target. + /// + /// \param[in] debugger_id + /// The ID of the existing debugger to reuse. + /// + /// \param[in] target_id + /// The globally unique ID of the existing target to reuse. + llvm::Error InitializeDebugger(int debugger_id, lldb::user_id_t target_id); + + /// Start event handling threads based on client capabilities. + void StartEventThreads(); + + /// @} + /// Sets the given protocol `breakpoints` in the given `source`, while /// removing any existing breakpoints in the given source if they are not in /// `breakpoint`. 
@@ -453,15 +486,11 @@ struct DAP final : public DAPTransport::MessageHandler { /// Event threads. /// @{ - void EventThread(); - void HandleProcessEvent(const lldb::SBEvent &event, bool &process_exited); - void HandleTargetEvent(const lldb::SBEvent &event); - void HandleBreakpointEvent(const lldb::SBEvent &event); - void HandleThreadEvent(const lldb::SBEvent &event); - void HandleDiagnosticEvent(const lldb::SBEvent &event); void ProgressEventThread(); - std::thread event_thread; + /// Event thread is a shared pointer in case we have a multiple + /// DAP instances sharing the same event thread. + std::shared_ptr event_thread_sp; std::thread progress_event_thread; /// @} diff --git a/lldb/tools/lldb-dap/DAPForward.h b/lldb/tools/lldb-dap/DAPForward.h index 6620d5fd33642..e7fbbf669e7ec 100644 --- a/lldb/tools/lldb-dap/DAPForward.h +++ b/lldb/tools/lldb-dap/DAPForward.h @@ -28,6 +28,7 @@ namespace lldb { class SBAttachInfo; class SBBreakpoint; class SBBreakpointLocation; +class SBBroadcaster; class SBCommandInterpreter; class SBCommandReturnObject; class SBCommunication; diff --git a/lldb/tools/lldb-dap/DAPSessionManager.cpp b/lldb/tools/lldb-dap/DAPSessionManager.cpp new file mode 100644 index 0000000000000..d5440ffd64597 --- /dev/null +++ b/lldb/tools/lldb-dap/DAPSessionManager.cpp @@ -0,0 +1,142 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +#include "DAPSessionManager.h" +#include "DAP.h" +#include "EventHelper.h" +#include "lldb/API/SBBroadcaster.h" +#include "lldb/API/SBEvent.h" +#include "lldb/API/SBTarget.h" +#include "lldb/Host/MainLoopBase.h" +#include "llvm/Support/Threading.h" +#include "llvm/Support/WithColor.h" + +#include +#include + +namespace lldb_dap { + +ManagedEventThread::ManagedEventThread(lldb::SBBroadcaster broadcaster, + std::thread t) + : m_broadcaster(broadcaster), m_event_thread(std::move(t)) {} + +ManagedEventThread::~ManagedEventThread() { + if (m_event_thread.joinable()) { + m_broadcaster.BroadcastEventByType(eBroadcastBitStopEventThread); + m_event_thread.join(); + } +} + +DAPSessionManager &DAPSessionManager::GetInstance() { + static std::once_flag initialized; + static DAPSessionManager *instance = + nullptr; // NOTE: intentional leak to avoid issues with C++ destructor + // chain + + std::call_once(initialized, []() { instance = new DAPSessionManager(); }); + + return *instance; +} + +void DAPSessionManager::RegisterSession(lldb_private::MainLoop *loop, + DAP *dap) { + std::lock_guard lock(m_sessions_mutex); + m_active_sessions[loop] = dap; +} + +void DAPSessionManager::UnregisterSession(lldb_private::MainLoop *loop) { + std::unique_lock lock(m_sessions_mutex); + m_active_sessions.erase(loop); + std::notify_all_at_thread_exit(m_sessions_condition, std::move(lock)); +} + +std::vector DAPSessionManager::GetActiveSessions() { + std::lock_guard lock(m_sessions_mutex); + std::vector sessions; + for (const auto &[loop, dap] : m_active_sessions) + if (dap) + sessions.emplace_back(dap); + return sessions; +} + +void DAPSessionManager::DisconnectAllSessions() { + std::lock_guard lock(m_sessions_mutex); + m_client_failed = false; + for (auto [loop, dap] : m_active_sessions) { + if (dap) { + if (llvm::Error error = dap->Disconnect()) { + 
m_client_failed = true; + llvm::WithColor::error() << "DAP client disconnected failed: " + << llvm::toString(std::move(error)) << "\n"; + } + loop->AddPendingCallback( + [](lldb_private::MainLoopBase &loop) { loop.RequestTermination(); }); + } + } +} + +llvm::Error DAPSessionManager::WaitForAllSessionsToDisconnect() { + std::unique_lock lock(m_sessions_mutex); + m_sessions_condition.wait(lock, [this] { return m_active_sessions.empty(); }); + + // Check if any disconnection failed and return appropriate error. + if (m_client_failed) + return llvm::make_error( + "disconnecting all clients failed", llvm::inconvertibleErrorCode()); + + return llvm::Error::success(); +} + +std::shared_ptr +DAPSessionManager::GetEventThreadForDebugger(lldb::SBDebugger debugger, + DAP *requesting_dap) { + lldb::user_id_t debugger_id = debugger.GetID(); + std::lock_guard lock(m_sessions_mutex); + + // Try to use shared event thread, if it exists. + if (auto it = m_debugger_event_threads.find(debugger_id); + it != m_debugger_event_threads.end()) { + if (std::shared_ptr thread_sp = it->second.lock()) + return thread_sp; + // Our weak pointer has expired. + m_debugger_event_threads.erase(it); + } + + // Create a new event thread and store it. 
+  auto new_thread_sp = std::make_shared<ManagedEventThread>(
+      requesting_dap->broadcaster,
+      std::thread(EventThread, debugger, requesting_dap->broadcaster,
+                  requesting_dap->m_client_name, requesting_dap->log));
+  m_debugger_event_threads[debugger_id] = new_thread_sp;
+  return new_thread_sp;
+}
+
+DAP *DAPSessionManager::FindDAPForTarget(lldb::SBTarget target) {
+  std::lock_guard lock(m_sessions_mutex);
+
+  for (const auto &[loop, dap] : m_active_sessions)
+    if (dap && dap->target.IsValid() && dap->target == target)
+      return dap;
+
+  return nullptr;
+}
+
+void DAPSessionManager::ReleaseExpiredEventThreads() {
+  std::lock_guard lock(m_sessions_mutex);
+  for (auto it = m_debugger_event_threads.begin();
+       it != m_debugger_event_threads.end();) {
+    // Check if the weak_ptr has expired (no DAP instances are using it
+    // anymore).
+    if (it->second.expired()) {
+      it = m_debugger_event_threads.erase(it);
+    } else {
+      ++it;
+    }
+  }
+}
+
+} // namespace lldb_dap
diff --git a/lldb/tools/lldb-dap/DAPSessionManager.h b/lldb/tools/lldb-dap/DAPSessionManager.h
new file mode 100644
index 0000000000000..ad76b081ad78b
--- /dev/null
+++ b/lldb/tools/lldb-dap/DAPSessionManager.h
@@ -0,0 +1,119 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains the declarations of the DAPSessionManager and
+/// ManagedEventThread classes, which are used to manage multiple concurrent DAP
+/// sessions in a single lldb-dap process.
+/// +//===----------------------------------------------------------------------===// + +#ifndef LLDB_TOOLS_LLDB_DAP_DAPSESSIONMANAGER_H +#define LLDB_TOOLS_LLDB_DAP_DAPSESSIONMANAGER_H + +#include "lldb/API/SBBroadcaster.h" +#include "lldb/API/SBDebugger.h" +#include "lldb/API/SBTarget.h" +#include "lldb/Host/MainLoop.h" +#include "lldb/lldb-types.h" +#include "llvm/Support/Error.h" +#include +#include +#include +#include +#include +#include +#include + +namespace lldb_dap { + +// Forward declarations +struct DAP; + +class ManagedEventThread { +public: + // Constructor declaration + ManagedEventThread(lldb::SBBroadcaster broadcaster, std::thread t); + + ~ManagedEventThread(); + + ManagedEventThread(const ManagedEventThread &) = delete; + ManagedEventThread &operator=(const ManagedEventThread &) = delete; + +private: + lldb::SBBroadcaster m_broadcaster; + std::thread m_event_thread; +}; + +/// Global DAP session manager that manages multiple concurrent DAP sessions in +/// a single lldb-dap process. Handles session lifecycle tracking, coordinates +/// shared debugger event threads, and facilitates target handoff between +/// sessions for dynamically created targets. +class DAPSessionManager { +public: + /// Get the singleton instance of the DAP session manager. + static DAPSessionManager &GetInstance(); + + /// Register a DAP session. + void RegisterSession(lldb_private::MainLoop *loop, DAP *dap); + + /// Unregister a DAP session. Called by sessions when they complete their + /// disconnection, which unblocks WaitForAllSessionsToDisconnect(). + void UnregisterSession(lldb_private::MainLoop *loop); + + /// Get all active DAP sessions. + std::vector GetActiveSessions(); + + /// Disconnect all registered sessions by calling Disconnect() on + /// each and requesting their event loops to terminate. Used during + /// shutdown to force all sessions to begin disconnecting. + void DisconnectAllSessions(); + + /// Block until all sessions disconnect and unregister. 
Returns an error if + /// DisconnectAllSessions() was called and any disconnection failed. + llvm::Error WaitForAllSessionsToDisconnect(); + + /// Get or create event thread for a specific debugger. + std::shared_ptr + GetEventThreadForDebugger(lldb::SBDebugger debugger, DAP *requesting_dap); + + /// Find the DAP instance that owns the given target. + DAP *FindDAPForTarget(lldb::SBTarget target); + + /// Static convenience method for FindDAPForTarget. + static DAP *FindDAP(lldb::SBTarget target) { + return GetInstance().FindDAPForTarget(target); + } + + /// Clean up expired event threads from the collection. + void ReleaseExpiredEventThreads(); + +private: + DAPSessionManager() = default; + ~DAPSessionManager() = default; + + // Non-copyable and non-movable. + DAPSessionManager(const DAPSessionManager &) = delete; + DAPSessionManager &operator=(const DAPSessionManager &) = delete; + DAPSessionManager(DAPSessionManager &&) = delete; + DAPSessionManager &operator=(DAPSessionManager &&) = delete; + + bool m_client_failed = false; + std::mutex m_sessions_mutex; + std::condition_variable m_sessions_condition; + std::map m_active_sessions; + + /// Map from debugger ID to its event thread, used when multiple DAP sessions + /// share the same debugger instance. 
+ std::map> + m_debugger_event_threads; +}; + +} // namespace lldb_dap + +#endif // LLDB_TOOLS_LLDB_DAP_DAPSESSIONMANAGER_H diff --git a/lldb/tools/lldb-dap/EventHelper.cpp b/lldb/tools/lldb-dap/EventHelper.cpp index 12d9e21c52ab3..bdb6bb55fe168 100644 --- a/lldb/tools/lldb-dap/EventHelper.cpp +++ b/lldb/tools/lldb-dap/EventHelper.cpp @@ -7,16 +7,28 @@ //===----------------------------------------------------------------------===// #include "EventHelper.h" +#include "Breakpoint.h" +#include "BreakpointBase.h" #include "DAP.h" #include "DAPError.h" +#include "DAPLog.h" +#include "DAPSessionManager.h" +#include "Handler/ResponseHandler.h" #include "JSONUtils.h" #include "LLDBUtils.h" #include "Protocol/ProtocolEvents.h" #include "Protocol/ProtocolRequests.h" #include "Protocol/ProtocolTypes.h" +#include "ProtocolUtils.h" +#include "lldb/API/SBEvent.h" #include "lldb/API/SBFileSpec.h" +#include "lldb/API/SBListener.h" #include "lldb/API/SBPlatform.h" +#include "lldb/API/SBStream.h" #include "llvm/Support/Error.h" +#include "llvm/Support/FormatVariadic.h" +#include "llvm/Support/Threading.h" +#include #include #if defined(_WIN32) @@ -306,4 +318,312 @@ void SendMemoryEvent(DAP &dap, lldb::SBValue variable) { dap.Send(protocol::Event{"memory", std::move(body)}); } +// Event handler functions that are called by EventThread. +// These handlers extract the necessary objects from events and find the +// appropriate DAP instance to handle them, maintaining compatibility with +// the original DAP::Handle*Event pattern while supporting multi-session +// debugging. + +void HandleProcessEvent(const lldb::SBEvent &event, bool &process_exited, + Log *log) { + lldb::SBProcess process = lldb::SBProcess::GetProcessFromEvent(event); + + // Find the DAP instance that owns this process's target. 
+ DAP *dap = DAPSessionManager::FindDAP(process.GetTarget()); + if (!dap) { + DAP_LOG(log, "Unable to find DAP instance for process {0}", + process.GetProcessID()); + return; + } + + const uint32_t event_mask = event.GetType(); + + if (event_mask & lldb::SBProcess::eBroadcastBitStateChanged) { + auto state = lldb::SBProcess::GetStateFromEvent(event); + switch (state) { + case lldb::eStateConnected: + case lldb::eStateDetached: + case lldb::eStateInvalid: + case lldb::eStateUnloaded: + break; + case lldb::eStateAttaching: + case lldb::eStateCrashed: + case lldb::eStateLaunching: + case lldb::eStateStopped: + case lldb::eStateSuspended: + // Only report a stopped event if the process was not + // automatically restarted. + if (!lldb::SBProcess::GetRestartedFromEvent(event)) { + SendStdOutStdErr(*dap, process); + if (llvm::Error err = SendThreadStoppedEvent(*dap)) + DAP_LOG_ERROR(dap->log, std::move(err), + "({1}) reporting thread stopped: {0}", + dap->GetClientName()); + } + break; + case lldb::eStateRunning: + case lldb::eStateStepping: + dap->WillContinue(); + SendContinuedEvent(*dap); + break; + case lldb::eStateExited: + lldb::SBStream stream; + process.GetStatus(stream); + dap->SendOutput(OutputType::Console, stream.GetData()); + + // When restarting, we can get an "exited" event for the process we + // just killed with the old PID, or even with no PID. In that case + // we don't have to terminate the session. 
+ if (process.GetProcessID() == LLDB_INVALID_PROCESS_ID || + process.GetProcessID() == dap->restarting_process_id) { + dap->restarting_process_id = LLDB_INVALID_PROCESS_ID; + } else { + // Run any exit LLDB commands the user specified in the + // launch.json + dap->RunExitCommands(); + SendProcessExitedEvent(*dap, process); + dap->SendTerminatedEvent(); + process_exited = true; + } + break; + } + } else if ((event_mask & lldb::SBProcess::eBroadcastBitSTDOUT) || + (event_mask & lldb::SBProcess::eBroadcastBitSTDERR)) { + SendStdOutStdErr(*dap, process); + } +} + +void HandleTargetEvent(const lldb::SBEvent &event, Log *log) { + lldb::SBTarget target = lldb::SBTarget::GetTargetFromEvent(event); + + // Find the DAP instance that owns this target. + DAP *dap = DAPSessionManager::FindDAP(target); + if (!dap) { + DAP_LOG(log, "Unable to find DAP instance for target"); + return; + } + + const uint32_t event_mask = event.GetType(); + if (event_mask & lldb::SBTarget::eBroadcastBitModulesLoaded || + event_mask & lldb::SBTarget::eBroadcastBitModulesUnloaded || + event_mask & lldb::SBTarget::eBroadcastBitSymbolsLoaded || + event_mask & lldb::SBTarget::eBroadcastBitSymbolsChanged) { + const uint32_t num_modules = lldb::SBTarget::GetNumModulesFromEvent(event); + const bool remove_module = + event_mask & lldb::SBTarget::eBroadcastBitModulesUnloaded; + + // NOTE: Both mutexes must be acquired to prevent deadlock when + // handling `modules_request`, which also requires both locks. 
+ lldb::SBMutex api_mutex = dap->GetAPIMutex(); + const std::scoped_lock guard(api_mutex, + dap->modules_mutex); + for (uint32_t i = 0; i < num_modules; ++i) { + lldb::SBModule module = + lldb::SBTarget::GetModuleAtIndexFromEvent(i, event); + + std::optional p_module = + CreateModule(dap->target, module, remove_module); + if (!p_module) + continue; + + llvm::StringRef module_id = p_module->id; + + const bool module_exists = dap->modules.contains(module_id); + if (remove_module && module_exists) { + dap->modules.erase(module_id); + dap->Send(protocol::Event{ + "module", protocol::ModuleEventBody{ + std::move(p_module).value(), + protocol::ModuleEventBody::eReasonRemoved}}); + } else if (module_exists) { + dap->Send(protocol::Event{ + "module", protocol::ModuleEventBody{ + std::move(p_module).value(), + protocol::ModuleEventBody::eReasonChanged}}); + } else if (!remove_module) { + dap->modules.insert(module_id); + dap->Send(protocol::Event{ + "module", + protocol::ModuleEventBody{std::move(p_module).value(), + protocol::ModuleEventBody::eReasonNew}}); + } + } + } else if (event_mask & lldb::SBTarget::eBroadcastBitNewTargetCreated) { + // For NewTargetCreated events, GetTargetFromEvent returns the parent + // target, and GetCreatedTargetFromEvent returns the newly created target. + lldb::SBTarget created_target = + lldb::SBTarget::GetCreatedTargetFromEvent(event); + + if (!target.IsValid() || !created_target.IsValid()) { + DAP_LOG(log, "Received NewTargetCreated event but parent or " + "created target is invalid"); + return; + } + + // Send a startDebugging reverse request with the debugger and target + // IDs. The new DAP instance will use these IDs to find the existing + // debugger and target via FindDebuggerWithID and + // FindTargetByGloballyUniqueID. 
+ llvm::json::Object configuration; + configuration.try_emplace("type", "lldb"); + configuration.try_emplace("debuggerId", + created_target.GetDebugger().GetID()); + configuration.try_emplace("targetId", created_target.GetGloballyUniqueID()); + configuration.try_emplace("name", created_target.GetTargetSessionName()); + + llvm::json::Object request; + request.try_emplace("request", "attach"); + request.try_emplace("configuration", std::move(configuration)); + + dap->SendReverseRequest("startDebugging", + std::move(request)); + } +} + +void HandleBreakpointEvent(const lldb::SBEvent &event, Log *log) { + const uint32_t event_mask = event.GetType(); + if (!(event_mask & lldb::SBTarget::eBroadcastBitBreakpointChanged)) + return; + + lldb::SBBreakpoint bp = lldb::SBBreakpoint::GetBreakpointFromEvent(event); + if (!bp.IsValid()) + return; + + // Find the DAP instance that owns this breakpoint's target. + DAP *dap = DAPSessionManager::FindDAP(bp.GetTarget()); + if (!dap) { + DAP_LOG(log, "Unable to find DAP instance for breakpoint"); + return; + } + + auto event_type = lldb::SBBreakpoint::GetBreakpointEventTypeFromEvent(event); + auto breakpoint = Breakpoint(*dap, bp); + // If the breakpoint was set through DAP, it will have the + // BreakpointBase::kDAPBreakpointLabel. Regardless of whether + // locations were added, removed, or resolved, the breakpoint isn't + // going away and the reason is always "changed". + if ((event_type & lldb::eBreakpointEventTypeLocationsAdded || + event_type & lldb::eBreakpointEventTypeLocationsRemoved || + event_type & lldb::eBreakpointEventTypeLocationsResolved) && + breakpoint.MatchesName(BreakpointBase::kDAPBreakpointLabel)) { + // As the DAP client already knows the path of this breakpoint, we + // don't need to send it back as part of the "changed" event. This + // avoids sending paths that should be source mapped. 
Note that
+    // CreateBreakpoint doesn't apply source mapping and certain
+    // implementations ignore the source part of this event anyway.
+    protocol::Breakpoint protocol_bp = breakpoint.ToProtocolBreakpoint();
+
+    // "source" is not needed here, unless we add adapter data to be
+    // saved by the client.
+    if (protocol_bp.source && !protocol_bp.source->adapterData)
+      protocol_bp.source = std::nullopt;
+
+    llvm::json::Object body;
+    body.try_emplace("breakpoint", protocol_bp);
+    body.try_emplace("reason", "changed");
+
+    llvm::json::Object bp_event = CreateEventObject("breakpoint");
+    bp_event.try_emplace("body", std::move(body));
+
+    dap->SendJSON(llvm::json::Value(std::move(bp_event)));
+  }
+}
+
+void HandleThreadEvent(const lldb::SBEvent &event, Log *log) {
+  uint32_t event_type = event.GetType();
+
+  if (!(event_type & lldb::SBThread::eBroadcastBitStackChanged))
+    return;
+
+  lldb::SBThread thread = lldb::SBThread::GetThreadFromEvent(event);
+  if (!thread.IsValid())
+    return;
+
+  // Find the DAP instance that owns this thread's process/target.
+  DAP *dap = DAPSessionManager::FindDAP(thread.GetProcess().GetTarget());
+  if (!dap) {
+    DAP_LOG(log, "Unable to find DAP instance for thread");
+    return;
+  }
+
+  SendInvalidatedEvent(*dap, {protocol::InvalidatedEventBody::eAreaStacks},
+                       thread.GetThreadID());
+}
+
+void HandleDiagnosticEvent(const lldb::SBEvent &event, Log *log) {
+  // Global debugger events - send to all DAP instances.
+  std::vector<DAP *> active_instances =
+      DAPSessionManager::GetInstance().GetActiveSessions();
+  for (DAP *dap_instance : active_instances) {
+    if (!dap_instance)
+      continue;
+
+    lldb::SBStructuredData data =
+        lldb::SBDebugger::GetDiagnosticFromEvent(event);
+    if (!data.IsValid())
+      continue;
+
+    std::string type = GetStringValue(data.GetValueForKey("type"));
+    std::string message = GetStringValue(data.GetValueForKey("message"));
+    dap_instance->SendOutput(OutputType::Important,
+                             llvm::formatv("{0}: {1}", type, message).str());
+  }
+}
+
+// Note: EventThread() is architecturally different from the other functions in
+// this file. While the functions above are event helpers that operate on a
+// single DAP instance (taking `DAP &dap` as a parameter), EventThread() is a
+// shared event processing loop that:
+// 1. Listens to events from a shared debugger instance
+// 2. Dispatches events to the appropriate handler, which internally finds the
+//    DAP instance using DAPSessionManager::FindDAP()
+// 3. Handles events for multiple different DAP sessions
+// This allows multiple DAP sessions to share a single debugger and event
+// thread, which is essential for the target handoff mechanism where child
+// processes/targets are debugged in separate DAP sessions.
+//
+// All events from the debugger, target, process, thread and frames are
+// received in this function that runs in its own thread. We are using a
+// "FILE *" to output packets back to VS Code and they have mutexes in them
+// that prevent multiple threads from writing simultaneously so no locking
+// is required.
+void EventThread(lldb::SBDebugger debugger, lldb::SBBroadcaster broadcaster,
+                 llvm::StringRef client_name, Log *log) {
+  llvm::set_thread_name("lldb.DAP.client." 
+ client_name + ".event_handler"); + lldb::SBListener listener = debugger.GetListener(); + broadcaster.AddListener(listener, eBroadcastBitStopEventThread); + debugger.GetBroadcaster().AddListener( + listener, lldb::eBroadcastBitError | lldb::eBroadcastBitWarning); + + // listen for thread events. + listener.StartListeningForEventClass( + debugger, lldb::SBThread::GetBroadcasterClassName(), + lldb::SBThread::eBroadcastBitStackChanged); + + lldb::SBEvent event; + bool done = false; + while (!done) { + if (!listener.WaitForEvent(UINT32_MAX, event)) + continue; + + const uint32_t event_mask = event.GetType(); + if (lldb::SBProcess::EventIsProcessEvent(event)) { + HandleProcessEvent(event, /*&process_exited=*/done, log); + } else if (lldb::SBTarget::EventIsTargetEvent(event)) { + HandleTargetEvent(event, log); + } else if (lldb::SBBreakpoint::EventIsBreakpointEvent(event)) { + HandleBreakpointEvent(event, log); + } else if (lldb::SBThread::EventIsThreadEvent(event)) { + HandleThreadEvent(event, log); + } else if (event_mask & lldb::eBroadcastBitError || + event_mask & lldb::eBroadcastBitWarning) { + HandleDiagnosticEvent(event, log); + } else if (event.BroadcasterMatchesRef(broadcaster)) { + if (event_mask & eBroadcastBitStopEventThread) { + done = true; + } + } + } +} + } // namespace lldb_dap diff --git a/lldb/tools/lldb-dap/EventHelper.h b/lldb/tools/lldb-dap/EventHelper.h index be783d032a5ae..3beba2629b2e3 100644 --- a/lldb/tools/lldb-dap/EventHelper.h +++ b/lldb/tools/lldb-dap/EventHelper.h @@ -42,6 +42,26 @@ void SendInvalidatedEvent( void SendMemoryEvent(DAP &dap, lldb::SBValue variable); +/// Event thread function that handles debugger events for multiple DAP sessions +/// sharing the same debugger instance. This runs in its own thread and +/// dispatches events to the appropriate DAP instance. +/// +/// \param debugger The debugger instance to listen for events from. +/// \param broadcaster The broadcaster for stop event thread notifications. 
+/// \param client_name The client name for thread naming/logging purposes. +/// \param log The log instance for logging. +void EventThread(lldb::SBDebugger debugger, lldb::SBBroadcaster broadcaster, + llvm::StringRef client_name, Log *log); + +/// Event handler functions called by EventThread. +/// These handlers extract the necessary objects from events and find the +/// appropriate DAP instance to handle them. +void HandleProcessEvent(const lldb::SBEvent &event, bool &done, Log *log); +void HandleTargetEvent(const lldb::SBEvent &event, Log *log); +void HandleBreakpointEvent(const lldb::SBEvent &event, Log *log); +void HandleThreadEvent(const lldb::SBEvent &event, Log *log); +void HandleDiagnosticEvent(const lldb::SBEvent &event, Log *log); + } // namespace lldb_dap #endif diff --git a/lldb/tools/lldb-dap/Handler/AttachRequestHandler.cpp b/lldb/tools/lldb-dap/Handler/AttachRequestHandler.cpp index 490513fe8a0b8..24c0ca2111f40 100644 --- a/lldb/tools/lldb-dap/Handler/AttachRequestHandler.cpp +++ b/lldb/tools/lldb-dap/Handler/AttachRequestHandler.cpp @@ -17,6 +17,7 @@ #include "lldb/lldb-defines.h" #include "llvm/Support/Error.h" #include "llvm/Support/FileSystem.h" +#include using namespace llvm; using namespace lldb_dap::protocol; @@ -29,14 +30,31 @@ namespace lldb_dap { /// Since attaching is debugger/runtime specific, the arguments for this request /// are not part of this specification. Error AttachRequestHandler::Run(const AttachRequestArguments &args) const { + // Initialize DAP debugger and related components if not sharing previously + // launched debugger. + std::optional debugger_id = args.debuggerId; + std::optional target_id = args.targetId; + + // Validate that both debugger_id and target_id are provided together. 
+ if (debugger_id.has_value() != target_id.has_value()) { + return llvm::createStringError( + "Both debuggerId and targetId must be specified together for debugger " + "reuse, or both must be omitted to create a new debugger"); + } + + if (Error err = debugger_id && target_id + ? dap.InitializeDebugger(*debugger_id, *target_id) + : dap.InitializeDebugger()) + return err; + // Validate that we have a well formed attach request. if (args.attachCommands.empty() && args.coreFile.empty() && args.configuration.program.empty() && args.pid == LLDB_INVALID_PROCESS_ID && - args.gdbRemotePort == LLDB_DAP_INVALID_PORT) + args.gdbRemotePort == LLDB_DAP_INVALID_PORT && !target_id.has_value()) return make_error( "expected one of 'pid', 'program', 'attachCommands', " - "'coreFile' or 'gdb-remote-port' to be specified"); + "'coreFile', 'gdb-remote-port', or target_id to be specified"); // Check if we have mutually exclusive arguments. if ((args.pid != LLDB_INVALID_PROCESS_ID) && @@ -64,7 +82,18 @@ Error AttachRequestHandler::Run(const AttachRequestArguments &args) const { dap.ConfigureSourceMaps(); lldb::SBError error; - lldb::SBTarget target = dap.CreateTarget(error); + lldb::SBTarget target; + if (target_id) { + // Use the unique target ID to get the target. + target = dap.debugger.FindTargetByGloballyUniqueID(*target_id); + if (!target.IsValid()) { + error.SetErrorStringWithFormat("invalid target_id %lu in attach config", + *target_id); + } + } else { + target = dap.CreateTarget(error); + } + if (error.Fail()) return ToError(error); @@ -114,7 +143,7 @@ Error AttachRequestHandler::Run(const AttachRequestArguments &args) const { connect_url += std::to_string(args.gdbRemotePort); dap.target.ConnectRemote(listener, connect_url.c_str(), "gdb-remote", error); - } else { + } else if (!target_id.has_value()) { // Attach by pid or process name. 
lldb::SBAttachInfo attach_info; if (args.pid != LLDB_INVALID_PROCESS_ID) diff --git a/lldb/tools/lldb-dap/Handler/InitializeRequestHandler.cpp b/lldb/tools/lldb-dap/Handler/InitializeRequestHandler.cpp index 9069de4a3a690..53e1810a5b0e0 100644 --- a/lldb/tools/lldb-dap/Handler/InitializeRequestHandler.cpp +++ b/lldb/tools/lldb-dap/Handler/InitializeRequestHandler.cpp @@ -21,63 +21,9 @@ using namespace lldb_dap::protocol; /// Initialize request; value of command field is 'initialize'. llvm::Expected InitializeRequestHandler::Run( const InitializeRequestArguments &arguments) const { + // Store initialization arguments for later use in Launch/Attach. dap.clientFeatures = arguments.supportedFeatures; - - // Do not source init files until in/out/err are configured. - dap.debugger = lldb::SBDebugger::Create(false); - dap.debugger.SetInputFile(dap.in); - dap.target = dap.debugger.GetDummyTarget(); - - llvm::Expected out_fd = dap.out.GetWriteFileDescriptor(); - if (!out_fd) - return out_fd.takeError(); - dap.debugger.SetOutputFile(lldb::SBFile(*out_fd, "w", false)); - - llvm::Expected err_fd = dap.err.GetWriteFileDescriptor(); - if (!err_fd) - return err_fd.takeError(); - dap.debugger.SetErrorFile(lldb::SBFile(*err_fd, "w", false)); - - auto interp = dap.debugger.GetCommandInterpreter(); - - // The sourceInitFile option is not part of the DAP specification. It is an - // extension used by the test suite to prevent sourcing `.lldbinit` and - // changing its behavior. The CLI flag --no-lldbinit takes precedence over - // the DAP parameter. 
- bool should_source_init_files = - !dap.no_lldbinit && arguments.lldbExtSourceInitFile.value_or(true); - if (should_source_init_files) { - dap.debugger.SkipLLDBInitFiles(false); - dap.debugger.SkipAppInitFiles(false); - lldb::SBCommandReturnObject init; - interp.SourceInitFileInGlobalDirectory(init); - interp.SourceInitFileInHomeDirectory(init); - } - - if (llvm::Error err = dap.RunPreInitCommands()) - return err; - - auto cmd = dap.debugger.GetCommandInterpreter().AddMultiwordCommand( - "lldb-dap", "Commands for managing lldb-dap."); - if (arguments.supportedFeatures.contains( - eClientFeatureStartDebuggingRequest)) { - cmd.AddCommand( - "start-debugging", new StartDebuggingCommand(dap), - "Sends a startDebugging request from the debug adapter to the client " - "to start a child debug session of the same type as the caller."); - } - cmd.AddCommand( - "repl-mode", new ReplModeCommand(dap), - "Get or set the repl behavior of lldb-dap evaluation requests."); - cmd.AddCommand("send-event", new SendEventCommand(dap), - "Sends an DAP event to the client."); - - if (arguments.supportedFeatures.contains(eClientFeatureProgressReporting)) - dap.StartProgressEventThread(); - - // Start our event thread so we can receive events from the debugger, target, - // process and more. - dap.StartEventThread(); + dap.sourceInitFile = arguments.lldbExtSourceInitFile.value_or(true); return dap.GetCapabilities(); } diff --git a/lldb/tools/lldb-dap/Handler/LaunchRequestHandler.cpp b/lldb/tools/lldb-dap/Handler/LaunchRequestHandler.cpp index 553cbeaf849e2..329f0a7bf6453 100644 --- a/lldb/tools/lldb-dap/Handler/LaunchRequestHandler.cpp +++ b/lldb/tools/lldb-dap/Handler/LaunchRequestHandler.cpp @@ -22,6 +22,10 @@ namespace lldb_dap { /// Launch request; value of command field is 'launch'. Error LaunchRequestHandler::Run(const LaunchRequestArguments &arguments) const { + // Initialize DAP debugger. 
+ if (Error err = dap.InitializeDebugger()) + return err; + // Validate that we have a well formed launch request. if (!arguments.launchCommands.empty() && arguments.console != protocol::eConsoleInternal) diff --git a/lldb/tools/lldb-dap/Protocol/ProtocolRequests.cpp b/lldb/tools/lldb-dap/Protocol/ProtocolRequests.cpp index ac01cfb95dd41..d53a520ade39b 100644 --- a/lldb/tools/lldb-dap/Protocol/ProtocolRequests.cpp +++ b/lldb/tools/lldb-dap/Protocol/ProtocolRequests.cpp @@ -317,7 +317,9 @@ bool fromJSON(const json::Value &Params, AttachRequestArguments &ARA, O.mapOptional("waitFor", ARA.waitFor) && O.mapOptional("gdb-remote-port", ARA.gdbRemotePort) && O.mapOptional("gdb-remote-hostname", ARA.gdbRemoteHostname) && - O.mapOptional("coreFile", ARA.coreFile); + O.mapOptional("coreFile", ARA.coreFile) && + O.mapOptional("targetId", ARA.targetId) && + O.mapOptional("debuggerId", ARA.debuggerId); } bool fromJSON(const json::Value &Params, ContinueArguments &CA, json::Path P) { diff --git a/lldb/tools/lldb-dap/Protocol/ProtocolRequests.h b/lldb/tools/lldb-dap/Protocol/ProtocolRequests.h index c1e1e93f1e44a..37fc2465f6a05 100644 --- a/lldb/tools/lldb-dap/Protocol/ProtocolRequests.h +++ b/lldb/tools/lldb-dap/Protocol/ProtocolRequests.h @@ -350,6 +350,12 @@ struct AttachRequestArguments { /// Path to the core file to debug. std::string coreFile; + /// Unique ID of an existing target to attach to. + std::optional targetId; + + /// ID of an existing debugger instance to use. + std::optional debuggerId; + /// @} }; bool fromJSON(const llvm::json::Value &, AttachRequestArguments &, diff --git a/lldb/tools/lldb-dap/package.json b/lldb/tools/lldb-dap/package.json index 05dce285dd592..8e07c550b88c3 100644 --- a/lldb/tools/lldb-dap/package.json +++ b/lldb/tools/lldb-dap/package.json @@ -778,6 +778,10 @@ "description": "Custom commands that are executed instead of attaching to a process ID or to a process by name. 
These commands may optionally create a new target and must perform an attach. A valid process must exist after these commands complete or the \"attach\" will fail.", "default": [] }, + "targetId": { + "type": "number", + "description": "The globally unique target id to attach to. Used when a target is dynamically created." + }, "initCommands": { "type": "array", "items": { diff --git a/lldb/tools/lldb-dap/tool/lldb-dap.cpp b/lldb/tools/lldb-dap/tool/lldb-dap.cpp index f10ed12344cbd..27516b2a25678 100644 --- a/lldb/tools/lldb-dap/tool/lldb-dap.cpp +++ b/lldb/tools/lldb-dap/tool/lldb-dap.cpp @@ -445,12 +445,8 @@ static llvm::Error serveConnection( g_connection_timeout_time_point, connection_timeout_seconds.value()); std::condition_variable dap_sessions_condition; - std::mutex dap_sessions_mutex; - std::map dap_sessions; unsigned int clientCount = 0; - auto handle = listener->Accept(g_loop, [=, &dap_sessions_condition, - &dap_sessions_mutex, &dap_sessions, - &clientCount]( + auto handle = listener->Accept(g_loop, [=, &clientCount]( std::unique_ptr sock) { // Reset the keep alive timer, because we won't be killing the server // while this connection is being served. @@ -464,8 +460,7 @@ static llvm::Error serveConnection( // Move the client into a background thread to unblock accepting the next // client. - std::thread client([=, &dap_sessions_condition, &dap_sessions_mutex, - &dap_sessions]() { + std::thread client([=]() { llvm::set_thread_name(client_name + ".runloop"); MainLoop loop; Transport transport(client_name, log, io, io); @@ -478,10 +473,8 @@ static llvm::Error serveConnection( return; } - { - std::scoped_lock lock(dap_sessions_mutex); - dap_sessions[&loop] = &dap; - } + // Register the DAP session with the global manager. 
+ DAPSessionManager::GetInstance().RegisterSession(&loop, &dap); if (auto Err = dap.Loop()) { llvm::logAllUnhandledErrors(std::move(Err), llvm::errs(), @@ -490,10 +483,8 @@ static llvm::Error serveConnection( } DAP_LOG(log, "({0}) client disconnected", client_name); - std::unique_lock lock(dap_sessions_mutex); - dap_sessions.erase(&loop); - std::notify_all_at_thread_exit(dap_sessions_condition, std::move(lock)); - + // Unregister the DAP session from the global manager. + DAPSessionManager::GetInstance().UnregisterSession(&loop); // Start the countdown to kill the server at the end of each connection. if (connection_timeout_seconds) TrackConnectionTimeout(g_loop, g_connection_timeout_mutex, @@ -516,29 +507,11 @@ static llvm::Error serveConnection( log, "lldb-dap server shutdown requested, disconnecting remaining clients..."); - bool client_failed = false; - { - std::scoped_lock lock(dap_sessions_mutex); - for (auto [loop, dap] : dap_sessions) { - if (llvm::Error error = dap->Disconnect()) { - client_failed = true; - llvm::WithColor::error() << "DAP client disconnected failed: " - << llvm::toString(std::move(error)) << "\n"; - } - loop->AddPendingCallback( - [](MainLoopBase &loop) { loop.RequestTermination(); }); - } - } - - // Wait for all clients to finish disconnecting. - std::unique_lock lock(dap_sessions_mutex); - dap_sessions_condition.wait(lock, [&] { return dap_sessions.empty(); }); - - if (client_failed) - return llvm::make_error( - "disconnecting all clients failed", llvm::inconvertibleErrorCode()); + // Disconnect all active sessions using the global manager. + DAPSessionManager::GetInstance().DisconnectAllSessions(); - return llvm::Error::success(); + // Wait for all clients to finish disconnecting and return any errors. 
+ return DAPSessionManager::GetInstance().WaitForAllSessionsToDisconnect(); } int main(int argc, char *argv[]) { @@ -775,6 +748,10 @@ int main(int argc, char *argv[]) { return EXIT_FAILURE; } + // Register the DAP session with the global manager for stdio mode. + // This is needed for the event handling to find the correct DAP instance. + DAPSessionManager::GetInstance().RegisterSession(&loop, &dap); + // used only by TestVSCode_redirection_to_console.py if (getenv("LLDB_DAP_TEST_STDOUT_STDERR_REDIRECTION") != nullptr) redirection_test(); @@ -784,7 +761,9 @@ int main(int argc, char *argv[]) { llvm::toStringWithoutConsuming(Err)); llvm::logAllUnhandledErrors(std::move(Err), llvm::errs(), "DAP session error: "); + DAPSessionManager::GetInstance().UnregisterSession(&loop); return EXIT_FAILURE; } + DAPSessionManager::GetInstance().UnregisterSession(&loop); return EXIT_SUCCESS; } diff --git a/lldb/unittests/DAP/CMakeLists.txt b/lldb/unittests/DAP/CMakeLists.txt index a478cf07eedb2..0f8e9db2fab31 100644 --- a/lldb/unittests/DAP/CMakeLists.txt +++ b/lldb/unittests/DAP/CMakeLists.txt @@ -1,6 +1,7 @@ add_lldb_unittest(DAPTests ClientLauncherTest.cpp DAPErrorTest.cpp + DAPSessionManagerTest.cpp DAPTest.cpp DAPTypesTest.cpp FifoFilesTest.cpp diff --git a/lldb/unittests/DAP/DAPSessionManagerTest.cpp b/lldb/unittests/DAP/DAPSessionManagerTest.cpp new file mode 100644 index 0000000000000..b840d31ef116d --- /dev/null +++ b/lldb/unittests/DAP/DAPSessionManagerTest.cpp @@ -0,0 +1,103 @@ +//===-- DAPSessionManagerTest.cpp ----------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "DAPSessionManager.h" +#include "TestBase.h" +#include "lldb/API/SBDebugger.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using namespace lldb_dap; +using namespace lldb; +using namespace lldb_dap_tests; + +class DAPSessionManagerTest : public DAPTestBase {}; + +TEST_F(DAPSessionManagerTest, GetInstanceReturnsSameSingleton) { + DAPSessionManager &instance1 = DAPSessionManager::GetInstance(); + DAPSessionManager &instance2 = DAPSessionManager::GetInstance(); + + EXPECT_EQ(&instance1, &instance2); +} + +// UnregisterSession uses std::notify_all_at_thread_exit, so it must be called +// from a separate thread to properly release the mutex on thread exit. +TEST_F(DAPSessionManagerTest, RegisterAndUnregisterSession) { + DAPSessionManager &manager = DAPSessionManager::GetInstance(); + + // Initially not registered. + std::vector sessions_before = manager.GetActiveSessions(); + EXPECT_EQ( + std::count(sessions_before.begin(), sessions_before.end(), dap.get()), 0); + + manager.RegisterSession(&loop, dap.get()); + + // Should be in active sessions after registration. + std::vector sessions_after = manager.GetActiveSessions(); + EXPECT_EQ(std::count(sessions_after.begin(), sessions_after.end(), dap.get()), + 1); + + // Unregister. + std::thread unregister_thread([&]() { manager.UnregisterSession(&loop); }); + + unregister_thread.join(); + + // There should no longer be active sessions. 
+  std::vector<DAP *> sessions_final = manager.GetActiveSessions();
+  EXPECT_EQ(std::count(sessions_final.begin(), sessions_final.end(), dap.get()),
+            0);
+}
+
+TEST_F(DAPSessionManagerTest, DisconnectAllSessions) {
+  DAPSessionManager &manager = DAPSessionManager::GetInstance();
+
+  manager.RegisterSession(&loop, dap.get());
+
+  std::vector<DAP *> sessions = manager.GetActiveSessions();
+  EXPECT_EQ(std::count(sessions.begin(), sessions.end(), dap.get()), 1);
+
+  manager.DisconnectAllSessions();
+
+  // DisconnectAllSessions initiates shutdown but doesn't wait for
+  // sessions to complete or remove them from the active sessions map.
+  sessions = manager.GetActiveSessions();
+  EXPECT_EQ(std::count(sessions.begin(), sessions.end(), dap.get()), 1);
+
+  std::thread unregister_thread([&]() { manager.UnregisterSession(&loop); });
+  unregister_thread.join();
+}
+
+TEST_F(DAPSessionManagerTest, WaitForAllSessionsToDisconnect) {
+  DAPSessionManager &manager = DAPSessionManager::GetInstance();
+
+  manager.RegisterSession(&loop, dap.get());
+
+  std::vector<DAP *> sessions = manager.GetActiveSessions();
+  EXPECT_EQ(std::count(sessions.begin(), sessions.end(), dap.get()), 1);
+
+  // Unregister after a delay to test blocking behavior.
+  std::thread unregister_thread([&]() {
+    std::this_thread::sleep_for(std::chrono::milliseconds(100));
+    manager.UnregisterSession(&loop);
+  });
+
+  // WaitForAllSessionsToDisconnect should block until unregistered.
+  auto start = std::chrono::steady_clock::now();
+  llvm::Error err = manager.WaitForAllSessionsToDisconnect();
+  EXPECT_FALSE(err);
+  auto duration = std::chrono::steady_clock::now() - start;
+
+  // Verify it waited at least 100ms.
+  EXPECT_GE(duration, std::chrono::milliseconds(100));
+
+  // Session should be unregistered now.
+ sessions = manager.GetActiveSessions(); + EXPECT_EQ(std::count(sessions.begin(), sessions.end(), dap.get()), 0); + + unregister_thread.join(); +} diff --git a/lldb/unittests/Expression/ClangParserTest.cpp b/lldb/unittests/Expression/ClangParserTest.cpp index fab4487c73719..c949026e87cd8 100644 --- a/lldb/unittests/Expression/ClangParserTest.cpp +++ b/lldb/unittests/Expression/ClangParserTest.cpp @@ -8,7 +8,7 @@ #include "clang/Basic/Version.h" #include "clang/Config/config.h" -#include "clang/Driver/Driver.h" +#include "clang/Options/OptionUtils.h" #include "Plugins/ExpressionParser/Clang/ClangHost.h" #include "TestingSupport/SubsystemRAII.h" @@ -43,7 +43,7 @@ TEST_F(ClangHostTest, ComputeClangResourceDirectory) { std::string path_to_liblldb = "C:\\foo\\bar\\lib\\"; #endif std::string path_to_clang_dir = - clang::driver::Driver::GetResourcesPath(path_to_liblldb + "liblldb"); + clang::GetResourcesPath(path_to_liblldb + "liblldb"); llvm::SmallString<256> path_to_clang_lib_dir_real; llvm::sys::fs::real_path(path_to_clang_dir, path_to_clang_lib_dir_real); diff --git a/lldb/unittests/Target/MemoryTest.cpp b/lldb/unittests/Target/MemoryTest.cpp index e444f68dc4871..131a3cabdd896 100644 --- a/lldb/unittests/Target/MemoryTest.cpp +++ b/lldb/unittests/Target/MemoryTest.cpp @@ -48,6 +48,8 @@ class DummyProcess : public Process { } Status DoDestroy() override { return {}; } void RefreshStateAfterStop() override {} + // Required by Target::ReadMemory() to call Process::ReadMemory() + bool IsAlive() override { return true; } size_t DoReadMemory(lldb::addr_t vm_addr, void *buf, size_t size, Status &error) override { if (m_bytes_left == 0) @@ -61,7 +63,7 @@ class DummyProcess : public Process { m_bytes_left -= size; } - memset(buf, 'B', num_bytes_to_write); + memset(buf, m_filler, num_bytes_to_write); return num_bytes_to_write; } bool DoUpdateThreadList(ThreadList &old_thread_list, @@ -72,8 +74,10 @@ class DummyProcess : public Process { // Test-specific additions size_t 
m_bytes_left; + int m_filler = 'B'; MemoryCache &GetMemoryCache() { return m_memory_cache; } void SetMaxReadSize(size_t size) { m_bytes_left = size; } + void SetFiller(int filler) { m_filler = filler; } }; } // namespace @@ -85,6 +89,18 @@ TargetSP CreateTarget(DebuggerSP &debugger_sp, ArchSpec &arch) { return target_sp; } +static ProcessSP CreateProcess(lldb::TargetSP target_sp) { + ListenerSP listener_sp(Listener::MakeListener("dummy")); + ProcessSP process_sp = std::make_shared(target_sp, listener_sp); + + struct TargetHack : public Target { + void SetProcess(ProcessSP process) { m_process_sp = process; } + }; + static_cast(target_sp.get())->SetProcess(process_sp); + + return process_sp; +} + TEST_F(MemoryTest, TesetMemoryCacheRead) { ArchSpec arch("x86_64-apple-macosx-"); @@ -96,8 +112,7 @@ TEST_F(MemoryTest, TesetMemoryCacheRead) { TargetSP target_sp = CreateTarget(debugger_sp, arch); ASSERT_TRUE(target_sp); - ListenerSP listener_sp(Listener::MakeListener("dummy")); - ProcessSP process_sp = std::make_shared(target_sp, listener_sp); + ProcessSP process_sp = CreateProcess(target_sp); ASSERT_TRUE(process_sp); DummyProcess *process = static_cast(process_sp.get()); @@ -227,6 +242,58 @@ TEST_F(MemoryTest, TesetMemoryCacheRead) { // old cache } +TEST_F(MemoryTest, TestReadInteger) { + ArchSpec arch("x86_64-apple-macosx-"); + + Platform::SetHostPlatform(PlatformRemoteMacOSX::CreateInstance(true, &arch)); + + DebuggerSP debugger_sp = Debugger::CreateInstance(); + ASSERT_TRUE(debugger_sp); + + TargetSP target_sp = CreateTarget(debugger_sp, arch); + ASSERT_TRUE(target_sp); + + ProcessSP process_sp = CreateProcess(target_sp); + ASSERT_TRUE(process_sp); + + DummyProcess *process = static_cast(process_sp.get()); + Status error; + + process->SetFiller(0xff); + process->SetMaxReadSize(256); + // The ReadSignedIntegerFromMemory() methods return int64_t. Check that they + // extend the sign correctly when reading 32-bit values. 
+ EXPECT_EQ(-1, + target_sp->ReadSignedIntegerFromMemory(Address(0), 4, 0, error)); + EXPECT_EQ(-1, process->ReadSignedIntegerFromMemory(0, 4, 0, error)); + // Check reading 64-bit values as well. + EXPECT_EQ(-1, + target_sp->ReadSignedIntegerFromMemory(Address(0), 8, 0, error)); + EXPECT_EQ(-1, process->ReadSignedIntegerFromMemory(0, 8, 0, error)); + + // ReadUnsignedIntegerFromMemory() should not extend the sign. + EXPECT_EQ(0xffffffffULL, + target_sp->ReadUnsignedIntegerFromMemory(Address(0), 4, 0, error)); + EXPECT_EQ(0xffffffffULL, + process->ReadUnsignedIntegerFromMemory(0, 4, 0, error)); + EXPECT_EQ(0xffffffffffffffffULL, + target_sp->ReadUnsignedIntegerFromMemory(Address(0), 8, 0, error)); + EXPECT_EQ(0xffffffffffffffffULL, + process->ReadUnsignedIntegerFromMemory(0, 8, 0, error)); + + // Check reading positive values. + process->GetMemoryCache().Clear(); + process->SetFiller(0x7f); + process->SetMaxReadSize(256); + EXPECT_EQ(0x7f7f7f7fLL, + target_sp->ReadSignedIntegerFromMemory(Address(0), 4, 0, error)); + EXPECT_EQ(0x7f7f7f7fLL, process->ReadSignedIntegerFromMemory(0, 4, 0, error)); + EXPECT_EQ(0x7f7f7f7f7f7f7f7fLL, + target_sp->ReadSignedIntegerFromMemory(Address(0), 8, 0, error)); + EXPECT_EQ(0x7f7f7f7f7f7f7f7fLL, + process->ReadSignedIntegerFromMemory(0, 8, 0, error)); +} + /// A process class that, when asked to read memory from some address X, returns /// the least significant byte of X. 
class DummyReaderProcess : public Process { diff --git a/lldb/unittests/UnwindAssembly/ARM64/TestArm64InstEmulation.cpp b/lldb/unittests/UnwindAssembly/ARM64/TestArm64InstEmulation.cpp index e28366e9f0432..6c74860971674 100644 --- a/lldb/unittests/UnwindAssembly/ARM64/TestArm64InstEmulation.cpp +++ b/lldb/unittests/UnwindAssembly/ARM64/TestArm64InstEmulation.cpp @@ -1008,13 +1008,13 @@ TEST_F(TestArm64InstEmulation, TestMidFunctionEpilogueAndBackwardsJump) { // row[4]: 24: CFA=sp+48 => fp= lr= // // This must come from +56 - // row[5]: 32: CFA=fp+16 => fp=[CFA-16] lr=[CFA-8] x22=[CFA-24], x23=[CFA-32] + // row[5]: 32: CFA=fp+16 => fp=[CFA-16] lr=[CFA-8] x22=[CFA-32], x23=[CFA-24] // row[6]: 40: CFA=fp+16 => fp=[CFA-16] lr=[CFA-8] x22=same, x23 = same // row[6]: 44: CFA=sp+48 => fp=same lr=same x22=same, x23 = same // row[6]: 48: CFA=sp0 => fp=same lr=same x22=same, x23 = same // // row[x]: 52: CFA=fp+16 => fp=[CFA-16] lr=[CFA-8] - // row[x]: 56: CFA=fp+16 => fp=[CFA-16] lr=[CFA-8] x22=[CFA-24], x23=[CFA-32] + // row[x]: 56: CFA=fp+16 => fp=[CFA-16] lr=[CFA-8] x22=[CFA-32], x23=[CFA-24] // clang-format on sample_range = AddressRange(0x1000, sizeof(data)); @@ -1059,7 +1059,7 @@ TEST_F(TestArm64InstEmulation, TestMidFunctionEpilogueAndBackwardsJump) { // <+28>: ret // <+32>: mov x23, #0x1 row = unwind_plan.GetRowForFunctionOffset(32); - // FIXME: EXPECT_NE(32, row->GetOffset()); + // FIXME: EXPECT_NE(28, row->GetOffset()); // Check that the state of this branch // <+16>: b.ne ; <+52> DO_SOMETHING_AND_GOTO_AFTER_EPILOGUE diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst index 33c23f2949765..02865f8a29c67 100644 --- a/llvm/docs/LangRef.rst +++ b/llvm/docs/LangRef.rst @@ -3234,6 +3234,24 @@ A "convergencectrl" operand bundle is only valid on a ``convergent`` operation. When present, the operand bundle must contain exactly one value of token type. See the :doc:`ConvergentOperations` document for details. +.. 
_deactivationsymbol: + +Deactivation Symbol Operand Bundles +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +A ``"deactivation-symbol"`` operand bundle is valid on the following +instructions (AArch64 only): + +- Call to a normal function with ``notail`` attribute and a first argument and + return value of type ``ptr``. +- Call to ``llvm.ptrauth.sign`` or ``llvm.ptrauth.auth`` intrinsics. + +This operand bundle specifies that if the deactivation symbol is defined +to a valid value for the target, the marked instruction will return the +value of its first argument instead of calling the specified function +or intrinsic. This is achieved with ``PATCHINST`` relocations on the +target instructions (see the AArch64 psABI for details). + .. _moduleasm: Module-Level Inline Assembly @@ -5284,7 +5302,7 @@ need to refer to the actual function body. Pointer Authentication Constants -------------------------------- -``ptrauth (ptr CST, i32 KEY[, i64 DISC[, ptr ADDRDISC]?]?)`` +``ptrauth (ptr CST, i32 KEY[, i64 DISC[, ptr ADDRDISC[, ptr DS]?]?]?)`` A '``ptrauth``' constant represents a pointer with a cryptographic authentication signature embedded into some bits, as described in the @@ -5313,6 +5331,11 @@ Otherwise, the expression is equivalent to: %tmp2 = call i64 @llvm.ptrauth.sign(i64 ptrtoint (ptr CST to i64), i32 KEY, i64 %tmp1) %val = inttoptr i64 %tmp2 to ptr +If the deactivation symbol operand ``DS`` has a non-null value, +the semantics are as if a :ref:`deactivation-symbol operand bundle +` were added to the ``llvm.ptrauth.sign`` intrinsic +calls above, with ``DS`` as the only operand. + .. 
_constantexprs:

Constant Expressions

diff --git a/llvm/include/llvm/Analysis/Delinearization.h b/llvm/include/llvm/Analysis/Delinearization.h
index 434cfb61699d6..e2be91ba84c53 100644
--- a/llvm/include/llvm/Analysis/Delinearization.h
+++ b/llvm/include/llvm/Analysis/Delinearization.h
@@ -133,9 +133,8 @@ bool findFixedSizeArrayDimensions(ScalarEvolution &SE, const SCEV *Expr,
 /// terms exist in the \p Expr. In other words, it assumes that the all step
 /// values are constant.
 ///
-/// This function is intended to replace getIndexExpressionsFromGEP and
-/// tryDelinearizeFixedSizeImpl. They rely on the GEP source element type so
-/// that they will be removed in the future.
+/// This function is intended to replace getIndexExpressionsFromGEP. It relies
+/// on the GEP source element type, so it will be removed in the future.
 bool delinearizeFixedSizeArray(ScalarEvolution &SE, const SCEV *Expr,
                                SmallVectorImpl &Subscripts,
                                SmallVectorImpl &Sizes,
@@ -155,17 +154,6 @@ bool getIndexExpressionsFromGEP(ScalarEvolution &SE,
                                 SmallVectorImpl &Subscripts,
                                 SmallVectorImpl &Sizes);
 
-/// Implementation of fixed size array delinearization. Try to delinearize
-/// access function for a fixed size multi-dimensional array, by deriving
-/// subscripts from GEP instructions. Returns true upon success and false
-/// otherwise. \p Inst is the load/store instruction whose pointer operand is
-/// the one we want to delinearize. \p AccessFn is its corresponding SCEV
-/// expression w.r.t. the surrounding loop.
-bool tryDelinearizeFixedSizeImpl(ScalarEvolution *SE, Instruction *Inst, - const SCEV *AccessFn, - SmallVectorImpl &Subscripts, - SmallVectorImpl &Sizes); - struct DelinearizationPrinterPass : public PassInfoMixin { explicit DelinearizationPrinterPass(raw_ostream &OS); diff --git a/llvm/include/llvm/Analysis/DependenceAnalysis.h b/llvm/include/llvm/Analysis/DependenceAnalysis.h index f603ae8dbd70f..04731569aa3f2 100644 --- a/llvm/include/llvm/Analysis/DependenceAnalysis.h +++ b/llvm/include/llvm/Analysis/DependenceAnalysis.h @@ -773,8 +773,8 @@ class DependenceInfo { SmallVectorImpl &Pair); /// Tries to delinearize \p Src and \p Dst access functions for a fixed size - /// multi-dimensional array. Calls tryDelinearizeFixedSizeImpl() to - /// delinearize \p Src and \p Dst separately, + /// multi-dimensional array. Calls delinearizeFixedSizeArray() to delinearize + /// \p Src and \p Dst separately, bool tryDelinearizeFixedSize(Instruction *Src, Instruction *Dst, const SCEV *SrcAccessFn, const SCEV *DstAccessFn, SmallVectorImpl &SrcSubscripts, diff --git a/llvm/include/llvm/Bitcode/LLVMBitCodes.h b/llvm/include/llvm/Bitcode/LLVMBitCodes.h index 991aa49d787f9..2451d588bdbf7 100644 --- a/llvm/include/llvm/Bitcode/LLVMBitCodes.h +++ b/llvm/include/llvm/Bitcode/LLVMBitCodes.h @@ -437,6 +437,8 @@ enum ConstantsCodes { CST_CODE_CE_GEP_WITH_INRANGE = 31, // [opty, flags, range, n x operands] CST_CODE_CE_GEP = 32, // [opty, flags, n x operands] CST_CODE_PTRAUTH = 33, // [ptr, key, disc, addrdisc] + CST_CODE_PTRAUTH2 = 34, // [ptr, key, disc, addrdisc, + // deactivation_symbol] }; /// CastOpcodes - These are values used in the bitcode files to encode which diff --git a/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h b/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h index a8bde824527a5..fea900f37ec74 100644 --- a/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h +++ b/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h @@ -159,6 +159,8 @@ class LLVM_ABI CallLowering { 
/// True if this call results in convergent operations. bool IsConvergent = true; + + GlobalValue *DeactivationSymbol = nullptr; }; /// Argument handling is mostly uniform between the four places that diff --git a/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h b/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h index 40c7792f7e8a2..5f3f1d386569c 100644 --- a/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h +++ b/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h @@ -56,6 +56,7 @@ struct MachineIRBuilderState { MDNode *PCSections = nullptr; /// MMRA Metadata to be set on any instruction we create. MDNode *MMRA = nullptr; + Value *DS = nullptr; /// \name Fields describing the insertion point. /// @{ @@ -369,6 +370,7 @@ class LLVM_ABI MachineIRBuilder { State.II = MI.getIterator(); setPCSections(MI.getPCSections()); setMMRAMetadata(MI.getMMRAMetadata()); + setDeactivationSymbol(MI.getDeactivationSymbol()); } /// @} @@ -405,6 +407,9 @@ class LLVM_ABI MachineIRBuilder { /// Set the PC sections metadata to \p MD for all the next build instructions. void setMMRAMetadata(MDNode *MMRA) { State.MMRA = MMRA; } + Value *getDeactivationSymbol() { return State.DS; } + void setDeactivationSymbol(Value *DS) { State.DS = DS; } + /// Get the current instruction's MMRA metadata. MDNode *getMMRAMetadata() { return State.MMRA; } diff --git a/llvm/include/llvm/CodeGen/ISDOpcodes.h b/llvm/include/llvm/CodeGen/ISDOpcodes.h index cdaa916548c25..b32f3dacbb3a4 100644 --- a/llvm/include/llvm/CodeGen/ISDOpcodes.h +++ b/llvm/include/llvm/CodeGen/ISDOpcodes.h @@ -1579,6 +1579,10 @@ enum NodeType { // Outputs: Output Chain CLEAR_CACHE, + // Untyped node storing deactivation symbol reference + // (DeactivationSymbolSDNode). + DEACTIVATION_SYMBOL, + /// BUILTIN_OP_END - This must be the last enum value in this list. /// The target-specific pre-isel opcode values start here. 
BUILTIN_OP_END diff --git a/llvm/include/llvm/CodeGen/MachineFunction.h b/llvm/include/llvm/CodeGen/MachineFunction.h index ef783f276b7d4..08ffdb2cb469d 100644 --- a/llvm/include/llvm/CodeGen/MachineFunction.h +++ b/llvm/include/llvm/CodeGen/MachineFunction.h @@ -1207,7 +1207,7 @@ class LLVM_ABI MachineFunction { ArrayRef MMOs, MCSymbol *PreInstrSymbol = nullptr, MCSymbol *PostInstrSymbol = nullptr, MDNode *HeapAllocMarker = nullptr, MDNode *PCSections = nullptr, uint32_t CFIType = 0, - MDNode *MMRAs = nullptr); + MDNode *MMRAs = nullptr, Value *DS = nullptr); /// Allocate a string and populate it with the given external symbol name. const char *createExternalSymbolName(StringRef Name); diff --git a/llvm/include/llvm/CodeGen/MachineInstr.h b/llvm/include/llvm/CodeGen/MachineInstr.h index ca984459c365a..077e39b49df6f 100644 --- a/llvm/include/llvm/CodeGen/MachineInstr.h +++ b/llvm/include/llvm/CodeGen/MachineInstr.h @@ -160,8 +160,9 @@ class MachineInstr /// /// This has to be defined eagerly due to the implementation constraints of /// `PointerSumType` where it is used. 
- class ExtraInfo final : TrailingObjects { + class ExtraInfo final + : TrailingObjects { public: static ExtraInfo *create(BumpPtrAllocator &Allocator, ArrayRef MMOs, @@ -169,20 +170,23 @@ class MachineInstr MCSymbol *PostInstrSymbol = nullptr, MDNode *HeapAllocMarker = nullptr, MDNode *PCSections = nullptr, uint32_t CFIType = 0, - MDNode *MMRAs = nullptr) { + MDNode *MMRAs = nullptr, Value *DS = nullptr) { bool HasPreInstrSymbol = PreInstrSymbol != nullptr; bool HasPostInstrSymbol = PostInstrSymbol != nullptr; bool HasHeapAllocMarker = HeapAllocMarker != nullptr; bool HasMMRAs = MMRAs != nullptr; bool HasCFIType = CFIType != 0; bool HasPCSections = PCSections != nullptr; + bool HasDS = DS != nullptr; auto *Result = new (Allocator.Allocate( - totalSizeToAlloc( + totalSizeToAlloc( MMOs.size(), HasPreInstrSymbol + HasPostInstrSymbol, - HasHeapAllocMarker + HasPCSections + HasMMRAs, HasCFIType), + HasHeapAllocMarker + HasPCSections + HasMMRAs, HasCFIType, HasDS), alignof(ExtraInfo))) ExtraInfo(MMOs.size(), HasPreInstrSymbol, HasPostInstrSymbol, - HasHeapAllocMarker, HasPCSections, HasCFIType, HasMMRAs); + HasHeapAllocMarker, HasPCSections, HasCFIType, HasMMRAs, + HasDS); // Copy the actual data into the trailing objects. llvm::copy(MMOs, Result->getTrailingObjects()); @@ -202,6 +206,8 @@ class MachineInstr Result->getTrailingObjects()[0] = CFIType; if (HasMMRAs) Result->getTrailingObjects()[MDNodeIdx++] = MMRAs; + if (HasDS) + Result->getTrailingObjects()[0] = DS; return Result; } @@ -240,6 +246,10 @@ class MachineInstr : nullptr; } + Value *getDeactivationSymbol() const { + return HasDS ? getTrailingObjects()[0] : 0; + } + private: friend TrailingObjects; @@ -255,6 +265,7 @@ class MachineInstr const bool HasPCSections; const bool HasCFIType; const bool HasMMRAs; + const bool HasDS; // Implement the `TrailingObjects` internal API. 
size_t numTrailingObjects(OverloadToken) const { @@ -269,16 +280,17 @@ class MachineInstr size_t numTrailingObjects(OverloadToken) const { return HasCFIType; } + size_t numTrailingObjects(OverloadToken) const { return HasDS; } // Just a boring constructor to allow us to initialize the sizes. Always use // the `create` routine above. ExtraInfo(int NumMMOs, bool HasPreInstrSymbol, bool HasPostInstrSymbol, bool HasHeapAllocMarker, bool HasPCSections, bool HasCFIType, - bool HasMMRAs) + bool HasMMRAs, bool HasDS) : NumMMOs(NumMMOs), HasPreInstrSymbol(HasPreInstrSymbol), HasPostInstrSymbol(HasPostInstrSymbol), HasHeapAllocMarker(HasHeapAllocMarker), HasPCSections(HasPCSections), - HasCFIType(HasCFIType), HasMMRAs(HasMMRAs) {} + HasCFIType(HasCFIType), HasMMRAs(HasMMRAs), HasDS(HasDS) {} }; /// Enumeration of the kinds of inline extra info available. It is important @@ -867,6 +879,14 @@ class MachineInstr return nullptr; } + Value *getDeactivationSymbol() const { + if (!Info) + return nullptr; + if (ExtraInfo *EI = Info.get()) + return EI->getDeactivationSymbol(); + return nullptr; + } + /// Helper to extract a CFI type hash if one has been added. uint32_t getCFIType() const { if (!Info) @@ -1969,6 +1989,8 @@ class MachineInstr /// Set the CFI type for the instruction. LLVM_ABI void setCFIType(MachineFunction &MF, uint32_t Type); + LLVM_ABI void setDeactivationSymbol(MachineFunction &MF, Value *DS); + /// Return the MIFlags which represent both MachineInstrs. This /// should be used when merging two MachineInstrs into one. This routine does /// not modify the MIFlags of this MachineInstr. 
@@ -2088,7 +2110,7 @@ class MachineInstr void setExtraInfo(MachineFunction &MF, ArrayRef MMOs, MCSymbol *PreInstrSymbol, MCSymbol *PostInstrSymbol, MDNode *HeapAllocMarker, MDNode *PCSections, - uint32_t CFIType, MDNode *MMRAs); + uint32_t CFIType, MDNode *MMRAs, Value *DS); }; /// Special DenseMapInfo traits to compare MachineInstr* by *value* of the diff --git a/llvm/include/llvm/CodeGen/MachineInstrBuilder.h b/llvm/include/llvm/CodeGen/MachineInstrBuilder.h index e705d7d99544c..caeb430d6fd1c 100644 --- a/llvm/include/llvm/CodeGen/MachineInstrBuilder.h +++ b/llvm/include/llvm/CodeGen/MachineInstrBuilder.h @@ -70,29 +70,44 @@ enum { } // end namespace RegState /// Set of metadata that should be preserved when using BuildMI(). This provides -/// a more convenient way of preserving DebugLoc, PCSections and MMRA. +/// a more convenient way of preserving certain data from the original +/// instruction. class MIMetadata { public: MIMetadata() = default; - MIMetadata(DebugLoc DL, MDNode *PCSections = nullptr, MDNode *MMRA = nullptr) - : DL(std::move(DL)), PCSections(PCSections), MMRA(MMRA) {} + MIMetadata(DebugLoc DL, MDNode *PCSections = nullptr, MDNode *MMRA = nullptr, + Value *DeactivationSymbol = nullptr) + : DL(std::move(DL)), PCSections(PCSections), MMRA(MMRA), + DeactivationSymbol(DeactivationSymbol) {} MIMetadata(const DILocation *DI, MDNode *PCSections = nullptr, MDNode *MMRA = nullptr) : DL(DI), PCSections(PCSections), MMRA(MMRA) {} explicit MIMetadata(const Instruction &From) : DL(From.getDebugLoc()), - PCSections(From.getMetadata(LLVMContext::MD_pcsections)) {} + PCSections(From.getMetadata(LLVMContext::MD_pcsections)), + DeactivationSymbol(getDeactivationSymbol(&From)) {} explicit MIMetadata(const MachineInstr &From) - : DL(From.getDebugLoc()), PCSections(From.getPCSections()) {} + : DL(From.getDebugLoc()), PCSections(From.getPCSections()), + DeactivationSymbol(From.getDeactivationSymbol()) {} const DebugLoc &getDL() const { return DL; } MDNode 
*getPCSections() const { return PCSections; } MDNode *getMMRAMetadata() const { return MMRA; } + Value *getDeactivationSymbol() const { return DeactivationSymbol; } private: DebugLoc DL; MDNode *PCSections = nullptr; MDNode *MMRA = nullptr; + Value *DeactivationSymbol = nullptr; + + static inline Value *getDeactivationSymbol(const Instruction *I) { + if (auto *CB = dyn_cast(I)) + if (auto Bundle = + CB->getOperandBundle(llvm::LLVMContext::OB_deactivation_symbol)) + return Bundle->Inputs[0].get(); + return nullptr; + } }; class MachineInstrBuilder { @@ -348,6 +363,8 @@ class MachineInstrBuilder { MI->setPCSections(*MF, MIMD.getPCSections()); if (MIMD.getMMRAMetadata()) MI->setMMRAMetadata(*MF, MIMD.getMMRAMetadata()); + if (MIMD.getDeactivationSymbol()) + MI->setDeactivationSymbol(*MF, MIMD.getDeactivationSymbol()); return *this; } diff --git a/llvm/include/llvm/CodeGen/SelectionDAG.h b/llvm/include/llvm/CodeGen/SelectionDAG.h index b024e8a68bd6e..501cbc947132e 100644 --- a/llvm/include/llvm/CodeGen/SelectionDAG.h +++ b/llvm/include/llvm/CodeGen/SelectionDAG.h @@ -759,6 +759,7 @@ class SelectionDAG { int64_t offset = 0, unsigned TargetFlags = 0) { return getGlobalAddress(GV, DL, VT, offset, true, TargetFlags); } + LLVM_ABI SDValue getDeactivationSymbol(const GlobalValue *GV); LLVM_ABI SDValue getFrameIndex(int FI, EVT VT, bool isTarget = false); SDValue getTargetFrameIndex(int FI, EVT VT) { return getFrameIndex(FI, VT, true); diff --git a/llvm/include/llvm/CodeGen/SelectionDAGISel.h b/llvm/include/llvm/CodeGen/SelectionDAGISel.h index c5cdf76f4777e..7add717227963 100644 --- a/llvm/include/llvm/CodeGen/SelectionDAGISel.h +++ b/llvm/include/llvm/CodeGen/SelectionDAGISel.h @@ -151,6 +151,7 @@ class SelectionDAGISel { OPC_RecordChild7, OPC_RecordMemRef, OPC_CaptureGlueInput, + OPC_CaptureDeactivationSymbol, OPC_MoveChild, OPC_MoveChild0, OPC_MoveChild1, diff --git a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h index 
cfc8a4243e894..aa72e81b2ab54 100644 --- a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h +++ b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h @@ -2005,6 +2005,22 @@ class GlobalAddressSDNode : public SDNode { } }; +class DeactivationSymbolSDNode : public SDNode { + friend class SelectionDAG; + + const GlobalValue *TheGlobal; + + DeactivationSymbolSDNode(const GlobalValue *GV, SDVTList VTs) + : SDNode(ISD::DEACTIVATION_SYMBOL, 0, DebugLoc(), VTs), TheGlobal(GV) {} + +public: + const GlobalValue *getGlobal() const { return TheGlobal; } + + static bool classof(const SDNode *N) { + return N->getOpcode() == ISD::DEACTIVATION_SYMBOL; + } +}; + class FrameIndexSDNode : public SDNode { friend class SelectionDAG; diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h index 7df5d8a09f0f6..b2697c81fd825 100644 --- a/llvm/include/llvm/CodeGen/TargetLowering.h +++ b/llvm/include/llvm/CodeGen/TargetLowering.h @@ -4765,6 +4765,7 @@ class LLVM_ABI TargetLowering : public TargetLoweringBase { SmallVector InVals; const ConstantInt *CFIType = nullptr; SDValue ConvergenceControlToken; + GlobalValue *DeactivationSymbol = nullptr; std::optional PAI; @@ -4918,6 +4919,11 @@ class LLVM_ABI TargetLowering : public TargetLoweringBase { return *this; } + CallLoweringInfo &setDeactivationSymbol(GlobalValue *Sym) { + DeactivationSymbol = Sym; + return *this; + } + ArgListTy &getArgs() { return Args; } diff --git a/llvm/include/llvm/ExecutionEngine/Orc/WaitingOnGraph.h b/llvm/include/llvm/ExecutionEngine/Orc/WaitingOnGraph.h index 0b46c7fb1f445..93412d9d22f8c 100644 --- a/llvm/include/llvm/ExecutionEngine/Orc/WaitingOnGraph.h +++ b/llvm/include/llvm/ExecutionEngine/Orc/WaitingOnGraph.h @@ -338,9 +338,9 @@ template class WaitingOnGraph { // incorporate NewSNs. 
std::vector> ReadyNodes, FailedNodes; processReadyOrFailed(ModifiedPendingSNs, ReadyNodes, FailedNodes, - SuperNodeDeps, ElemToPendingSN, FailedSNs); + SuperNodeDeps, FailedSNs, &ElemToPendingSN); processReadyOrFailed(NewSNs, ReadyNodes, FailedNodes, SuperNodeDeps, - ElemToNewSN, FailedSNs); + FailedSNs, nullptr); CoalesceToPendingSNs.coalesce(ModifiedPendingSNs, ElemToPendingSN); CoalesceToPendingSNs.coalesce(NewSNs, ElemToPendingSN); @@ -591,8 +591,11 @@ template class WaitingOnGraph { std::vector> &Ready, std::vector> &Failed, SuperNodeDepsMap &SuperNodeDeps, - ElemToSuperNodeMap &ElemToSNs, - const std::vector &FailedSNs) { + const std::vector &FailedSNs, + ElemToSuperNodeMap *ElemToSNs) { + + SmallVector ToRemoveFromElemToSNs; + for (size_t I = 0; I != SNs.size();) { auto &SN = SNs[I]; @@ -609,6 +612,8 @@ template class WaitingOnGraph { bool SNReady = SN->Deps.empty(); if (SNReady || SNFailed) { + if (ElemToSNs) + ToRemoveFromElemToSNs.push_back(SN.get()); auto &NodeList = SNFailed ? Failed : Ready; NodeList.push_back(std::move(SN)); std::swap(SN, SNs.back()); @@ -616,6 +621,15 @@ template class WaitingOnGraph { } else ++I; } + + // Update ElemToSNs (if passed) to remove elements pointing at SN. + for (auto *SN : ToRemoveFromElemToSNs) { + for (auto &[Container, Elems] : SN->defs()) { + auto &Row = (*ElemToSNs)[Container]; + for (auto &Elem : Elems) + Row.erase(Elem); + } + } } std::vector> PendingSNs; diff --git a/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h b/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h index f864a895a1259..7b097d1ac0ee0 100644 --- a/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h +++ b/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h @@ -1446,6 +1446,9 @@ class OpenMPIRBuilder { using ReductionGenAtomicCBTy = std::function; + using ReductionGenDataPtrPtrCBTy = std::function; + /// Enum class for reduction evaluation types scalar, complex and aggregate. 
enum class EvalKind { Scalar, Complex, Aggregate }; @@ -1454,17 +1457,25 @@ class OpenMPIRBuilder { ReductionInfo(Type *ElementType, Value *Variable, Value *PrivateVariable, EvalKind EvaluationKind, ReductionGenCBTy ReductionGen, ReductionGenClangCBTy ReductionGenClang, - ReductionGenAtomicCBTy AtomicReductionGen) + ReductionGenAtomicCBTy AtomicReductionGen, + ReductionGenDataPtrPtrCBTy DataPtrPtrGen, + Type *ByRefAllocatedType = nullptr, + Type *ByRefElementType = nullptr) : ElementType(ElementType), Variable(Variable), PrivateVariable(PrivateVariable), EvaluationKind(EvaluationKind), ReductionGen(ReductionGen), ReductionGenClang(ReductionGenClang), - AtomicReductionGen(AtomicReductionGen) {} + AtomicReductionGen(AtomicReductionGen), DataPtrPtrGen(DataPtrPtrGen), + ByRefAllocatedType(ByRefAllocatedType), + ByRefElementType(ByRefElementType) {} + ReductionInfo(Value *PrivateVariable) : ElementType(nullptr), Variable(nullptr), PrivateVariable(PrivateVariable), EvaluationKind(EvalKind::Scalar), - ReductionGen(), ReductionGenClang(), AtomicReductionGen() {} + ReductionGen(), ReductionGenClang(), AtomicReductionGen(), + DataPtrPtrGen() {} - /// Reduction element type, must match pointee type of variable. + /// Reduction element type, must match pointee type of variable. For by-ref + /// reductions, this would be just an opaque `ptr`. Type *ElementType; /// Reduction variable of pointer type. @@ -1491,6 +1502,21 @@ class OpenMPIRBuilder { /// reduction. If null, the implementation will use the non-atomic version /// along with the appropriate synchronization mechanisms. ReductionGenAtomicCBTy AtomicReductionGen; + + ReductionGenDataPtrPtrCBTy DataPtrPtrGen; + + /// For by-ref reductions, we need to keep track of 2 extra types that are + /// potentially different: + /// * The allocated type is the type of the storage allocated by the + /// reduction op's `alloc` region. For example, for allocatables and arrays, + /// this type would be the descriptor/box struct. 
+ Type *ByRefAllocatedType; + + /// * The by-ref element type is the type of the actual storage needed for + /// the data of the allocatable or array. For example, a float allocatable + /// would need some float storage to store intermediate reduction + /// results. + Type *ByRefElementType; }; enum class CopyAction : unsigned { @@ -1535,14 +1561,15 @@ class OpenMPIRBuilder { /// Function to shuffle over the value from the remote lane. void shuffleAndStore(InsertPointTy AllocaIP, Value *SrcAddr, Value *DstAddr, - Type *ElementType, Value *Offset, - Type *ReductionArrayTy); + Type *ElementType, Value *Offset, Type *ReductionArrayTy, + bool IsByRefElem); /// Emit instructions to copy a Reduce list, which contains partially /// aggregated values, in the specified direction. - void emitReductionListCopy( + Error emitReductionListCopy( InsertPointTy AllocaIP, CopyAction Action, Type *ReductionArrayTy, ArrayRef ReductionInfos, Value *SrcBase, Value *DestBase, + ArrayRef IsByRef, CopyOptionsTy CopyOptions = {nullptr, nullptr, nullptr}); /// Emit a helper that reduces data across two OpenMP threads (lanes) @@ -1616,11 +1643,13 @@ class OpenMPIRBuilder { /// \param ReduceFn The reduction function. /// \param FuncAttrs Optional param to specify any function attributes that /// need to be copied to the new function. + /// \param IsByRef For each reduction clause, whether the reduction is by-ref + /// or not. /// /// \return The ShuffleAndReduce function. - Function *emitShuffleAndReduceFunction( + Expected emitShuffleAndReduceFunction( ArrayRef ReductionInfos, - Function *ReduceFn, AttributeList FuncAttrs); + Function *ReduceFn, AttributeList FuncAttrs, ArrayRef IsByRef); /// Helper function for CreateCanonicalScanLoops to create InputLoop /// in the firstGen and Scan Loop in the SecondGen @@ -1680,12 +1709,14 @@ class OpenMPIRBuilder { /// \param ReductionInfos Array type containing the ReductionOps.
/// \param FuncAttrs Optional param to specify any function attributes that /// need to be copied to the new function. + /// \param IsByRef For each reduction clause, whether the reduction is by-ref + /// or not. /// /// \return The InterWarpCopy function. Expected emitInterWarpCopyFunction(const LocationDescription &Loc, ArrayRef ReductionInfos, - AttributeList FuncAttrs); + AttributeList FuncAttrs, ArrayRef IsByRef); /// This function emits a helper that copies all the reduction variables from /// the team into the provided global buffer for the reduction variables. @@ -1779,6 +1810,7 @@ class OpenMPIRBuilder { /// \return The reduction function. Expected createReductionFunction( StringRef ReducerName, ArrayRef ReductionInfos, + ArrayRef IsByRef, ReductionGenCBKind ReductionGenCBKind = ReductionGenCBKind::MLIR, AttributeList FuncAttrs = {}); @@ -2031,11 +2063,13 @@ class OpenMPIRBuilder { /// reduction variables. /// \param AllocaIP An insertion point suitable for allocas usable /// in reductions. - /// \param CodeGenIP An insertion point suitable for code - /// generation. \param ReductionInfos A list of info on each reduction - /// variable. \param IsNoWait Optional flag set if the reduction is - /// marked as - /// nowait. + /// \param CodeGenIP An insertion point suitable for code + /// generation. + /// \param ReductionInfos A list of info on each reduction + /// variable. + /// \param IsNoWait Optional flag set if the reduction is + /// marked as nowait. + /// \param IsByRef For each reduction clause, whether the reduction is by-ref. /// \param IsTeamsReduction Optional flag set if it is a teams /// reduction. /// \param GridValue Optional GPU grid value. 
@@ -2045,7 +2079,8 @@ class OpenMPIRBuilder { LLVM_ABI InsertPointOrErrorTy createReductionsGPU( const LocationDescription &Loc, InsertPointTy AllocaIP, InsertPointTy CodeGenIP, ArrayRef ReductionInfos, - bool IsNoWait = false, bool IsTeamsReduction = false, + ArrayRef IsByRef, bool IsNoWait = false, + bool IsTeamsReduction = false, ReductionGenCBKind ReductionGenCBKind = ReductionGenCBKind::MLIR, std::optional GridValue = {}, unsigned ReductionBufNum = 1024, Value *SrcLocInfo = nullptr); diff --git a/llvm/include/llvm/IR/Constants.h b/llvm/include/llvm/IR/Constants.h index e06e6adbc3130..e3f2eb9fa44b8 100644 --- a/llvm/include/llvm/IR/Constants.h +++ b/llvm/include/llvm/IR/Constants.h @@ -1033,10 +1033,10 @@ class ConstantPtrAuth final : public Constant { friend struct ConstantPtrAuthKeyType; friend class Constant; - constexpr static IntrusiveOperandsAllocMarker AllocMarker{4}; + constexpr static IntrusiveOperandsAllocMarker AllocMarker{5}; ConstantPtrAuth(Constant *Ptr, ConstantInt *Key, ConstantInt *Disc, - Constant *AddrDisc); + Constant *AddrDisc, Constant *DeactivationSymbol); void *operator new(size_t s) { return User::operator new(s, AllocMarker); } @@ -1046,7 +1046,8 @@ class ConstantPtrAuth final : public Constant { public: /// Return a pointer signed with the specified parameters. LLVM_ABI static ConstantPtrAuth *get(Constant *Ptr, ConstantInt *Key, - ConstantInt *Disc, Constant *AddrDisc); + ConstantInt *Disc, Constant *AddrDisc, + Constant *DeactivationSymbol); /// Produce a new ptrauth expression signing the given value using /// the same schema as is stored in one. @@ -1078,6 +1079,10 @@ class ConstantPtrAuth final : public Constant { return !getAddrDiscriminator()->isNullValue(); } + Constant *getDeactivationSymbol() const { + return cast(Op<4>().get()); + } + /// A constant value for the address discriminator which has special /// significance to ctors/dtors lowering. 
Regular address discrimination can't /// be applied for them since uses of llvm.global_{c|d}tors are disallowed @@ -1106,7 +1111,7 @@ class ConstantPtrAuth final : public Constant { template <> struct OperandTraits - : public FixedNumOperandTraits {}; + : public FixedNumOperandTraits {}; DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ConstantPtrAuth, Constant) diff --git a/llvm/include/llvm/IR/LLVMContext.h b/llvm/include/llvm/IR/LLVMContext.h index 5972dcb637dfa..d938f4609742b 100644 --- a/llvm/include/llvm/IR/LLVMContext.h +++ b/llvm/include/llvm/IR/LLVMContext.h @@ -98,7 +98,8 @@ class LLVMContext { OB_kcfi = 8, // "kcfi" OB_convergencectrl = 9, // "convergencectrl" OB_align = 10, // "align" - OB_LastBundleID = OB_align // Marker for last bundle ID + OB_deactivation_symbol = 11, // "deactivation-symbol" + OB_LastBundleID = OB_deactivation_symbol }; /// getMDKindID - Return a unique non-zero ID for the specified metadata kind. diff --git a/llvm/include/llvm/IR/RuntimeLibcalls.h b/llvm/include/llvm/IR/RuntimeLibcalls.h index cf96547063cd0..41fe448382992 100644 --- a/llvm/include/llvm/IR/RuntimeLibcalls.h +++ b/llvm/include/llvm/IR/RuntimeLibcalls.h @@ -211,6 +211,16 @@ struct RuntimeLibcallsInfo { return true; } + static bool darwinHasMemsetPattern(const Triple &TT) { + // memset_pattern{4,8,16} is only available on iOS 3.0 and Mac OS X 10.5 and + // later. All versions of watchOS support it. 
+ if (TT.isMacOSX()) + return !TT.isMacOSXVersionLT(10, 5); + if (TT.isiOS()) + return !TT.isOSVersionLT(3, 0); + return TT.isWatchOS(); + } + static bool hasAEABILibcalls(const Triple &TT) { return TT.isTargetAEABI() || TT.isTargetGNUAEABI() || TT.isTargetMuslAEABI() || TT.isOSFuchsia() || TT.isAndroid(); diff --git a/llvm/include/llvm/IR/RuntimeLibcalls.td b/llvm/include/llvm/IR/RuntimeLibcalls.td index 11e6127e0741d..794ab2449bc01 100644 --- a/llvm/include/llvm/IR/RuntimeLibcalls.td +++ b/llvm/include/llvm/IR/RuntimeLibcalls.td @@ -50,6 +50,7 @@ def isWindowsMSVCOrItaniumEnvironment : RuntimeLibcallPredicate< def isGNUEnvironment : RuntimeLibcallPredicate<"TT.isGNUEnvironment()">; def darwinHasSinCosStret : RuntimeLibcallPredicate<"darwinHasSinCosStret(TT)">; def darwinHasExp10 : RuntimeLibcallPredicate<"darwinHasExp10(TT)">; +def darwinHasMemsetPattern : RuntimeLibcallPredicate<[{darwinHasMemsetPattern(TT)}]>; def hasExp10 : RuntimeLibcallPredicate<[{!TT.isOSDarwin()}]>; @@ -154,6 +155,8 @@ foreach FPTy = ["F32", "F64", "F80", "F128", "PPCF128"] in { def SINCOS_#FPTy : RuntimeLibcall; def REMQUO_#FPTy : RuntimeLibcall; def FDIM_#FPTy : RuntimeLibcall; + + def CABS_#FPTy : RuntimeLibcall; } foreach FPTy = [ "F32", "F64" ] in { @@ -382,7 +385,9 @@ def MEMMOVE : RuntimeLibcall; def MEMMOVE_CHK : RuntimeLibcall; def MEMSET : RuntimeLibcall; def MEMSET_CHK : RuntimeLibcall; +def MALLOC : RuntimeLibcall; def CALLOC : RuntimeLibcall; +def FREE : RuntimeLibcall; def BZERO : RuntimeLibcall; def STRLEN : RuntimeLibcall; @@ -569,6 +574,302 @@ def OBJC_RETAIN_AUTORELEASE : RuntimeLibcall; def OBJC_SYNC_ENTER : RuntimeLibcall; def OBJC_SYNC_EXIT : RuntimeLibcall; +def ABORT : RuntimeLibcall; +def ABS : RuntimeLibcall; +def ACCESS : RuntimeLibcall; +def ALIGNED_ALLOC : RuntimeLibcall; +def ATEXIT : RuntimeLibcall; +def ATOF : RuntimeLibcall; +def ATOI : RuntimeLibcall; +def ATOL : RuntimeLibcall; +def ATOLL : RuntimeLibcall; +def BCMP : RuntimeLibcall; +def BCOPY : 
RuntimeLibcall; +def CHMOD : RuntimeLibcall; +def CHOWN : RuntimeLibcall; +def CLEARERR : RuntimeLibcall; +def CLOSEDIR : RuntimeLibcall; +def CTERMID : RuntimeLibcall; +def CXA_ATEXIT : RuntimeLibcall; +def CXA_GUARD_ABORT : RuntimeLibcall; +def CXA_GUARD_ACQUIRE : RuntimeLibcall; +def CXA_GUARD_RELEASE : RuntimeLibcall; +def CXA_THROW : RuntimeLibcall; +def DUNDER_ISOC99_SCANF : RuntimeLibcall; +def DUNDER_ISOC99_SSCANF : RuntimeLibcall; +def DUNDER_STRDUP : RuntimeLibcall; +def DUNDER_STRNDUP : RuntimeLibcall; +def DUNDER_STRTOK_R : RuntimeLibcall; +def ENUM_VARIANT : RuntimeLibcall; +def EXECL : RuntimeLibcall; +def EXECLE : RuntimeLibcall; +def EXECLP : RuntimeLibcall; +def EXECV : RuntimeLibcall; +def EXECVE : RuntimeLibcall; +def EXECVP : RuntimeLibcall; +def EXECVPE : RuntimeLibcall; +def EXIT : RuntimeLibcall; +def FCLOSE : RuntimeLibcall; +def FDOPEN : RuntimeLibcall; +def FEOF : RuntimeLibcall; +def FERROR : RuntimeLibcall; +def FFLUSH : RuntimeLibcall; +def FFS : RuntimeLibcall; +def FFSL : RuntimeLibcall; +def FFSLL : RuntimeLibcall; +def FGETC : RuntimeLibcall; +def FGETC_UNLOCKED : RuntimeLibcall; +def FGETPOS : RuntimeLibcall; +def FGETS : RuntimeLibcall; +def FGETS_UNLOCKED : RuntimeLibcall; +def FILENO : RuntimeLibcall; +def FIPRINTF : RuntimeLibcall; +def FLOCKFILE : RuntimeLibcall; +def FLS : RuntimeLibcall; +def FLSL : RuntimeLibcall; +def FLSLL : RuntimeLibcall; +def FOPEN : RuntimeLibcall; +def FOPEN64 : RuntimeLibcall; +def FORK : RuntimeLibcall; +def FPRINTF : RuntimeLibcall; +def FPUTC : RuntimeLibcall; +def FPUTC_UNLOCKED : RuntimeLibcall; +def FPUTS : RuntimeLibcall; +def FPUTS_UNLOCKED : RuntimeLibcall; +def FREAD : RuntimeLibcall; +def FREAD_UNLOCKED : RuntimeLibcall; +def FSCANF : RuntimeLibcall; +def FSEEK : RuntimeLibcall; +def FSEEKO : RuntimeLibcall; +def FSEEKO64 : RuntimeLibcall; +def FSETPOS : RuntimeLibcall; +def FSTAT : RuntimeLibcall; +def FSTAT64 : RuntimeLibcall; +def FSTATVFS : RuntimeLibcall; +def FSTATVFS64 : 
RuntimeLibcall; +def FTELL : RuntimeLibcall; +def FTELLO : RuntimeLibcall; +def FTELLO64 : RuntimeLibcall; +def FTRYLOCKFILE : RuntimeLibcall; +def FUNLOCKFILE : RuntimeLibcall; +def FWRITE : RuntimeLibcall; +def FWRITE_UNLOCKED : RuntimeLibcall; +def GETC : RuntimeLibcall; +def GETCHAR : RuntimeLibcall; +def GETCHAR_UNLOCKED : RuntimeLibcall; +def GETC_UNLOCKED : RuntimeLibcall; +def GETENV : RuntimeLibcall; +def GETITIMER : RuntimeLibcall; +def GETLOGIN_R : RuntimeLibcall; +def GETPWNAM : RuntimeLibcall; +def GETS : RuntimeLibcall; +def GETTIMEOFDAY : RuntimeLibcall; +def HTONL : RuntimeLibcall; +def HTONS : RuntimeLibcall; +def IPRINTF : RuntimeLibcall; +def ISASCII : RuntimeLibcall; +def ISDIGIT : RuntimeLibcall; +def LABS : RuntimeLibcall; +def LCHOWN : RuntimeLibcall; +def LLABS : RuntimeLibcall; +def LSTAT : RuntimeLibcall; +def LSTAT64 : RuntimeLibcall; +def MEMALIGN : RuntimeLibcall; +def MEMCCPY : RuntimeLibcall; +def MEMCCPY_CHK : RuntimeLibcall; +def MEMCHR : RuntimeLibcall; +def MEMPCPY : RuntimeLibcall; +def MEMPCPY_CHK : RuntimeLibcall; +def MEMRCHR : RuntimeLibcall; +def MEMSET_PATTERN16 : RuntimeLibcall; +def MEMSET_PATTERN4 : RuntimeLibcall; +def MEMSET_PATTERN8 : RuntimeLibcall; +def MKDIR : RuntimeLibcall; +def MKTIME : RuntimeLibcall; +def MSVC_DELETE_ARRAY_PTR32 : RuntimeLibcall; +def MSVC_DELETE_ARRAY_PTR32_INT : RuntimeLibcall; +def MSVC_DELETE_ARRAY_PTR32_NOTHROW : RuntimeLibcall; +def MSVC_DELETE_ARRAY_PTR64 : RuntimeLibcall; +def MSVC_DELETE_ARRAY_PTR64_LONGLONG : RuntimeLibcall; +def MSVC_DELETE_ARRAY_PTR64_NOTHROW : RuntimeLibcall; +def MSVC_DELETE_PTR32 : RuntimeLibcall; +def MSVC_DELETE_PTR32_INT : RuntimeLibcall; +def MSVC_DELETE_PTR32_NOTHROW : RuntimeLibcall; +def MSVC_DELETE_PTR64 : RuntimeLibcall; +def MSVC_DELETE_PTR64_LONGLONG : RuntimeLibcall; +def MSVC_DELETE_PTR64_NOTHROW : RuntimeLibcall; +def MSVC_NEW_ARRAY_INT : RuntimeLibcall; +def MSVC_NEW_ARRAY_INT_NOTHROW : RuntimeLibcall; +def MSVC_NEW_ARRAY_LONGLONG : 
RuntimeLibcall; +def MSVC_NEW_ARRAY_LONGLONG_NOTHROW : RuntimeLibcall; +def MSVC_NEW_INT : RuntimeLibcall; +def MSVC_NEW_INT_NOTHROW : RuntimeLibcall; +def MSVC_NEW_LONGLONG : RuntimeLibcall; +def MSVC_NEW_LONGLONG_NOTHROW : RuntimeLibcall; +def NTOHL : RuntimeLibcall; +def NTOHS : RuntimeLibcall; +def OPEN : RuntimeLibcall; +def OPEN64 : RuntimeLibcall; +def OPENDIR : RuntimeLibcall; +def PCLOSE : RuntimeLibcall; +def PERROR : RuntimeLibcall; +def POPEN : RuntimeLibcall; +def POSIX_MEMALIGN : RuntimeLibcall; +def PREAD : RuntimeLibcall; +def PRINTF : RuntimeLibcall; +def PUTC : RuntimeLibcall; +def PUTCHAR : RuntimeLibcall; +def PUTCHAR_UNLOCKED : RuntimeLibcall; +def PUTC_UNLOCKED : RuntimeLibcall; +def PUTS : RuntimeLibcall; +def PVALLOC : RuntimeLibcall; +def PWRITE : RuntimeLibcall; +def QSORT : RuntimeLibcall; +def READ : RuntimeLibcall; +def READLINK : RuntimeLibcall; +def REALLOC : RuntimeLibcall; +def REALLOCARRAY : RuntimeLibcall; +def REALLOCF : RuntimeLibcall; +def REALPATH : RuntimeLibcall; +def REMOVE : RuntimeLibcall; +def RENAME : RuntimeLibcall; +def REWIND : RuntimeLibcall; +def RMDIR : RuntimeLibcall; +def SCANF : RuntimeLibcall; +def SETBUF : RuntimeLibcall; +def SETITIMER : RuntimeLibcall; +def SETVBUF : RuntimeLibcall; +def SIPRINTF : RuntimeLibcall; +def SIZE_RETURNING_NEW : RuntimeLibcall; +def SIZE_RETURNING_NEW_ALIGNED : RuntimeLibcall; +def SIZE_RETURNING_NEW_ALIGNED_HOT_COLD : RuntimeLibcall; +def SIZE_RETURNING_NEW_HOT_COLD : RuntimeLibcall; +def SMALL_FPRINTF : RuntimeLibcall; +def SMALL_PRINTF : RuntimeLibcall; +def SMALL_SPRINTF : RuntimeLibcall; +def SNPRINTF : RuntimeLibcall; +def SNPRINTF_CHK : RuntimeLibcall; +def SPRINTF : RuntimeLibcall; +def SPRINTF_CHK : RuntimeLibcall; +def SSCANF : RuntimeLibcall; +def STAT : RuntimeLibcall; +def STAT64 : RuntimeLibcall; +def STATVFS : RuntimeLibcall; +def STATVFS64 : RuntimeLibcall; +def STPCPY : RuntimeLibcall; +def STPCPY_CHK : RuntimeLibcall; +def STPNCPY : RuntimeLibcall; +def 
STPNCPY_CHK : RuntimeLibcall; +def STRCASECMP : RuntimeLibcall; +def STRCAT : RuntimeLibcall; +def STRCAT_CHK : RuntimeLibcall; +def STRCHR : RuntimeLibcall; +def STRCMP : RuntimeLibcall; +def STRCOLL : RuntimeLibcall; +def STRCPY : RuntimeLibcall; +def STRCPY_CHK : RuntimeLibcall; +def STRCSPN : RuntimeLibcall; +def STRDUP : RuntimeLibcall; +def STRLCAT : RuntimeLibcall; +def STRLCAT_CHK : RuntimeLibcall; +def STRLCPY : RuntimeLibcall; +def STRLCPY_CHK : RuntimeLibcall; +def STRLEN_CHK : RuntimeLibcall; +def STRNCASECMP : RuntimeLibcall; +def STRNCAT : RuntimeLibcall; +def STRNCAT_CHK : RuntimeLibcall; +def STRNCMP : RuntimeLibcall; +def STRNCPY : RuntimeLibcall; +def STRNCPY_CHK : RuntimeLibcall; +def STRNDUP : RuntimeLibcall; +def STRNLEN : RuntimeLibcall; +def STRPBRK : RuntimeLibcall; +def STRRCHR : RuntimeLibcall; +def STRSPN : RuntimeLibcall; +def STRSTR : RuntimeLibcall; +def STRTOD : RuntimeLibcall; +def STRTOF : RuntimeLibcall; +def STRTOK : RuntimeLibcall; +def STRTOK_R : RuntimeLibcall; +def STRTOL : RuntimeLibcall; +def STRTOLD : RuntimeLibcall; +def STRTOLL : RuntimeLibcall; +def STRTOUL : RuntimeLibcall; +def STRTOULL : RuntimeLibcall; +def STRXFRM : RuntimeLibcall; +def SYSTEM : RuntimeLibcall; +def TERMINATE : RuntimeLibcall; +def TIMES : RuntimeLibcall; +def TMPFILE : RuntimeLibcall; +def TMPFILE64 : RuntimeLibcall; +def TOASCII : RuntimeLibcall; +def UNAME : RuntimeLibcall; +def UNDER_IO_GETC : RuntimeLibcall; +def UNDER_IO_PUTC : RuntimeLibcall; +def UNGETC : RuntimeLibcall; +def UNLINK : RuntimeLibcall; +def UNSETENV : RuntimeLibcall; +def UTIME : RuntimeLibcall; +def UTIMES : RuntimeLibcall; +def VALLOC : RuntimeLibcall; +def VEC_CALLOC : RuntimeLibcall; +def VEC_FREE : RuntimeLibcall; +def VEC_MALLOC : RuntimeLibcall; +def VEC_REALLOC : RuntimeLibcall; +def VFPRINTF : RuntimeLibcall; +def VFSCANF : RuntimeLibcall; +def VPRINTF : RuntimeLibcall; +def VSCANF : RuntimeLibcall; +def VSNPRINTF : RuntimeLibcall; +def VSNPRINTF_CHK : RuntimeLibcall; 
+def VSPRINTF : RuntimeLibcall; +def VSPRINTF_CHK : RuntimeLibcall; +def VSSCANF : RuntimeLibcall; +def WCSLEN : RuntimeLibcall; +def WRITE : RuntimeLibcall; +def ZDAPV : RuntimeLibcall; +def ZDAPVJ : RuntimeLibcall; +def ZDAPVJST11ALIGN_VAL_T : RuntimeLibcall; +def ZDAPVM : RuntimeLibcall; +def ZDAPVMST11ALIGN_VAL_T : RuntimeLibcall; +def ZDAPVRKST9NOTHROW_T : RuntimeLibcall; +def ZDAPVST11ALIGN_VAL_T : RuntimeLibcall; +def ZDAPVST11ALIGN_VAL_TRKST9NOTHROW_T : RuntimeLibcall; +def ZDLPV : RuntimeLibcall; +def ZDLPVJ : RuntimeLibcall; +def ZDLPVJST11ALIGN_VAL_T : RuntimeLibcall; +def ZDLPVM : RuntimeLibcall; +def ZDLPVMST11ALIGN_VAL_T : RuntimeLibcall; +def ZDLPVRKST9NOTHROW_T : RuntimeLibcall; +def ZDLPVST11ALIGN_VAL_T : RuntimeLibcall; +def ZDLPVST11ALIGN_VAL_TRKST9NOTHROW_T : RuntimeLibcall; +def ZNAJ : RuntimeLibcall; +def ZNAJRKST9NOTHROW_T : RuntimeLibcall; +def ZNAJST11ALIGN_VAL_T : RuntimeLibcall; +def ZNAJST11ALIGN_VAL_TRKST9NOTHROW_T : RuntimeLibcall; +def ZNAM : RuntimeLibcall; +def ZNAM12__HOT_COLD_T : RuntimeLibcall; +def ZNAMRKST9NOTHROW_T : RuntimeLibcall; +def ZNAMRKST9NOTHROW_T12__HOT_COLD_T : RuntimeLibcall; +def ZNAMST11ALIGN_VAL_T : RuntimeLibcall; +def ZNAMST11ALIGN_VAL_T12__HOT_COLD_T : RuntimeLibcall; +def ZNAMST11ALIGN_VAL_TRKST9NOTHROW_T : RuntimeLibcall; +def ZNAMST11ALIGN_VAL_TRKST9NOTHROW_T12__HOT_COLD_T : RuntimeLibcall; +def ZNWJ : RuntimeLibcall; +def ZNWJRKST9NOTHROW_T : RuntimeLibcall; +def ZNWJST11ALIGN_VAL_T : RuntimeLibcall; +def ZNWJST11ALIGN_VAL_TRKST9NOTHROW_T : RuntimeLibcall; +def ZNWM : RuntimeLibcall; +def ZNWM12__HOT_COLD_T : RuntimeLibcall; +def ZNWMRKST9NOTHROW_T : RuntimeLibcall; +def ZNWMRKST9NOTHROW_T12__HOT_COLD_T : RuntimeLibcall; +def ZNWMST11ALIGN_VAL_T : RuntimeLibcall; +def ZNWMST11ALIGN_VAL_T12__HOT_COLD_T : RuntimeLibcall; +def ZNWMST11ALIGN_VAL_TRKST9NOTHROW_T : RuntimeLibcall; +def ZNWMST11ALIGN_VAL_TRKST9NOTHROW_T12__HOT_COLD_T : RuntimeLibcall; +def KMPC_ALLOC_SHARED : RuntimeLibcall; +def 
KMPC_FREE_SHARED : RuntimeLibcall; + //-------------------------------------------------------------------- // Global variable references //-------------------------------------------------------------------- @@ -1101,8 +1402,11 @@ def __memcpy_chk : RuntimeLibcallImpl; def __memmove_chk : RuntimeLibcallImpl; def __memset_chk : RuntimeLibcallImpl; +def malloc : RuntimeLibcallImpl; + // DSEPass can emit calloc if it finds a pair of malloc/memset def calloc : RuntimeLibcallImpl; +def free : RuntimeLibcallImpl; } // End let IsDefault = true @@ -1115,6 +1419,353 @@ def exp10l_ppcf128 : RuntimeLibcallImpl; // Stack Protector Fail def __stack_chk_fail : RuntimeLibcallImpl; +//-------------------------------------------------------------------- +// Other functions from TargetLibraryInfo +// +// TODO: These need to be organized by library and added to relevant +// systems. +/// +// -------------------------------------------------------------------- + +def __2_YAPAXI_Z : RuntimeLibcallImpl; +def __2_YAPAXIABUnothrow_t_std___Z + : RuntimeLibcallImpl; +def __2_YAPEAX_K_Z : RuntimeLibcallImpl; +def __2_YAPEAX_KAEBUnothrow_t_std___Z + : RuntimeLibcallImpl; +def __3_YAXPAX_Z : RuntimeLibcallImpl; +def __3_YAXPAXABUnothrow_t_std___Z + : RuntimeLibcallImpl; +def __3_YAXPAXI_Z : RuntimeLibcallImpl; +def __3_YAXPEAX_Z : RuntimeLibcallImpl; +def __3_YAXPEAXAEBUnothrow_t_std___Z + : RuntimeLibcallImpl; +def __3_YAXPEAX_K_Z + : RuntimeLibcallImpl; +def ___U_YAPAXI_Z : RuntimeLibcallImpl; +def ___U_YAPAXIABUnothrow_t_std___Z + : RuntimeLibcallImpl; +def ___U_YAPEAX_K_Z + : RuntimeLibcallImpl; +def ___U_YAPEAX_KAEBUnothrow_t_std___Z + : RuntimeLibcallImpl; +def ___V_YAXPAX_Z + : RuntimeLibcallImpl; +def ___V_YAXPAXABUnothrow_t_std___Z + : RuntimeLibcallImpl; +def ___V_YAXPAXI_Z + : RuntimeLibcallImpl; +def ___V_YAXPEAX_Z + : RuntimeLibcallImpl; +def ___V_YAXPEAXAEBUnothrow_t_std___Z + : RuntimeLibcallImpl; +def ___V_YAXPEAX_K_Z + : RuntimeLibcallImpl; +def _IO_getc : RuntimeLibcallImpl; 
+def _IO_putc : RuntimeLibcallImpl; +def _ZdaPv : RuntimeLibcallImpl; +def _ZdaPvRKSt9nothrow_t : RuntimeLibcallImpl; +def _ZdaPvSt11align_val_t : RuntimeLibcallImpl; +def _ZdaPvSt11align_val_tRKSt9nothrow_t + : RuntimeLibcallImpl; +def _ZdaPvj : RuntimeLibcallImpl; +def _ZdaPvjSt11align_val_t : RuntimeLibcallImpl; +def _ZdaPvm : RuntimeLibcallImpl; +def _ZdaPvmSt11align_val_t : RuntimeLibcallImpl; +def _ZdlPv : RuntimeLibcallImpl; +def _ZdlPvRKSt9nothrow_t : RuntimeLibcallImpl; +def _ZdlPvSt11align_val_t : RuntimeLibcallImpl; +def _ZdlPvSt11align_val_tRKSt9nothrow_t + : RuntimeLibcallImpl; +def _ZdlPvj : RuntimeLibcallImpl; +def _ZdlPvjSt11align_val_t : RuntimeLibcallImpl; +def _ZdlPvm : RuntimeLibcallImpl; +def _ZdlPvmSt11align_val_t : RuntimeLibcallImpl; +def _Znaj : RuntimeLibcallImpl; +def _ZnajRKSt9nothrow_t : RuntimeLibcallImpl; +def _ZnajSt11align_val_t : RuntimeLibcallImpl; +def _ZnajSt11align_val_tRKSt9nothrow_t + : RuntimeLibcallImpl; +def _Znam : RuntimeLibcallImpl; +def _Znam12__hot_cold_t : RuntimeLibcallImpl; +def _ZnamRKSt9nothrow_t : RuntimeLibcallImpl; +def _ZnamRKSt9nothrow_t12__hot_cold_t + : RuntimeLibcallImpl; +def _ZnamSt11align_val_t : RuntimeLibcallImpl; +def _ZnamSt11align_val_t12__hot_cold_t + : RuntimeLibcallImpl; +def _ZnamSt11align_val_tRKSt9nothrow_t + : RuntimeLibcallImpl; +def _ZnamSt11align_val_tRKSt9nothrow_t12__hot_cold_t + : RuntimeLibcallImpl; +def _Znwj : RuntimeLibcallImpl; +def _ZnwjRKSt9nothrow_t : RuntimeLibcallImpl; +def _ZnwjSt11align_val_t : RuntimeLibcallImpl; +def _ZnwjSt11align_val_tRKSt9nothrow_t + : RuntimeLibcallImpl; +def _Znwm : RuntimeLibcallImpl; +def _Znwm12__hot_cold_t : RuntimeLibcallImpl; +def _ZnwmRKSt9nothrow_t : RuntimeLibcallImpl; +def _ZnwmRKSt9nothrow_t12__hot_cold_t + : RuntimeLibcallImpl; +def _ZnwmSt11align_val_t : RuntimeLibcallImpl; +def _ZnwmSt11align_val_t12__hot_cold_t + : RuntimeLibcallImpl; +def _ZnwmSt11align_val_tRKSt9nothrow_t + : RuntimeLibcallImpl; +def 
_ZnwmSt11align_val_tRKSt9nothrow_t12__hot_cold_t + : RuntimeLibcallImpl; +def __size_returning_new : RuntimeLibcallImpl; +def __size_returning_new_hot_cold + : RuntimeLibcallImpl; +def __size_returning_new_aligned + : RuntimeLibcallImpl; +def __size_returning_new_aligned_hot_cold + : RuntimeLibcallImpl; +def __cxa_atexit : RuntimeLibcallImpl; +def atexit : RuntimeLibcallImpl; +def abort : RuntimeLibcallImpl; +def exit : RuntimeLibcallImpl; +def _Exit : RuntimeLibcallImpl; +def _ZSt9terminatev : RuntimeLibcallImpl; +def __cxa_throw : RuntimeLibcallImpl; +def __cxa_guard_abort : RuntimeLibcallImpl; +def __cxa_guard_acquire : RuntimeLibcallImpl; +def __cxa_guard_release : RuntimeLibcallImpl; +def __isoc99_scanf : RuntimeLibcallImpl; +def __isoc99_sscanf : RuntimeLibcallImpl; +def __kmpc_alloc_shared : RuntimeLibcallImpl; +def __kmpc_free_shared : RuntimeLibcallImpl; +def __memccpy_chk : RuntimeLibcallImpl; +def __mempcpy_chk : RuntimeLibcallImpl; +def __small_fprintf : RuntimeLibcallImpl; +def __small_printf : RuntimeLibcallImpl; +def __small_sprintf : RuntimeLibcallImpl; +def __snprintf_chk : RuntimeLibcallImpl; +def __sprintf_chk : RuntimeLibcallImpl; +def __stpcpy_chk : RuntimeLibcallImpl; +def __stpncpy_chk : RuntimeLibcallImpl; +def __strcat_chk : RuntimeLibcallImpl; +def __strcpy_chk : RuntimeLibcallImpl; +def __strdup : RuntimeLibcallImpl; +def __strlcat_chk : RuntimeLibcallImpl; +def __strlcpy_chk : RuntimeLibcallImpl; +def __strlen_chk : RuntimeLibcallImpl; +def __strncat_chk : RuntimeLibcallImpl; +def __strncpy_chk : RuntimeLibcallImpl; +def __strndup : RuntimeLibcallImpl; +def __strtok_r : RuntimeLibcallImpl; +def __vsnprintf_chk : RuntimeLibcallImpl; +def __vsprintf_chk : RuntimeLibcallImpl; +def abs : RuntimeLibcallImpl; +def access : RuntimeLibcallImpl; +def aligned_alloc : RuntimeLibcallImpl; +def atof : RuntimeLibcallImpl; +def atoi : RuntimeLibcallImpl; +def atol : RuntimeLibcallImpl; +def atoll : RuntimeLibcallImpl; +def bcmp : RuntimeLibcallImpl; 
+def bcopy : RuntimeLibcallImpl; +def cabs : RuntimeLibcallImpl; +def cabsf : RuntimeLibcallImpl; +defm cabsl : LibmLongDoubleLibCall; +def chmod : RuntimeLibcallImpl; +def chown : RuntimeLibcallImpl; +def clearerr : RuntimeLibcallImpl; +def closedir : RuntimeLibcallImpl; +def ctermid : RuntimeLibcallImpl; +def execl : RuntimeLibcallImpl; +def execle : RuntimeLibcallImpl; +def execlp : RuntimeLibcallImpl; +def execv : RuntimeLibcallImpl; +def execvP : RuntimeLibcallImpl; +def execve : RuntimeLibcallImpl; +def execvp : RuntimeLibcallImpl; +def execvpe : RuntimeLibcallImpl; +def fclose : RuntimeLibcallImpl; +def fdopen : RuntimeLibcallImpl; +def feof : RuntimeLibcallImpl; +def ferror : RuntimeLibcallImpl; +def fflush : RuntimeLibcallImpl; +def ffs : RuntimeLibcallImpl; +def ffsl : RuntimeLibcallImpl; +def ffsll : RuntimeLibcallImpl; +def fgetc : RuntimeLibcallImpl; +def fgetc_unlocked : RuntimeLibcallImpl; +def fgetpos : RuntimeLibcallImpl; +def fgets : RuntimeLibcallImpl; +def fgets_unlocked : RuntimeLibcallImpl; +def fileno : RuntimeLibcallImpl; +def fiprintf : RuntimeLibcallImpl; +def flockfile : RuntimeLibcallImpl; +def fls : RuntimeLibcallImpl; +def flsl : RuntimeLibcallImpl; +def flsll : RuntimeLibcallImpl; +def fopen : RuntimeLibcallImpl; +def fopen64 : RuntimeLibcallImpl; +def fork : RuntimeLibcallImpl; +def fprintf : RuntimeLibcallImpl; +def fputc : RuntimeLibcallImpl; +def fputc_unlocked : RuntimeLibcallImpl; +def fputs : RuntimeLibcallImpl; +def fputs_unlocked : RuntimeLibcallImpl; +def fread : RuntimeLibcallImpl; +def fread_unlocked : RuntimeLibcallImpl; +def fscanf : RuntimeLibcallImpl; +def fseek : RuntimeLibcallImpl; +def fseeko : RuntimeLibcallImpl; +def fseeko64 : RuntimeLibcallImpl; +def fsetpos : RuntimeLibcallImpl; +def fstat : RuntimeLibcallImpl; +def fstat64 : RuntimeLibcallImpl; +def fstatvfs : RuntimeLibcallImpl; +def fstatvfs64 : RuntimeLibcallImpl; +def ftell : RuntimeLibcallImpl; +def ftello : RuntimeLibcallImpl; +def ftello64 : 
RuntimeLibcallImpl; +def ftrylockfile : RuntimeLibcallImpl; +def funlockfile : RuntimeLibcallImpl; +def fwrite : RuntimeLibcallImpl; +def fwrite_unlocked : RuntimeLibcallImpl; +def getc : RuntimeLibcallImpl; +def getc_unlocked : RuntimeLibcallImpl; +def getchar : RuntimeLibcallImpl; +def getchar_unlocked : RuntimeLibcallImpl; +def getenv : RuntimeLibcallImpl; +def getitimer : RuntimeLibcallImpl; +def getlogin_r : RuntimeLibcallImpl; +def getpwnam : RuntimeLibcallImpl; +def gets : RuntimeLibcallImpl; +def gettimeofday : RuntimeLibcallImpl; +def htonl : RuntimeLibcallImpl; +def htons : RuntimeLibcallImpl; +def iprintf : RuntimeLibcallImpl; +def isascii : RuntimeLibcallImpl; +def isdigit : RuntimeLibcallImpl; +def labs : RuntimeLibcallImpl; +def lchown : RuntimeLibcallImpl; +def llabs : RuntimeLibcallImpl; +def lstat : RuntimeLibcallImpl; +def lstat64 : RuntimeLibcallImpl; +def memalign : RuntimeLibcallImpl; +def memccpy : RuntimeLibcallImpl; +def memchr : RuntimeLibcallImpl; +def memcmp : RuntimeLibcallImpl; +def mempcpy : RuntimeLibcallImpl; +def memrchr : RuntimeLibcallImpl; +def memset_pattern16 : RuntimeLibcallImpl; +def memset_pattern4 : RuntimeLibcallImpl; +def memset_pattern8 : RuntimeLibcallImpl; +def mkdir : RuntimeLibcallImpl; +def mktime : RuntimeLibcallImpl; +def ntohl : RuntimeLibcallImpl; +def ntohs : RuntimeLibcallImpl; +def open : RuntimeLibcallImpl; +def open64 : RuntimeLibcallImpl; +def opendir : RuntimeLibcallImpl; +def pclose : RuntimeLibcallImpl; +def perror : RuntimeLibcallImpl; +def popen : RuntimeLibcallImpl; +def posix_memalign : RuntimeLibcallImpl; +def pread : RuntimeLibcallImpl; +def printf : RuntimeLibcallImpl; +def putc : RuntimeLibcallImpl; +def putc_unlocked : RuntimeLibcallImpl; +def putchar : RuntimeLibcallImpl; +def putchar_unlocked : RuntimeLibcallImpl; +def puts : RuntimeLibcallImpl; +def pvalloc : RuntimeLibcallImpl; +def pwrite : RuntimeLibcallImpl; +def qsort : RuntimeLibcallImpl; +def read : RuntimeLibcallImpl; +def readlink : 
RuntimeLibcallImpl; +def realloc : RuntimeLibcallImpl; +def reallocf : RuntimeLibcallImpl; +def reallocarray : RuntimeLibcallImpl; +def realpath : RuntimeLibcallImpl; +def remove : RuntimeLibcallImpl; +def rename : RuntimeLibcallImpl; +def rewind : RuntimeLibcallImpl; +def rmdir : RuntimeLibcallImpl; +def scanf : RuntimeLibcallImpl; +def setbuf : RuntimeLibcallImpl; +def setitimer : RuntimeLibcallImpl; +def setvbuf : RuntimeLibcallImpl; +def siprintf : RuntimeLibcallImpl; +def snprintf : RuntimeLibcallImpl; +def sprintf : RuntimeLibcallImpl; +def sscanf : RuntimeLibcallImpl; +def stat : RuntimeLibcallImpl; +def stat64 : RuntimeLibcallImpl; +def statvfs : RuntimeLibcallImpl; +def statvfs64 : RuntimeLibcallImpl; +def stpcpy : RuntimeLibcallImpl; +def stpncpy : RuntimeLibcallImpl; +def strcasecmp : RuntimeLibcallImpl; +def strcat : RuntimeLibcallImpl; +def strchr : RuntimeLibcallImpl; +def strcmp : RuntimeLibcallImpl; +def strcoll : RuntimeLibcallImpl; +def strcpy : RuntimeLibcallImpl; +def strcspn : RuntimeLibcallImpl; +def strdup : RuntimeLibcallImpl; +def strlcat : RuntimeLibcallImpl; +def strlcpy : RuntimeLibcallImpl; +def strlen : RuntimeLibcallImpl; +def strncasecmp : RuntimeLibcallImpl; +def strncat : RuntimeLibcallImpl; +def strncmp : RuntimeLibcallImpl; +def strncpy : RuntimeLibcallImpl; +def strndup : RuntimeLibcallImpl; +def strnlen : RuntimeLibcallImpl; +def strpbrk : RuntimeLibcallImpl; +def strrchr : RuntimeLibcallImpl; +def strspn : RuntimeLibcallImpl; +def strstr : RuntimeLibcallImpl; +def strtod : RuntimeLibcallImpl; +def strtof : RuntimeLibcallImpl; +def strtok : RuntimeLibcallImpl; +def strtok_r : RuntimeLibcallImpl; +def strtol : RuntimeLibcallImpl; +def strtold : RuntimeLibcallImpl; +def strtoll : RuntimeLibcallImpl; +def strtoul : RuntimeLibcallImpl; +def strtoull : RuntimeLibcallImpl; +def strxfrm : RuntimeLibcallImpl; +def system : RuntimeLibcallImpl; +def times : RuntimeLibcallImpl; +def tmpfile : RuntimeLibcallImpl; +def tmpfile64 : 
RuntimeLibcallImpl; +def toascii : RuntimeLibcallImpl; +def uname : RuntimeLibcallImpl; +def ungetc : RuntimeLibcallImpl; +def unlink : RuntimeLibcallImpl; +def unsetenv : RuntimeLibcallImpl; +def utime : RuntimeLibcallImpl; +def utimes : RuntimeLibcallImpl; +def valloc : RuntimeLibcallImpl; +def vec_calloc : RuntimeLibcallImpl; +def vec_free : RuntimeLibcallImpl; +def vec_malloc : RuntimeLibcallImpl; +def vec_realloc : RuntimeLibcallImpl; +def vfprintf : RuntimeLibcallImpl; +def vfscanf : RuntimeLibcallImpl; +def vprintf : RuntimeLibcallImpl; +def vscanf : RuntimeLibcallImpl; +def vsnprintf : RuntimeLibcallImpl; +def vsprintf : RuntimeLibcallImpl; +def vsscanf : RuntimeLibcallImpl; +def wcslen : RuntimeLibcallImpl; +def write : RuntimeLibcallImpl; + //-------------------------------------------------------------------- // compiler-rt/libgcc but 64-bit only, not available by default //-------------------------------------------------------------------- @@ -1326,6 +1977,11 @@ defvar DarwinSinCosStret = LibcallImpls<(add __sincosf_stret, __sincos_stret, darwinHasSinCosStret>; defvar DarwinExp10 = LibcallImpls<(add __exp10f, __exp10), darwinHasExp10>; +defvar DarwinMemsetPattern = LibcallImpls<(add memset_pattern4, + memset_pattern8, + memset_pattern16), + darwinHasMemsetPattern>; + defvar SecurityCheckCookieIfWinMSVC = LibcallImpls<(add __security_check_cookie, __security_cookie), isWindowsMSVCOrItaniumEnvironment>; @@ -1483,7 +2139,7 @@ def AArch64SystemLibrary : SystemRuntimeLibrary< AArch64LibcallImpls, LibcallImpls<(add Int128RTLibcalls), isAArch64_ILP64>, LibcallImpls<(add bzero), isOSDarwin>, - DarwinExp10, DarwinSinCosStret, + DarwinExp10, DarwinSinCosStret, DarwinMemsetPattern, LibmHasSinCosF32, LibmHasSinCosF64, LibmHasSinCosF128, DefaultLibmExp10, DefaultStackProtector, @@ -1953,7 +2609,7 @@ def ARMSystemLibrary WindowARMFPIntCasts, SecurityCheckCookieIfWinMSVC, AEABIDivRemCalls, - DarwinSinCosStret, DarwinExp10, + DarwinSinCosStret, DarwinExp10, 
DarwinMemsetPattern, LibmHasSinCosF32, LibmHasSinCosF64, LibmHasSinCosF128, DefaultLibmExp10, @@ -2638,7 +3294,7 @@ defvar MemChkLibcalls = [__memcpy_chk, __memset_chk, __memmove_chk]; defvar X86CommonLibcalls = (add (sub WinDefaultLibcallImpls, WindowsDivRemMulLibcallOverrides, MemChkLibcalls), - DarwinSinCosStret, DarwinExp10, + DarwinSinCosStret, DarwinExp10, DarwinMemsetPattern, X86_F128_Libcalls, LibmHasSinCosF80, // FIXME: Depends on long double SinCosF32F64Libcalls, diff --git a/llvm/include/llvm/MC/MCInstrDesc.h b/llvm/include/llvm/MC/MCInstrDesc.h index c2f15b81da02c..5722213347d51 100644 --- a/llvm/include/llvm/MC/MCInstrDesc.h +++ b/llvm/include/llvm/MC/MCInstrDesc.h @@ -49,8 +49,7 @@ enum OperandConstraint { /// private, all access should go through the MCOperandInfo accessors. /// See the accessors for a description of what these are. enum OperandFlags { - LookupPtrRegClass = 0, - LookupRegClassByHwMode, + LookupRegClassByHwMode = 0, Predicate, OptionalDef, BranchTarget @@ -90,9 +89,6 @@ class MCOperandInfo { /// operand is a register. If LookupRegClassByHwMode is set, then this is an /// index into a table in TargetInstrInfo or MCInstrInfo which contains the /// real register class ID. - /// - /// If isLookupPtrRegClass is set, then this is an index that is passed to - /// TargetRegisterInfo::getPointerRegClass(x) to get a dynamic register class. int16_t RegClass; /// These are flags from the MCOI::OperandFlags enum. @@ -104,13 +100,6 @@ class MCOperandInfo { /// Operand constraints (see OperandConstraint enum). uint16_t Constraints; - /// Set if this operand is a pointer value and it requires a callback - /// to look up its register class. - // TODO: Deprecated in favor of isLookupRegClassByHwMode - bool isLookupPtrRegClass() const { - return Flags & (1 << MCOI::LookupPtrRegClass); - } - /// Set if this operand is a value that requires the current hwmode to look up /// its register class. 
bool isLookupRegClassByHwMode() const { diff --git a/llvm/include/llvm/MC/MCObjectStreamer.h b/llvm/include/llvm/MC/MCObjectStreamer.h index d9aecd881b51c..3c5a6ce42e4f8 100644 --- a/llvm/include/llvm/MC/MCObjectStreamer.h +++ b/llvm/include/llvm/MC/MCObjectStreamer.h @@ -77,7 +77,7 @@ class MCObjectStreamer : public MCStreamer { /// Object streamers require the integrated assembler. bool isIntegratedAssemblerRequired() const override { return true; } - void emitFrames(MCAsmBackend *MAB); + void emitFrames(); MCSymbol *emitCFILabel() override; void emitCFISections(bool EH, bool Debug, bool SFrame) override; diff --git a/llvm/include/llvm/MC/MCTargetOptionsCommandFlags.h b/llvm/include/llvm/MC/MCTargetOptionsCommandFlags.h index adfdccdb5ab77..168131b43cca8 100644 --- a/llvm/include/llvm/MC/MCTargetOptionsCommandFlags.h +++ b/llvm/include/llvm/MC/MCTargetOptionsCommandFlags.h @@ -22,6 +22,7 @@ namespace llvm { class MCTargetOptions; enum class EmitDwarfUnwindType; +class StringRef; namespace mc { @@ -62,9 +63,9 @@ LLVM_ABI bool getX86RelaxRelocations(); LLVM_ABI bool getX86Sse2Avx(); -LLVM_ABI std::string getABIName(); +LLVM_ABI StringRef getABIName(); -LLVM_ABI std::string getAsSecureLogFile(); +LLVM_ABI StringRef getAsSecureLogFile(); /// Create this object with static storage to register mc-related command /// line options. diff --git a/llvm/include/llvm/SandboxIR/Constant.h b/llvm/include/llvm/SandboxIR/Constant.h index 6f682a7059d10..2fe923f6c3866 100644 --- a/llvm/include/llvm/SandboxIR/Constant.h +++ b/llvm/include/llvm/SandboxIR/Constant.h @@ -1363,7 +1363,8 @@ class ConstantPtrAuth final : public Constant { public: /// Return a pointer signed with the specified parameters. LLVM_ABI static ConstantPtrAuth *get(Constant *Ptr, ConstantInt *Key, - ConstantInt *Disc, Constant *AddrDisc); + ConstantInt *Disc, Constant *AddrDisc, + Constant *DeactivationSymbol); /// The pointer that is signed in this ptrauth signed pointer. 
LLVM_ABI Constant *getPointer() const; @@ -1378,6 +1379,8 @@ class ConstantPtrAuth final : public Constant { /// the only global-initializer user of the ptrauth signed pointer. LLVM_ABI Constant *getAddrDiscriminator() const; + Constant *getDeactivationSymbol() const; + /// Whether there is any non-null address discriminator. bool hasAddressDiscriminator() const { return cast(Val)->hasAddressDiscriminator(); diff --git a/llvm/include/llvm/Support/AllocToken.h b/llvm/include/llvm/Support/AllocToken.h index e40d8163a9d7c..1dc3a0cacef24 100644 --- a/llvm/include/llvm/Support/AllocToken.h +++ b/llvm/include/llvm/Support/AllocToken.h @@ -46,6 +46,9 @@ inline constexpr AllocTokenMode DefaultAllocTokenMode = LLVM_ABI std::optional getAllocTokenModeFromString(StringRef Name); +/// Returns the canonical string name for the given AllocTokenMode. +LLVM_ABI StringRef getAllocTokenModeAsString(AllocTokenMode Mode); + /// Metadata about an allocation used to generate a token ID. struct AllocTokenMetadata { SmallString<64> TypeName; diff --git a/llvm/include/llvm/Target/Target.td b/llvm/include/llvm/Target/Target.td index 6abde996e6dc8..54162dc6bb30f 100644 --- a/llvm/include/llvm/Target/Target.td +++ b/llvm/include/llvm/Target/Target.td @@ -694,6 +694,7 @@ class Instruction : InstructionEncoding { // If so, make sure to override // TargetInstrInfo::getInsertSubregLikeInputs. bit variadicOpsAreDefs = false; // Are variadic operands definitions? + bit supportsDeactivationSymbol = false; // Does the instruction have side effects that are not captured by any // operands of the instruction or other flags? @@ -918,16 +919,23 @@ def slice; def encoder; def decoder; -/// PointerLikeRegClass - Values that are designed to have pointer width are -/// derived from this. TableGen treats the register class as having a symbolic -/// type that it doesn't know, and resolves the actual regclass to use by using -/// the TargetRegisterInfo::getPointerRegClass() hook at codegen time. 
-/// -/// This is deprecated in favor of RegClassByHwMode. +/// PointerLikeRegClass - Pseudoinstruction operands that are designed +/// to have pointer width are derived from this. This should only be +/// used by StandardPseudoInstruction instructions. No target specific +/// instruction should use this. class PointerLikeRegClass { int RegClassKind = Kind; } +/// ptr_rc definition - Mark this operand as being a pointer value +/// whose register class needs to be defined by the target. Targets +/// should provide instruction definition overrides which substitute +/// the uses of this with the backend defined RegisterClass or +/// RegClassByHwMode to use for pointer virtual registers for a +/// particular opcode (typically by defining a subsitute instruction +/// with RemapPointerOperands). +def ptr_rc : PointerLikeRegClass<0>; + /// RegClassByHwMode - Operands that change the register class based /// on the subtarget are derived from this. TableGen /// treats the register class as having a symbolic kind that it @@ -941,13 +949,6 @@ class RegClassByHwMode Modes, list Objects = RegClasses; } -/// ptr_rc definition - Mark this operand as being a pointer value whose -/// register class is resolved dynamically via a callback to TargetInstrInfo. -/// FIXME: We should probably change this to a class which contain a list of -/// flags. But currently we have but one flag. -// Deprecated, use RegClassByHwMode instead. -def ptr_rc : PointerLikeRegClass<0>; - /// unknown definition - Mark this operand as being of unknown type, causing /// it to be resolved by inference in the context it is used. 
class unknown_class; diff --git a/llvm/include/llvm/Transforms/Instrumentation/BoundsChecking.h b/llvm/include/llvm/Transforms/Instrumentation/BoundsChecking.h index 8e7df5e6b10f0..e4bfcd395c2d6 100644 --- a/llvm/include/llvm/Transforms/Instrumentation/BoundsChecking.h +++ b/llvm/include/llvm/Transforms/Instrumentation/BoundsChecking.h @@ -11,6 +11,7 @@ #include "llvm/IR/PassManager.h" #include "llvm/Support/Compiler.h" +#include "llvm/TargetParser/Triple.h" #include namespace llvm { @@ -23,10 +24,12 @@ class BoundsCheckingPass : public PassInfoMixin { public: struct Options { struct Runtime { - Runtime(bool MinRuntime, bool MayReturn) - : MinRuntime(MinRuntime), MayReturn(MayReturn) {} + Runtime(bool MinRuntime, bool MayReturn, bool HandlerPreserveAllRegs) + : MinRuntime(MinRuntime), MayReturn(MayReturn), + HandlerPreserveAllRegs(HandlerPreserveAllRegs) {} bool MinRuntime; bool MayReturn; + bool HandlerPreserveAllRegs; }; std::optional Rt; // Trap if empty. bool Merge = false; diff --git a/llvm/lib/Analysis/Delinearization.cpp b/llvm/lib/Analysis/Delinearization.cpp index 4064b25d9d4e7..8a8c2277012ec 100644 --- a/llvm/lib/Analysis/Delinearization.cpp +++ b/llvm/lib/Analysis/Delinearization.cpp @@ -704,44 +704,6 @@ bool llvm::getIndexExpressionsFromGEP(ScalarEvolution &SE, return !Subscripts.empty(); } -bool llvm::tryDelinearizeFixedSizeImpl( - ScalarEvolution *SE, Instruction *Inst, const SCEV *AccessFn, - SmallVectorImpl &Subscripts, SmallVectorImpl &Sizes) { - Value *SrcPtr = getLoadStorePointerOperand(Inst); - - // Check the simple case where the array dimensions are fixed size. - auto *SrcGEP = dyn_cast(SrcPtr); - if (!SrcGEP) - return false; - - getIndexExpressionsFromGEP(*SE, SrcGEP, Subscripts, Sizes); - - // Check that the two size arrays are non-empty and equal in length and - // value. - // TODO: it would be better to let the caller to clear Subscripts, similar - // to how we handle Sizes. 
- if (Sizes.empty() || Subscripts.size() <= 1) { - Subscripts.clear(); - return false; - } - - // Check that for identical base pointers we do not miss index offsets - // that have been added before this GEP is applied. - Value *SrcBasePtr = SrcGEP->getOperand(0)->stripPointerCasts(); - const SCEVUnknown *SrcBase = - dyn_cast(SE->getPointerBase(AccessFn)); - if (!SrcBase || SrcBasePtr != SrcBase->getValue()) { - Subscripts.clear(); - return false; - } - - assert(Subscripts.size() == Sizes.size() + 1 && - "Expected equal number of entries in the list of size and " - "subscript."); - - return true; -} - namespace { void printDelinearization(raw_ostream &O, Function *F, LoopInfo *LI, diff --git a/llvm/lib/Analysis/LoopCacheAnalysis.cpp b/llvm/lib/Analysis/LoopCacheAnalysis.cpp index e0e2be8e35929..3bba2e8c0d8ad 100644 --- a/llvm/lib/Analysis/LoopCacheAnalysis.cpp +++ b/llvm/lib/Analysis/LoopCacheAnalysis.cpp @@ -368,8 +368,16 @@ bool IndexedReference::tryDelinearizeFixedSize( // the load/store instruction being analyzed. It is not needed for further // analysis. // TODO: Maybe this property should be enforced in delinearizeFixedSizeArray. 
+#ifndef NDEBUG assert(!Sizes.empty() && Subscripts.size() == Sizes.size() && - Sizes.back() == ElementSize && "Unexpected delinearization result"); + "Inconsistent length of Sizes and Subscripts"); + Type *WideTy = + SE.getWiderType(ElementSize->getType(), Sizes.back()->getType()); + const SCEV *ElemSizeExt = SE.getNoopOrZeroExtend(ElementSize, WideTy); + const SCEV *LastSizeExt = SE.getNoopOrZeroExtend(Sizes.back(), WideTy); + assert(ElemSizeExt == LastSizeExt && "Unexpected last element of Sizes"); +#endif + Sizes.pop_back(); return true; } diff --git a/llvm/lib/AsmParser/LLParser.cpp b/llvm/lib/AsmParser/LLParser.cpp index 61d5c2c81df2e..c3678d37607d5 100644 --- a/llvm/lib/AsmParser/LLParser.cpp +++ b/llvm/lib/AsmParser/LLParser.cpp @@ -4250,11 +4250,13 @@ bool LLParser::parseValID(ValID &ID, PerFunctionState *PFS, Type *ExpectedTy) { } case lltok::kw_ptrauth: { // ValID ::= 'ptrauth' '(' ptr @foo ',' i32 - // (',' i64 (',' ptr addrdisc)? )? ')' + // (',' i64 (',' ptr addrdisc (',' ptr ds)? + // )? )? ')' Lex.Lex(); Constant *Ptr, *Key; - Constant *Disc = nullptr, *AddrDisc = nullptr; + Constant *Disc = nullptr, *AddrDisc = nullptr, + *DeactivationSymbol = nullptr; if (parseToken(lltok::lparen, "expected '(' in constant ptrauth expression") || @@ -4263,11 +4265,14 @@ bool LLParser::parseValID(ValID &ID, PerFunctionState *PFS, Type *ExpectedTy) { "expected comma in constant ptrauth expression") || parseGlobalTypeAndValue(Key)) return true; - // If present, parse the optional disc/addrdisc. - if (EatIfPresent(lltok::comma)) - if (parseGlobalTypeAndValue(Disc) || - (EatIfPresent(lltok::comma) && parseGlobalTypeAndValue(AddrDisc))) - return true; + // If present, parse the optional disc/addrdisc/ds. 
+ if (EatIfPresent(lltok::comma) && parseGlobalTypeAndValue(Disc)) + return true; + if (EatIfPresent(lltok::comma) && parseGlobalTypeAndValue(AddrDisc)) + return true; + if (EatIfPresent(lltok::comma) && + parseGlobalTypeAndValue(DeactivationSymbol)) + return true; if (parseToken(lltok::rparen, "expected ')' in constant ptrauth expression")) return true; @@ -4298,7 +4303,15 @@ bool LLParser::parseValID(ValID &ID, PerFunctionState *PFS, Type *ExpectedTy) { AddrDisc = ConstantPointerNull::get(PointerType::get(Context, 0)); } - ID.ConstantVal = ConstantPtrAuth::get(Ptr, KeyC, DiscC, AddrDisc); + if (!DeactivationSymbol) + DeactivationSymbol = + ConstantPointerNull::get(PointerType::get(Context, 0)); + if (!DeactivationSymbol->getType()->isPointerTy()) + return error(ID.Loc, + "constant ptrauth deactivation symbol must be a pointer"); + + ID.ConstantVal = + ConstantPtrAuth::get(Ptr, KeyC, DiscC, AddrDisc, DeactivationSymbol); ID.Kind = ValID::t_Constant; return false; } diff --git a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp index 9f3bb230440fb..04cb0a699ebbf 100644 --- a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp +++ b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp @@ -1609,7 +1609,16 @@ Expected BitcodeReader::materializeValue(unsigned StartValID, if (!Disc) return error("ptrauth disc operand must be ConstantInt"); - C = ConstantPtrAuth::get(ConstOps[0], Key, Disc, ConstOps[3]); + Constant *DeactivationSymbol = + ConstOps.size() > 4 ? 
ConstOps[4] + : ConstantPointerNull::get(cast( + ConstOps[3]->getType())); + if (!DeactivationSymbol->getType()->isPointerTy()) + return error( + "ptrauth deactivation symbol operand must be a pointer"); + + C = ConstantPtrAuth::get(ConstOps[0], Key, Disc, ConstOps[3], + DeactivationSymbol); break; } case BitcodeConstant::NoCFIOpcode: { @@ -3813,6 +3822,16 @@ Error BitcodeReader::parseConstants() { (unsigned)Record[2], (unsigned)Record[3]}); break; } + case bitc::CST_CODE_PTRAUTH2: { + if (Record.size() < 5) + return error("Invalid ptrauth record"); + // Ptr, Key, Disc, AddrDisc, DeactivationSymbol + V = BitcodeConstant::create( + Alloc, CurTy, BitcodeConstant::ConstantPtrAuthOpcode, + {(unsigned)Record[0], (unsigned)Record[1], (unsigned)Record[2], + (unsigned)Record[3], (unsigned)Record[4]}); + break; + } } assert(V->getType() == getTypeByID(CurTyID) && "Incorrect result type ID"); diff --git a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp index 1d0461478b90c..0dd3fa3361fee 100644 --- a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp +++ b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp @@ -3030,11 +3030,12 @@ void ModuleBitcodeWriter::writeConstants(unsigned FirstVal, unsigned LastVal, Record.push_back(VE.getTypeID(NC->getGlobalValue()->getType())); Record.push_back(VE.getValueID(NC->getGlobalValue())); } else if (const auto *CPA = dyn_cast(C)) { - Code = bitc::CST_CODE_PTRAUTH; + Code = bitc::CST_CODE_PTRAUTH2; Record.push_back(VE.getValueID(CPA->getPointer())); Record.push_back(VE.getValueID(CPA->getKey())); Record.push_back(VE.getValueID(CPA->getDiscriminator())); Record.push_back(VE.getValueID(CPA->getAddrDiscriminator())); + Record.push_back(VE.getValueID(CPA->getDeactivationSymbol())); } else { #ifndef NDEBUG C->dump(); diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp index 751d3735d3b2b..2e4a26ef70bc2 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp +++ 
b/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp @@ -493,10 +493,12 @@ void DwarfCompileUnit::attachLowHighPC(DIE &D, const MCSymbol *Begin, assert(End->isDefined() && "Invalid end label"); addLabelAddress(D, dwarf::DW_AT_low_pc, Begin); - if (DD->getDwarfVersion() < 4) - addLabelAddress(D, dwarf::DW_AT_high_pc, End); - else + if (DD->getDwarfVersion() >= 4 && + (!isDwoUnit() || !llvm::isRangeRelaxable(Begin, End))) { addLabelDelta(D, dwarf::DW_AT_high_pc, End, Begin); + return; + } + addLabelAddress(D, dwarf::DW_AT_high_pc, End); } // Add info for Wasm-global-based relocation. diff --git a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp index 7be7468300569..e2ed45eec0ecd 100644 --- a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp +++ b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp @@ -196,6 +196,10 @@ bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB, assert(Info.CFIType->getType()->isIntegerTy(32) && "Invalid CFI type"); } + if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_deactivation_symbol)) { + Info.DeactivationSymbol = cast(Bundle->Inputs[0]); + } + Info.CB = &CB; Info.KnownCallees = CB.getMetadata(LLVMContext::MD_callees); Info.CallConv = CallConv; diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp index 2ec138b6e186d..e0665d99a891d 100644 --- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp +++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp @@ -2917,6 +2917,9 @@ bool IRTranslator::translateIntrinsic( } } + if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_deactivation_symbol)) + MIB->setDeactivationSymbol(*MF, Bundle->Inputs[0].get()); + return true; } diff --git a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp index 637acd61c8a5f..3906b311addf0 100644 --- a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp +++ b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp @@ -38,8 
+38,10 @@ void MachineIRBuilder::setMF(MachineFunction &MF) { //------------------------------------------------------------------------------ MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) { - return BuildMI(getMF(), {getDL(), getPCSections(), getMMRAMetadata()}, - getTII().get(Opcode)); + return BuildMI( + getMF(), + {getDL(), getPCSections(), getMMRAMetadata(), getDeactivationSymbol()}, + getTII().get(Opcode)); } MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) { diff --git a/llvm/lib/CodeGen/MIRParser/MILexer.cpp b/llvm/lib/CodeGen/MIRParser/MILexer.cpp index 8b72c295416a2..dbd56c7414f38 100644 --- a/llvm/lib/CodeGen/MIRParser/MILexer.cpp +++ b/llvm/lib/CodeGen/MIRParser/MILexer.cpp @@ -281,6 +281,7 @@ static MIToken::TokenKind getIdentifierKind(StringRef Identifier) { .Case("heap-alloc-marker", MIToken::kw_heap_alloc_marker) .Case("pcsections", MIToken::kw_pcsections) .Case("cfi-type", MIToken::kw_cfi_type) + .Case("deactivation-symbol", MIToken::kw_deactivation_symbol) .Case("bbsections", MIToken::kw_bbsections) .Case("bb_id", MIToken::kw_bb_id) .Case("unknown-size", MIToken::kw_unknown_size) diff --git a/llvm/lib/CodeGen/MIRParser/MILexer.h b/llvm/lib/CodeGen/MIRParser/MILexer.h index 0627f176b9e00..0407a0e7540d7 100644 --- a/llvm/lib/CodeGen/MIRParser/MILexer.h +++ b/llvm/lib/CodeGen/MIRParser/MILexer.h @@ -136,6 +136,7 @@ struct MIToken { kw_heap_alloc_marker, kw_pcsections, kw_cfi_type, + kw_deactivation_symbol, kw_bbsections, kw_bb_id, kw_unknown_size, diff --git a/llvm/lib/CodeGen/MIRParser/MIParser.cpp b/llvm/lib/CodeGen/MIRParser/MIParser.cpp index 434a579c3be3f..f35274d4e2edf 100644 --- a/llvm/lib/CodeGen/MIRParser/MIParser.cpp +++ b/llvm/lib/CodeGen/MIRParser/MIParser.cpp @@ -1072,6 +1072,7 @@ bool MIParser::parse(MachineInstr *&MI) { Token.isNot(MIToken::kw_heap_alloc_marker) && Token.isNot(MIToken::kw_pcsections) && Token.isNot(MIToken::kw_cfi_type) && + 
Token.isNot(MIToken::kw_deactivation_symbol) && Token.isNot(MIToken::kw_debug_location) && Token.isNot(MIToken::kw_debug_instr_number) && Token.isNot(MIToken::coloncolon) && Token.isNot(MIToken::lbrace)) { @@ -1120,6 +1121,14 @@ bool MIParser::parse(MachineInstr *&MI) { lex(); } + GlobalValue *DS = nullptr; + if (Token.is(MIToken::kw_deactivation_symbol)) { + lex(); + if (parseGlobalValue(DS)) + return true; + lex(); + } + unsigned InstrNum = 0; if (Token.is(MIToken::kw_debug_instr_number)) { lex(); @@ -1196,6 +1205,8 @@ bool MIParser::parse(MachineInstr *&MI) { MI->setPCSections(MF, PCSections); if (CFIType) MI->setCFIType(MF, CFIType); + if (DS) + MI->setDeactivationSymbol(MF, DS); if (!MemOperands.empty()) MI->setMemRefs(MF, MemOperands); if (InstrNum) diff --git a/llvm/lib/CodeGen/MIRPrinter.cpp b/llvm/lib/CodeGen/MIRPrinter.cpp index 1d54d72336860..c0554497653f8 100644 --- a/llvm/lib/CodeGen/MIRPrinter.cpp +++ b/llvm/lib/CodeGen/MIRPrinter.cpp @@ -19,6 +19,7 @@ #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringExtras.h" #include "llvm/ADT/StringRef.h" +#include "llvm/CodeGen/MIRFormatter.h" #include "llvm/CodeGen/MIRYamlMapping.h" #include "llvm/CodeGen/MachineBasicBlock.h" #include "llvm/CodeGen/MachineConstantPool.h" @@ -895,6 +896,10 @@ static void printMI(raw_ostream &OS, MFPrintState &State, } if (uint32_t CFIType = MI.getCFIType()) OS << LS << "cfi-type " << CFIType; + if (Value *DS = MI.getDeactivationSymbol()) { + OS << LS << "deactivation-symbol "; + MIRFormatter::printIRValue(OS, *DS, State.MST); + } if (auto Num = MI.peekDebugInstrNum()) OS << LS << "debug-instr-number " << Num; diff --git a/llvm/lib/CodeGen/MachineFunction.cpp b/llvm/lib/CodeGen/MachineFunction.cpp index bfa5ab274c686..634547ded992f 100644 --- a/llvm/lib/CodeGen/MachineFunction.cpp +++ b/llvm/lib/CodeGen/MachineFunction.cpp @@ -609,10 +609,10 @@ MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO, MachineInstr::ExtraInfo *MachineFunction::createMIExtraInfo( 
ArrayRef MMOs, MCSymbol *PreInstrSymbol, MCSymbol *PostInstrSymbol, MDNode *HeapAllocMarker, MDNode *PCSections, - uint32_t CFIType, MDNode *MMRAs) { + uint32_t CFIType, MDNode *MMRAs, Value *DS) { return MachineInstr::ExtraInfo::create(Allocator, MMOs, PreInstrSymbol, PostInstrSymbol, HeapAllocMarker, - PCSections, CFIType, MMRAs); + PCSections, CFIType, MMRAs, DS); } const char *MachineFunction::createExternalSymbolName(StringRef Name) { diff --git a/llvm/lib/CodeGen/MachineInstr.cpp b/llvm/lib/CodeGen/MachineInstr.cpp index eb46124d9eb5f..18111156efa4f 100644 --- a/llvm/lib/CodeGen/MachineInstr.cpp +++ b/llvm/lib/CodeGen/MachineInstr.cpp @@ -322,15 +322,17 @@ void MachineInstr::setExtraInfo(MachineFunction &MF, MCSymbol *PreInstrSymbol, MCSymbol *PostInstrSymbol, MDNode *HeapAllocMarker, MDNode *PCSections, - uint32_t CFIType, MDNode *MMRAs) { + uint32_t CFIType, MDNode *MMRAs, Value *DS) { bool HasPreInstrSymbol = PreInstrSymbol != nullptr; bool HasPostInstrSymbol = PostInstrSymbol != nullptr; bool HasHeapAllocMarker = HeapAllocMarker != nullptr; bool HasPCSections = PCSections != nullptr; bool HasCFIType = CFIType != 0; bool HasMMRAs = MMRAs != nullptr; + bool HasDS = DS != nullptr; int NumPointers = MMOs.size() + HasPreInstrSymbol + HasPostInstrSymbol + - HasHeapAllocMarker + HasPCSections + HasCFIType + HasMMRAs; + HasHeapAllocMarker + HasPCSections + HasCFIType + HasMMRAs + + HasDS; // Drop all extra info if there is none. if (NumPointers <= 0) { @@ -343,10 +345,10 @@ void MachineInstr::setExtraInfo(MachineFunction &MF, // 32-bit pointers. // FIXME: Maybe we should make the symbols in the extra info mutable? 
else if (NumPointers > 1 || HasMMRAs || HasHeapAllocMarker || HasPCSections || - HasCFIType) { + HasCFIType || HasDS) { Info.set( MF.createMIExtraInfo(MMOs, PreInstrSymbol, PostInstrSymbol, - HeapAllocMarker, PCSections, CFIType, MMRAs)); + HeapAllocMarker, PCSections, CFIType, MMRAs, DS)); return; } @@ -365,7 +367,7 @@ void MachineInstr::dropMemRefs(MachineFunction &MF) { setExtraInfo(MF, {}, getPreInstrSymbol(), getPostInstrSymbol(), getHeapAllocMarker(), getPCSections(), getCFIType(), - getMMRAMetadata()); + getMMRAMetadata(), getDeactivationSymbol()); } void MachineInstr::setMemRefs(MachineFunction &MF, @@ -377,7 +379,7 @@ void MachineInstr::setMemRefs(MachineFunction &MF, setExtraInfo(MF, MMOs, getPreInstrSymbol(), getPostInstrSymbol(), getHeapAllocMarker(), getPCSections(), getCFIType(), - getMMRAMetadata()); + getMMRAMetadata(), getDeactivationSymbol()); } void MachineInstr::addMemOperand(MachineFunction &MF, @@ -488,7 +490,7 @@ void MachineInstr::setPreInstrSymbol(MachineFunction &MF, MCSymbol *Symbol) { setExtraInfo(MF, memoperands(), Symbol, getPostInstrSymbol(), getHeapAllocMarker(), getPCSections(), getCFIType(), - getMMRAMetadata()); + getMMRAMetadata(), getDeactivationSymbol()); } void MachineInstr::setPostInstrSymbol(MachineFunction &MF, MCSymbol *Symbol) { @@ -504,7 +506,7 @@ void MachineInstr::setPostInstrSymbol(MachineFunction &MF, MCSymbol *Symbol) { setExtraInfo(MF, memoperands(), getPreInstrSymbol(), Symbol, getHeapAllocMarker(), getPCSections(), getCFIType(), - getMMRAMetadata()); + getMMRAMetadata(), getDeactivationSymbol()); } void MachineInstr::setHeapAllocMarker(MachineFunction &MF, MDNode *Marker) { @@ -513,7 +515,8 @@ void MachineInstr::setHeapAllocMarker(MachineFunction &MF, MDNode *Marker) { return; setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(), - Marker, getPCSections(), getCFIType(), getMMRAMetadata()); + Marker, getPCSections(), getCFIType(), getMMRAMetadata(), + getDeactivationSymbol()); } void 
MachineInstr::setPCSections(MachineFunction &MF, MDNode *PCSections) { @@ -523,7 +526,7 @@ void MachineInstr::setPCSections(MachineFunction &MF, MDNode *PCSections) { setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(), getHeapAllocMarker(), PCSections, getCFIType(), - getMMRAMetadata()); + getMMRAMetadata(), getDeactivationSymbol()); } void MachineInstr::setCFIType(MachineFunction &MF, uint32_t Type) { @@ -532,7 +535,8 @@ void MachineInstr::setCFIType(MachineFunction &MF, uint32_t Type) { return; setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(), - getHeapAllocMarker(), getPCSections(), Type, getMMRAMetadata()); + getHeapAllocMarker(), getPCSections(), Type, getMMRAMetadata(), + getDeactivationSymbol()); } void MachineInstr::setMMRAMetadata(MachineFunction &MF, MDNode *MMRAs) { @@ -541,7 +545,18 @@ void MachineInstr::setMMRAMetadata(MachineFunction &MF, MDNode *MMRAs) { return; setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(), - getHeapAllocMarker(), getPCSections(), getCFIType(), MMRAs); + getHeapAllocMarker(), getPCSections(), getCFIType(), MMRAs, + getDeactivationSymbol()); +} + +void MachineInstr::setDeactivationSymbol(MachineFunction &MF, Value *DS) { + // Do nothing if old and new symbols are the same. + if (DS == getDeactivationSymbol()) + return; + + setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(), + getHeapAllocMarker(), getPCSections(), getCFIType(), + getMMRAMetadata(), DS); } void MachineInstr::cloneInstrSymbols(MachineFunction &MF, @@ -730,6 +745,8 @@ bool MachineInstr::isIdenticalTo(const MachineInstr &Other, // Call instructions with different CFI types are not identical. 
if (isCall() && getCFIType() != Other.getCFIType()) return false; + if (getDeactivationSymbol() != Other.getDeactivationSymbol()) + return false; return true; } @@ -2037,6 +2054,8 @@ void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST, OS << ','; OS << " cfi-type " << CFIType; } + if (getDeactivationSymbol()) + OS << ", deactivation-symbol " << getDeactivationSymbol()->getName(); if (DebugInstrNum) { if (!FirstOp) diff --git a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp index 52e8449fe510c..4ad721bf21959 100644 --- a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp @@ -15,10 +15,12 @@ #include "InstrEmitter.h" #include "SDNodeDbgValue.h" #include "llvm/BinaryFormat/Dwarf.h" +#include "llvm/CodeGen/ISDOpcodes.h" #include "llvm/CodeGen/MachineConstantPool.h" #include "llvm/CodeGen/MachineFunction.h" #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/SelectionDAGNodes.h" #include "llvm/CodeGen/StackMaps.h" #include "llvm/CodeGen/TargetInstrInfo.h" #include "llvm/CodeGen/TargetLowering.h" @@ -61,6 +63,8 @@ static unsigned countOperands(SDNode *Node, unsigned NumExpUses, unsigned N = Node->getNumOperands(); while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue) --N; + if (N && Node->getOperand(N - 1).getOpcode() == ISD::DEACTIVATION_SYMBOL) + --N; // Ignore deactivation symbol if it exists. if (N && Node->getOperand(N - 1).getValueType() == MVT::Other) --N; // Ignore chain if it exists. @@ -1222,15 +1226,23 @@ EmitMachineNode(SDNode *Node, bool IsClone, bool IsCloned, } } - if (SDNode *GluedNode = Node->getGluedNode()) { - // FIXME: Possibly iterate over multiple glue nodes? 
- if (GluedNode->getOpcode() == - ~(unsigned)TargetOpcode::CONVERGENCECTRL_GLUE) { - Register VReg = getVR(GluedNode->getOperand(0), VRBaseMap); - MachineOperand MO = MachineOperand::CreateReg(VReg, /*isDef=*/false, - /*isImp=*/true); - MIB->addOperand(MO); - } + unsigned Op = Node->getNumOperands(); + if (Op != 0 && Node->getOperand(Op - 1)->getOpcode() == + ~(unsigned)TargetOpcode::CONVERGENCECTRL_GLUE) { + Register VReg = getVR(Node->getOperand(Op - 1)->getOperand(0), VRBaseMap); + MachineOperand MO = MachineOperand::CreateReg(VReg, /*isDef=*/false, + /*isImp=*/true); + MIB->addOperand(MO); + Op--; + } + + if (Op != 0 && + Node->getOperand(Op - 1)->getOpcode() == ISD::DEACTIVATION_SYMBOL) { + MI->setDeactivationSymbol( + *MF, const_cast( + cast(Node->getOperand(Op - 1)) + ->getGlobal())); + Op--; } // Run post-isel target hook to adjust this instruction if needed. @@ -1251,7 +1263,8 @@ EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned, llvm_unreachable("This target-independent node should have been selected!"); case ISD::EntryToken: case ISD::MERGE_VALUES: - case ISD::TokenFactor: // fall thru + case ISD::TokenFactor: + case ISD::DEACTIVATION_SYMBOL: break; case ISD::CopyToReg: { Register DestReg = cast(Node->getOperand(1))->getReg(); diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp index 1b15a207a2d37..06735708d5369 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp @@ -1916,6 +1916,21 @@ SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, return SDValue(N, 0); } +SDValue SelectionDAG::getDeactivationSymbol(const GlobalValue *GV) { + SDVTList VTs = getVTList(MVT::Untyped); + FoldingSetNodeID ID; + AddNodeIDNode(ID, ISD::DEACTIVATION_SYMBOL, VTs, {}); + ID.AddPointer(GV); + void *IP = nullptr; + if (SDNode *E = FindNodeOrInsertPos(ID, SDLoc(), IP)) + return SDValue(E, 0); + + auto *N = newSDNode(GV, VTs); + 
CSEMap.InsertNode(N, IP); + InsertNode(N); + return SDValue(N, 0); +} + SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) { unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex; SDVTList VTs = getVTList(VT); diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp index 88b35582a9f7d..53d73ad618bd1 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -45,6 +45,7 @@ #include "llvm/CodeGen/MachineOperand.h" #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/SelectionDAG.h" +#include "llvm/CodeGen/SelectionDAGNodes.h" #include "llvm/CodeGen/SelectionDAGTargetInfo.h" #include "llvm/CodeGen/StackMaps.h" #include "llvm/CodeGen/SwiftErrorValueTracking.h" @@ -5376,6 +5377,14 @@ SmallVector SelectionDAGBuilder::getTargetIntrinsicOperands( } } + if (std::optional Bundle = + I.getOperandBundle(LLVMContext::OB_deactivation_symbol)) { + auto *Sym = Bundle->Inputs[0].get(); + SDValue SDSym = getValue(Sym); + SDSym = DAG.getDeactivationSymbol(cast(Sym)); + Ops.push_back(SDSym); + } + if (std::optional Bundle = I.getOperandBundle(LLVMContext::OB_convergencectrl)) { Value *Token = Bundle->Inputs[0].get(); @@ -9116,6 +9125,11 @@ void SelectionDAGBuilder::LowerCallTo(const CallBase &CB, SDValue Callee, ConvControlToken = getValue(Token); } + GlobalValue *DeactivationSymbol = nullptr; + if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_deactivation_symbol)) { + DeactivationSymbol = cast(Bundle->Inputs[0].get()); + } + TargetLowering::CallLoweringInfo CLI(DAG); CLI.setDebugLoc(getCurSDLoc()) .setChain(getRoot()) @@ -9125,7 +9139,8 @@ void SelectionDAGBuilder::LowerCallTo(const CallBase &CB, SDValue Callee, .setIsPreallocated( CB.countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0) .setCFIType(CFIType) - .setConvergenceControlToken(ConvControlToken); + 
.setConvergenceControlToken(ConvControlToken) + .setDeactivationSymbol(DeactivationSymbol); // Set the pointer authentication info if we have it. if (PAI) { @@ -9745,7 +9760,7 @@ void SelectionDAGBuilder::visitCall(const CallInst &I) { {LLVMContext::OB_deopt, LLVMContext::OB_funclet, LLVMContext::OB_cfguardtarget, LLVMContext::OB_preallocated, LLVMContext::OB_clang_arc_attachedcall, LLVMContext::OB_kcfi, - LLVMContext::OB_convergencectrl}); + LLVMContext::OB_convergencectrl, LLVMContext::OB_deactivation_symbol}); SDValue Callee = getValue(I.getCalledOperand()); diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp index 0fad4722b1871..dd8f18d3b8a6a 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp @@ -3308,6 +3308,7 @@ void SelectionDAGISel::SelectCodeCommon(SDNode *NodeToMatch, case ISD::LIFETIME_START: case ISD::LIFETIME_END: case ISD::PSEUDO_PROBE: + case ISD::DEACTIVATION_SYMBOL: NodeToMatch->setNodeId(-1); // Mark selected. return; case ISD::AssertSext: @@ -3389,7 +3390,7 @@ void SelectionDAGISel::SelectCodeCommon(SDNode *NodeToMatch, // These are the current input chain and glue for use when generating nodes. // Various Emit operations change these. For example, emitting a copytoreg // uses and updates these. - SDValue InputChain, InputGlue; + SDValue InputChain, InputGlue, DeactivationSymbol; // ChainNodesMatched - If a pattern matches nodes that have input/output // chains, the OPC_EmitMergeInputChains operation is emitted which indicates @@ -3542,6 +3543,15 @@ void SelectionDAGISel::SelectCodeCommon(SDNode *NodeToMatch, InputGlue = N->getOperand(N->getNumOperands()-1); continue; + case OPC_CaptureDeactivationSymbol: + // If the current node has a deactivation symbol, capture it in + // DeactivationSymbol. 
+ if (N->getNumOperands() != 0 && + N->getOperand(N->getNumOperands() - 1).getOpcode() == + ISD::DEACTIVATION_SYMBOL) + DeactivationSymbol = N->getOperand(N->getNumOperands() - 1); + continue; + case OPC_MoveChild: { unsigned ChildNo = MatcherTable[MatcherIndex++]; if (ChildNo >= N.getNumOperands()) @@ -4223,6 +4233,8 @@ void SelectionDAGISel::SelectCodeCommon(SDNode *NodeToMatch, // If this has chain/glue inputs, add them. if (EmitNodeInfo & OPFL_Chain) Ops.push_back(InputChain); + if (DeactivationSymbol.getNode() != nullptr) + Ops.push_back(DeactivationSymbol); if ((EmitNodeInfo & OPFL_GlueInput) && InputGlue.getNode() != nullptr) Ops.push_back(InputGlue); diff --git a/llvm/lib/CodeGen/TargetInstrInfo.cpp b/llvm/lib/CodeGen/TargetInstrInfo.cpp index d503d7a2345fd..fef3a3663d3a8 100644 --- a/llvm/lib/CodeGen/TargetInstrInfo.cpp +++ b/llvm/lib/CodeGen/TargetInstrInfo.cpp @@ -66,10 +66,6 @@ const TargetRegisterClass *TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, const MCOperandInfo &OpInfo = MCID.operands()[OpNum]; int16_t RegClass = getOpRegClassID(OpInfo); - // TODO: Remove isLookupPtrRegClass in favor of isLookupRegClassByHwMode - if (OpInfo.isLookupPtrRegClass()) - return TRI.getPointerRegClass(RegClass); - // Instructions like INSERT_SUBREG do not have fixed register classes. 
if (RegClass < 0) return nullptr; diff --git a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp index 5101717526263..c962368859730 100644 --- a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp +++ b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp @@ -2465,7 +2465,8 @@ Value *OpenMPIRBuilder::createRuntimeShuffleFunction(InsertPointTy AllocaIP, void OpenMPIRBuilder::shuffleAndStore(InsertPointTy AllocaIP, Value *SrcAddr, Value *DstAddr, Type *ElemType, - Value *Offset, Type *ReductionArrayTy) { + Value *Offset, Type *ReductionArrayTy, + bool IsByRefElem) { uint64_t Size = M.getDataLayout().getTypeStoreSize(ElemType); // Create the loop over the big sized data. // ptr = (void*)Elem; @@ -2547,10 +2548,10 @@ void OpenMPIRBuilder::shuffleAndStore(InsertPointTy AllocaIP, Value *SrcAddr, } } -void OpenMPIRBuilder::emitReductionListCopy( +Error OpenMPIRBuilder::emitReductionListCopy( InsertPointTy AllocaIP, CopyAction Action, Type *ReductionArrayTy, ArrayRef ReductionInfos, Value *SrcBase, Value *DestBase, - CopyOptionsTy CopyOptions) { + ArrayRef IsByRef, CopyOptionsTy CopyOptions) { Type *IndexTy = Builder.getIndexTy( M.getDataLayout(), M.getDataLayout().getDefaultGlobalsAddressSpace()); Value *RemoteLaneOffset = CopyOptions.RemoteLaneOffset; @@ -2560,6 +2561,7 @@ void OpenMPIRBuilder::emitReductionListCopy( for (auto En : enumerate(ReductionInfos)) { const ReductionInfo &RI = En.value(); Value *SrcElementAddr = nullptr; + AllocaInst *DestAlloca = nullptr; Value *DestElementAddr = nullptr; Value *DestElementPtrAddr = nullptr; // Should we shuffle in an element from a remote lane? 
@@ -2579,14 +2581,18 @@ void OpenMPIRBuilder::emitReductionListCopy( DestElementPtrAddr = Builder.CreateInBoundsGEP( ReductionArrayTy, DestBase, {ConstantInt::get(IndexTy, 0), ConstantInt::get(IndexTy, En.index())}); + bool IsByRefElem = (!IsByRef.empty() && IsByRef[En.index()]); switch (Action) { case CopyAction::RemoteLaneToThread: { InsertPointTy CurIP = Builder.saveIP(); Builder.restoreIP(AllocaIP); - AllocaInst *DestAlloca = Builder.CreateAlloca(RI.ElementType, nullptr, - ".omp.reduction.element"); + + Type *DestAllocaType = + IsByRefElem ? RI.ByRefAllocatedType : RI.ElementType; + DestAlloca = Builder.CreateAlloca(DestAllocaType, nullptr, + ".omp.reduction.element"); DestAlloca->setAlignment( - M.getDataLayout().getPrefTypeAlign(RI.ElementType)); + M.getDataLayout().getPrefTypeAlign(DestAllocaType)); DestElementAddr = DestAlloca; DestElementAddr = Builder.CreateAddrSpaceCast(DestElementAddr, Builder.getPtrTy(), @@ -2606,8 +2612,57 @@ void OpenMPIRBuilder::emitReductionListCopy( // Now that all active lanes have read the element in the // Reduce list, shuffle over the value from the remote lane. if (ShuffleInElement) { - shuffleAndStore(AllocaIP, SrcElementAddr, DestElementAddr, RI.ElementType, - RemoteLaneOffset, ReductionArrayTy); + Type *ShuffleType = RI.ElementType; + Value *ShuffleSrcAddr = SrcElementAddr; + Value *ShuffleDestAddr = DestElementAddr; + AllocaInst *LocalStorage = nullptr; + + if (IsByRefElem) { + assert(RI.ByRefElementType && "Expected by-ref element type to be set"); + assert(RI.ByRefAllocatedType && + "Expected by-ref allocated type to be set"); + // For by-ref reductions, we need to copy from the remote lane the + // actual value of the partial reduction computed by that remote lane; + // rather than, for example, a pointer to that data or, even worse, a + // pointer to the descriptor of the by-ref reduction element. 
+ ShuffleType = RI.ByRefElementType; + + InsertPointOrErrorTy GenResult = + RI.DataPtrPtrGen(Builder.saveIP(), ShuffleSrcAddr, ShuffleSrcAddr); + + if (!GenResult) + return GenResult.takeError(); + + ShuffleSrcAddr = Builder.CreateLoad(Builder.getPtrTy(), ShuffleSrcAddr); + + { + InsertPointTy OldIP = Builder.saveIP(); + Builder.restoreIP(AllocaIP); + + LocalStorage = Builder.CreateAlloca(ShuffleType); + Builder.restoreIP(OldIP); + ShuffleDestAddr = LocalStorage; + } + } + + shuffleAndStore(AllocaIP, ShuffleSrcAddr, ShuffleDestAddr, ShuffleType, + RemoteLaneOffset, ReductionArrayTy, IsByRefElem); + + if (IsByRefElem) { + Value *GEP; + InsertPointOrErrorTy GenResult = + RI.DataPtrPtrGen(Builder.saveIP(), + Builder.CreatePointerBitCastOrAddrSpaceCast( + DestAlloca, Builder.getPtrTy(), ".ascast"), + GEP); + + if (!GenResult) + return GenResult.takeError(); + + Builder.CreateStore(Builder.CreatePointerBitCastOrAddrSpaceCast( + LocalStorage, Builder.getPtrTy(), ".ascast"), + GEP); + } } else { switch (RI.EvaluationKind) { case EvalKind::Scalar: { @@ -2658,11 +2713,13 @@ void OpenMPIRBuilder::emitReductionListCopy( Builder.CreateStore(CastDestAddr, DestElementPtrAddr); } } + + return Error::success(); } Expected OpenMPIRBuilder::emitInterWarpCopyFunction( const LocationDescription &Loc, ArrayRef ReductionInfos, - AttributeList FuncAttrs) { + AttributeList FuncAttrs, ArrayRef IsByRef) { InsertPointTy SavedIP = Builder.saveIP(); LLVMContext &Ctx = M.getContext(); FunctionType *FuncTy = FunctionType::get( @@ -2743,7 +2800,9 @@ Expected OpenMPIRBuilder::emitInterWarpCopyFunction( // memory. // const ReductionInfo &RI = En.value(); - unsigned RealTySize = M.getDataLayout().getTypeAllocSize(RI.ElementType); + bool IsByRefElem = !IsByRef.empty() && IsByRef[En.index()]; + unsigned RealTySize = M.getDataLayout().getTypeAllocSize( + IsByRefElem ? 
RI.ByRefElementType : RI.ElementType); for (unsigned TySize = 4; TySize > 0 && RealTySize > 0; TySize /= 2) { Type *CType = Builder.getIntNTy(TySize * 8); @@ -2806,6 +2865,17 @@ Expected OpenMPIRBuilder::emitInterWarpCopyFunction( ConstantInt::get(IndexTy, En.index())}); // elemptr = ((CopyType*)(elemptrptr)) + I Value *ElemPtr = Builder.CreateLoad(Builder.getPtrTy(), ElemPtrPtr); + + if (IsByRefElem) { + InsertPointOrErrorTy GenRes = + RI.DataPtrPtrGen(Builder.saveIP(), ElemPtr, ElemPtr); + + if (!GenRes) + return GenRes.takeError(); + + ElemPtr = Builder.CreateLoad(Builder.getPtrTy(), ElemPtr); + } + if (NumIters > 1) ElemPtr = Builder.CreateGEP(Builder.getInt32Ty(), ElemPtr, Cnt); @@ -2861,6 +2931,17 @@ Expected OpenMPIRBuilder::emitInterWarpCopyFunction( Value *TargetElemPtrVal = Builder.CreateLoad(Builder.getPtrTy(), TargetElemPtrPtr); Value *TargetElemPtr = TargetElemPtrVal; + + if (IsByRefElem) { + InsertPointOrErrorTy GenRes = + RI.DataPtrPtrGen(Builder.saveIP(), TargetElemPtr, TargetElemPtr); + + if (!GenRes) + return GenRes.takeError(); + + TargetElemPtr = Builder.CreateLoad(Builder.getPtrTy(), TargetElemPtr); + } + if (NumIters > 1) TargetElemPtr = Builder.CreateGEP(Builder.getInt32Ty(), TargetElemPtr, Cnt); @@ -2895,9 +2976,9 @@ Expected OpenMPIRBuilder::emitInterWarpCopyFunction( return WcFunc; } -Function *OpenMPIRBuilder::emitShuffleAndReduceFunction( +Expected OpenMPIRBuilder::emitShuffleAndReduceFunction( ArrayRef ReductionInfos, Function *ReduceFn, - AttributeList FuncAttrs) { + AttributeList FuncAttrs, ArrayRef IsByRef) { LLVMContext &Ctx = M.getContext(); FunctionType *FuncTy = FunctionType::get(Builder.getVoidTy(), @@ -2976,9 +3057,13 @@ Function *OpenMPIRBuilder::emitShuffleAndReduceFunction( // This loop iterates through the list of reduce elements and copies, // element by element, from a remote lane in the warp to RemoteReduceList, // hosted on the thread's stack. 
- emitReductionListCopy( + Error EmitRedLsCpRes = emitReductionListCopy( AllocaIP, CopyAction::RemoteLaneToThread, RedListArrayTy, ReductionInfos, - ReduceList, RemoteListAddrCast, {RemoteLaneOffset, nullptr, nullptr}); + ReduceList, RemoteListAddrCast, IsByRef, + {RemoteLaneOffset, nullptr, nullptr}); + + if (EmitRedLsCpRes) + return EmitRedLsCpRes; // The actions to be performed on the Remote Reduce list is dependent // on the algorithm version. @@ -3046,8 +3131,14 @@ Function *OpenMPIRBuilder::emitShuffleAndReduceFunction( Builder.CreateCondBr(CondCopy, CpyThenBB, CpyElseBB); emitBlock(CpyThenBB, Builder.GetInsertBlock()->getParent()); - emitReductionListCopy(AllocaIP, CopyAction::ThreadCopy, RedListArrayTy, - ReductionInfos, RemoteListAddrCast, ReduceList); + + EmitRedLsCpRes = emitReductionListCopy( + AllocaIP, CopyAction::ThreadCopy, RedListArrayTy, ReductionInfos, + RemoteListAddrCast, ReduceList, IsByRef); + + if (EmitRedLsCpRes) + return EmitRedLsCpRes; + Builder.CreateBr(CpyMergeBB); emitBlock(CpyElseBB, Builder.GetInsertBlock()->getParent()); @@ -3452,7 +3543,8 @@ std::string OpenMPIRBuilder::getReductionFuncName(StringRef Name) const { Expected OpenMPIRBuilder::createReductionFunction( StringRef ReducerName, ArrayRef ReductionInfos, - ReductionGenCBKind ReductionGenCBKind, AttributeList FuncAttrs) { + ArrayRef IsByRef, ReductionGenCBKind ReductionGenCBKind, + AttributeList FuncAttrs) { auto *FuncTy = FunctionType::get(Builder.getVoidTy(), {Builder.getPtrTy(), Builder.getPtrTy()}, /* IsVarArg */ false); @@ -3513,8 +3605,14 @@ Expected OpenMPIRBuilder::createReductionFunction( LHSPtrs.emplace_back(LHSPtr); RHSPtrs.emplace_back(RHSPtr); } else { - Value *LHS = Builder.CreateLoad(RI.ElementType, LHSPtr); - Value *RHS = Builder.CreateLoad(RI.ElementType, RHSPtr); + Value *LHS = LHSPtr; + Value *RHS = RHSPtr; + + if (!IsByRef.empty() && !IsByRef[En.index()]) { + LHS = Builder.CreateLoad(RI.ElementType, LHSPtr); + RHS = Builder.CreateLoad(RI.ElementType, 
RHSPtr); + } + Value *Reduced; InsertPointOrErrorTy AfterIP = RI.ReductionGen(Builder.saveIP(), LHS, RHS, Reduced); @@ -3524,7 +3622,9 @@ Expected OpenMPIRBuilder::createReductionFunction( return ReductionFunc; Builder.restoreIP(*AfterIP); - Builder.CreateStore(Reduced, LHSPtr); + + if (!IsByRef.empty() && !IsByRef[En.index()]) + Builder.CreateStore(Reduced, LHSPtr); } } @@ -3577,9 +3677,9 @@ checkReductionInfos(ArrayRef ReductionInfos, OpenMPIRBuilder::InsertPointOrErrorTy OpenMPIRBuilder::createReductionsGPU( const LocationDescription &Loc, InsertPointTy AllocaIP, InsertPointTy CodeGenIP, ArrayRef ReductionInfos, - bool IsNoWait, bool IsTeamsReduction, ReductionGenCBKind ReductionGenCBKind, - std::optional GridValue, unsigned ReductionBufNum, - Value *SrcLocInfo) { + ArrayRef IsByRef, bool IsNoWait, bool IsTeamsReduction, + ReductionGenCBKind ReductionGenCBKind, std::optional GridValue, + unsigned ReductionBufNum, Value *SrcLocInfo) { if (!updateToLocation(Loc)) return InsertPointTy(); Builder.restoreIP(CodeGenIP); @@ -3615,9 +3715,9 @@ OpenMPIRBuilder::InsertPointOrErrorTy OpenMPIRBuilder::createReductionsGPU( FuncAttrs = FuncAttrs.addFnAttributes(Ctx, AttrBldr); CodeGenIP = Builder.saveIP(); - Expected ReductionResult = - createReductionFunction(Builder.GetInsertBlock()->getParent()->getName(), - ReductionInfos, ReductionGenCBKind, FuncAttrs); + Expected ReductionResult = createReductionFunction( + Builder.GetInsertBlock()->getParent()->getName(), ReductionInfos, IsByRef, + ReductionGenCBKind, FuncAttrs); if (!ReductionResult) return ReductionResult.takeError(); Function *ReductionFunc = *ReductionResult; @@ -3656,15 +3756,25 @@ OpenMPIRBuilder::InsertPointOrErrorTy OpenMPIRBuilder::createReductionsGPU( Value *ElemPtr = Builder.CreateInBoundsGEP( RedArrayTy, ReductionList, {ConstantInt::get(IndexTy, 0), ConstantInt::get(IndexTy, En.index())}); + + Value *PrivateVar = RI.PrivateVariable; + bool IsByRefElem = !IsByRef.empty() && IsByRef[En.index()]; + if 
(IsByRefElem) + PrivateVar = Builder.CreateLoad(RI.ElementType, PrivateVar); + Value *CastElem = - Builder.CreatePointerBitCastOrAddrSpaceCast(RI.PrivateVariable, PtrTy); + Builder.CreatePointerBitCastOrAddrSpaceCast(PrivateVar, PtrTy); Builder.CreateStore(CastElem, ElemPtr); } CodeGenIP = Builder.saveIP(); - Function *SarFunc = - emitShuffleAndReduceFunction(ReductionInfos, ReductionFunc, FuncAttrs); + Expected SarFunc = emitShuffleAndReduceFunction( + ReductionInfos, ReductionFunc, FuncAttrs, IsByRef); + + if (!SarFunc) + return SarFunc.takeError(); + Expected CopyResult = - emitInterWarpCopyFunction(Loc, ReductionInfos, FuncAttrs); + emitInterWarpCopyFunction(Loc, ReductionInfos, FuncAttrs, IsByRef); if (!CopyResult) return CopyResult.takeError(); Function *WcFunc = *CopyResult; @@ -3684,7 +3794,7 @@ OpenMPIRBuilder::InsertPointOrErrorTy OpenMPIRBuilder::createReductionsGPU( Builder.getInt64(MaxDataSize * ReductionInfos.size()); if (!IsTeamsReduction) { Value *SarFuncCast = - Builder.CreatePointerBitCastOrAddrSpaceCast(SarFunc, FuncPtrTy); + Builder.CreatePointerBitCastOrAddrSpaceCast(*SarFunc, FuncPtrTy); Value *WcFuncCast = Builder.CreatePointerBitCastOrAddrSpaceCast(WcFunc, FuncPtrTy); Value *Args[] = {SrcLocInfo, ReductionDataSize, RL, SarFuncCast, @@ -3716,7 +3826,7 @@ OpenMPIRBuilder::InsertPointOrErrorTy OpenMPIRBuilder::createReductionsGPU( Builder.getInt32(ReductionBufNum), ReductionDataSize, RL, - SarFunc, + *SarFunc, WcFunc, LtGCFunc, LtGRFunc, @@ -3743,7 +3853,8 @@ OpenMPIRBuilder::InsertPointOrErrorTy OpenMPIRBuilder::createReductionsGPU( // Add emission of __kmpc_end_reduce{_nowait}(); for (auto En : enumerate(ReductionInfos)) { const ReductionInfo &RI = En.value(); - Value *LHS = RI.Variable; + Type *ValueType = RI.ElementType; + Value *RedValue = RI.Variable; Value *RHS = Builder.CreatePointerBitCastOrAddrSpaceCast(RI.PrivateVariable, PtrTy); @@ -3754,7 +3865,7 @@ OpenMPIRBuilder::InsertPointOrErrorTy OpenMPIRBuilder::createReductionsGPU( // Fix 
the CallBack code genereated to use the correct Values for the LHS // and RHS - LHSPtr->replaceUsesWithIf(LHS, [ReductionFunc](const Use &U) { + LHSPtr->replaceUsesWithIf(RedValue, [ReductionFunc](const Use &U) { return cast(U.getUser())->getParent()->getParent() == ReductionFunc; }); @@ -3763,15 +3874,21 @@ OpenMPIRBuilder::InsertPointOrErrorTy OpenMPIRBuilder::createReductionsGPU( ReductionFunc; }); } else { - Value *LHSValue = Builder.CreateLoad(RI.ElementType, LHS, "final.lhs"); - Value *RHSValue = Builder.CreateLoad(RI.ElementType, RHS, "final.rhs"); + if (IsByRef.empty() || !IsByRef[En.index()]) { + RedValue = Builder.CreateLoad(ValueType, RI.Variable, + "red.value." + Twine(En.index())); + } + Value *PrivateRedValue = Builder.CreateLoad( + ValueType, RHS, "red.private.value" + Twine(En.index())); Value *Reduced; InsertPointOrErrorTy AfterIP = - RI.ReductionGen(Builder.saveIP(), RHSValue, LHSValue, Reduced); + RI.ReductionGen(Builder.saveIP(), RedValue, PrivateRedValue, Reduced); if (!AfterIP) return AfterIP.takeError(); Builder.restoreIP(*AfterIP); - Builder.CreateStore(Reduced, LHS, false); + + if (!IsByRef.empty() && !IsByRef[En.index()]) + Builder.CreateStore(Reduced, RI.Variable); } } emitBlock(ExitBB, CurFunc); @@ -3872,7 +3989,7 @@ OpenMPIRBuilder::InsertPointOrErrorTy OpenMPIRBuilder::createReductions( assert(ReductionInfos.size() == IsByRef.size()); if (Config.isGPU()) return createReductionsGPU(Loc, AllocaIP, Builder.saveIP(), ReductionInfos, - IsNoWait, IsTeamsReduction); + IsByRef, IsNoWait, IsTeamsReduction); checkReductionInfos(ReductionInfos, /*IsGPU*/ false); diff --git a/llvm/lib/IR/AsmWriter.cpp b/llvm/lib/IR/AsmWriter.cpp index eebabfd772982..7932765db8359 100644 --- a/llvm/lib/IR/AsmWriter.cpp +++ b/llvm/lib/IR/AsmWriter.cpp @@ -1674,12 +1674,14 @@ static void writeConstantInternal(raw_ostream &Out, const Constant *CV, if (const auto *CPA = dyn_cast(CV)) { Out << "ptrauth ("; - // ptrauth (ptr CST, i32 KEY[, i64 DISC[, ptr ADDRDISC]?]?) 
+ // ptrauth (ptr CST, i32 KEY[, i64 DISC[, ptr ADDRDISC[, ptr DS]?]?]?) unsigned NumOpsToWrite = 2; if (!CPA->getOperand(2)->isNullValue()) NumOpsToWrite = 3; if (!CPA->getOperand(3)->isNullValue()) NumOpsToWrite = 4; + if (!CPA->getOperand(4)->isNullValue()) + NumOpsToWrite = 5; ListSeparator LS; for (unsigned i = 0, e = NumOpsToWrite; i != e; ++i) { diff --git a/llvm/lib/IR/Constants.cpp b/llvm/lib/IR/Constants.cpp index a3aa5e9571657..6b82da140256f 100644 --- a/llvm/lib/IR/Constants.cpp +++ b/llvm/lib/IR/Constants.cpp @@ -2081,28 +2081,33 @@ Value *NoCFIValue::handleOperandChangeImpl(Value *From, Value *To) { // ConstantPtrAuth *ConstantPtrAuth::get(Constant *Ptr, ConstantInt *Key, - ConstantInt *Disc, Constant *AddrDisc) { - Constant *ArgVec[] = {Ptr, Key, Disc, AddrDisc}; + ConstantInt *Disc, Constant *AddrDisc, + Constant *DeactivationSymbol) { + Constant *ArgVec[] = {Ptr, Key, Disc, AddrDisc, DeactivationSymbol}; ConstantPtrAuthKeyType MapKey(ArgVec); LLVMContextImpl *pImpl = Ptr->getContext().pImpl; return pImpl->ConstantPtrAuths.getOrCreate(Ptr->getType(), MapKey); } ConstantPtrAuth *ConstantPtrAuth::getWithSameSchema(Constant *Pointer) const { - return get(Pointer, getKey(), getDiscriminator(), getAddrDiscriminator()); + return get(Pointer, getKey(), getDiscriminator(), getAddrDiscriminator(), + getDeactivationSymbol()); } ConstantPtrAuth::ConstantPtrAuth(Constant *Ptr, ConstantInt *Key, - ConstantInt *Disc, Constant *AddrDisc) + ConstantInt *Disc, Constant *AddrDisc, + Constant *DeactivationSymbol) : Constant(Ptr->getType(), Value::ConstantPtrAuthVal, AllocMarker) { assert(Ptr->getType()->isPointerTy()); assert(Key->getBitWidth() == 32); assert(Disc->getBitWidth() == 64); assert(AddrDisc->getType()->isPointerTy()); + assert(DeactivationSymbol->getType()->isPointerTy()); setOperand(0, Ptr); setOperand(1, Key); setOperand(2, Disc); setOperand(3, AddrDisc); + setOperand(4, DeactivationSymbol); } /// Remove the constant from the constant table. 
@@ -2150,6 +2155,11 @@ bool ConstantPtrAuth::hasSpecialAddressDiscriminator(uint64_t Value) const { bool ConstantPtrAuth::isKnownCompatibleWith(const Value *Key, const Value *Discriminator, const DataLayout &DL) const { + // This function may only be validly called to analyze a ptrauth operation + // with no deactivation symbol, so if we have one it isn't compatible. + if (!getDeactivationSymbol()->isNullValue()) + return false; + // If the keys are different, there's no chance for this to be compatible. if (getKey() != Key) return false; diff --git a/llvm/lib/IR/ConstantsContext.h b/llvm/lib/IR/ConstantsContext.h index e3e8d895a63f4..2073e0d42d8e3 100644 --- a/llvm/lib/IR/ConstantsContext.h +++ b/llvm/lib/IR/ConstantsContext.h @@ -539,7 +539,8 @@ struct ConstantPtrAuthKeyType { ConstantPtrAuth *create(TypeClass *Ty) const { return new ConstantPtrAuth(Operands[0], cast(Operands[1]), - cast(Operands[2]), Operands[3]); + cast(Operands[2]), Operands[3], + Operands[4]); } }; diff --git a/llvm/lib/IR/Core.cpp b/llvm/lib/IR/Core.cpp index 604730e0d3004..26c4f4ec784cd 100644 --- a/llvm/lib/IR/Core.cpp +++ b/llvm/lib/IR/Core.cpp @@ -1699,7 +1699,9 @@ LLVMValueRef LLVMConstantPtrAuth(LLVMValueRef Ptr, LLVMValueRef Key, LLVMValueRef Disc, LLVMValueRef AddrDisc) { return wrap(ConstantPtrAuth::get( unwrap(Ptr), unwrap(Key), - unwrap(Disc), unwrap(AddrDisc))); + unwrap(Disc), unwrap(AddrDisc), + ConstantPointerNull::get( + cast(unwrap(AddrDisc)->getType())))); } /*-- Opcode mapping */ diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp index cd39970f5111f..85d3690dd8306 100644 --- a/llvm/lib/IR/Instructions.cpp +++ b/llvm/lib/IR/Instructions.cpp @@ -620,7 +620,8 @@ bool CallBase::hasReadingOperandBundles() const { // ptrauth) forces a callsite to be at least readonly. 
return hasOperandBundlesOtherThan({LLVMContext::OB_ptrauth, LLVMContext::OB_kcfi, - LLVMContext::OB_convergencectrl}) && + LLVMContext::OB_convergencectrl, + LLVMContext::OB_deactivation_symbol}) && getIntrinsicID() != Intrinsic::assume; } @@ -628,7 +629,8 @@ bool CallBase::hasClobberingOperandBundles() const { return hasOperandBundlesOtherThan( {LLVMContext::OB_deopt, LLVMContext::OB_funclet, LLVMContext::OB_ptrauth, LLVMContext::OB_kcfi, - LLVMContext::OB_convergencectrl}) && + LLVMContext::OB_convergencectrl, + LLVMContext::OB_deactivation_symbol}) && getIntrinsicID() != Intrinsic::assume; } diff --git a/llvm/lib/IR/LLVMContext.cpp b/llvm/lib/IR/LLVMContext.cpp index 335c210c10e1a..10aba759185a7 100644 --- a/llvm/lib/IR/LLVMContext.cpp +++ b/llvm/lib/IR/LLVMContext.cpp @@ -55,6 +55,8 @@ static StringRef knownBundleName(unsigned BundleTagID) { return "convergencectrl"; case LLVMContext::OB_align: return "align"; + case LLVMContext::OB_deactivation_symbol: + return "deactivation-symbol"; default: llvm_unreachable("unknown bundle id"); } diff --git a/llvm/lib/IR/LLVMContextImpl.cpp b/llvm/lib/IR/LLVMContextImpl.cpp index ca7605ae53453..8f79398b086eb 100644 --- a/llvm/lib/IR/LLVMContextImpl.cpp +++ b/llvm/lib/IR/LLVMContextImpl.cpp @@ -107,6 +107,7 @@ LLVMContextImpl::~LLVMContextImpl() { ArrayConstants.freeConstants(); StructConstants.freeConstants(); VectorConstants.freeConstants(); + ConstantPtrAuths.freeConstants(); InlineAsms.freeConstants(); CAZConstants.clear(); diff --git a/llvm/lib/IR/ReplaceConstant.cpp b/llvm/lib/IR/ReplaceConstant.cpp index b3586b45a23f2..b1864c3dc9eeb 100644 --- a/llvm/lib/IR/ReplaceConstant.cpp +++ b/llvm/lib/IR/ReplaceConstant.cpp @@ -22,9 +22,9 @@ static bool isExpandableUser(User *U) { return isa(U) || isa(U); } -static SmallVector expandUser(BasicBlock::iterator InsertPt, - Constant *C) { - SmallVector NewInsts; +static void expandUser(BasicBlock::iterator InsertPt, Constant *C, + SmallVector &NewInsts) { + NewInsts.clear(); if 
(auto *CE = dyn_cast(C)) { Instruction *ConstInst = CE->getAsInstruction(); ConstInst->insertBefore(*InsertPt->getParent(), InsertPt); @@ -46,7 +46,6 @@ static SmallVector expandUser(BasicBlock::iterator InsertPt, } else { llvm_unreachable("Not an expandable user"); } - return NewInsts; } bool llvm::convertUsersOfConstantsToInstructions(ArrayRef Consts, @@ -91,6 +90,11 @@ bool llvm::convertUsersOfConstantsToInstructions(ArrayRef Consts, // Replace those expandable operands with instructions bool Changed = false; + // We need to cache the instructions we've already expanded to avoid expanding + // the same constant multiple times in the same basic block, which is + // problematic when the same constant is used in a phi node multiple times. + DenseMap, SmallVector> + ConstantToInstructionMap; while (!InstructionWorklist.empty()) { Instruction *I = InstructionWorklist.pop_back_val(); DebugLoc Loc = I->getDebugLoc(); @@ -105,7 +109,14 @@ bool llvm::convertUsersOfConstantsToInstructions(ArrayRef Consts, if (auto *C = dyn_cast(U.get())) { if (ExpandableUsers.contains(C)) { Changed = true; - auto NewInsts = expandUser(BI, C); + SmallVector &NewInsts = + ConstantToInstructionMap[std::make_pair(C, BI->getParent())]; + // If the cached instruction is after the insertion point, we need to + // create a new one. We can't simply move the cached instruction + // because its operands (also expanded instructions) might not + // dominate the new position. 
+ if (NewInsts.empty() || BI->comesBefore(NewInsts.front())) + expandUser(BI, C, NewInsts); for (auto *NI : NewInsts) NI->setDebugLoc(Loc); InstructionWorklist.insert_range(NewInsts); diff --git a/llvm/lib/IR/RuntimeLibcalls.cpp b/llvm/lib/IR/RuntimeLibcalls.cpp index cbe7a7b9f77f4..a5f842a5fb520 100644 --- a/llvm/lib/IR/RuntimeLibcalls.cpp +++ b/llvm/lib/IR/RuntimeLibcalls.cpp @@ -130,13 +130,23 @@ bool RuntimeLibcallsInfo::darwinHasExp10(const Triple &TT) { } } +/// TODO: There is really no guarantee that sizeof(size_t) is equal to the index +/// size of the default address space. This matches TargetLibraryInfo and should +/// be kept in sync. +static IntegerType *getSizeTType(LLVMContext &Ctx, const DataLayout &DL) { + return DL.getIndexType(Ctx, /*AddressSpace=*/0); +} + std::pair RuntimeLibcallsInfo::getFunctionTy(LLVMContext &Ctx, const Triple &TT, const DataLayout &DL, RTLIB::LibcallImpl LibcallImpl) const { + // TODO: NoCallback probably unsafe in general static constexpr Attribute::AttrKind CommonFnAttrs[] = { Attribute::MustProgress, Attribute::NoCallback, Attribute::NoFree, Attribute::NoSync, Attribute::NoUnwind, Attribute::WillReturn}; + static constexpr Attribute::AttrKind MemoryFnAttrs[] = { + Attribute::MustProgress, Attribute::NoUnwind, Attribute::WillReturn}; static constexpr Attribute::AttrKind CommonPtrArgAttrs[] = { Attribute::NoAlias, Attribute::WriteOnly, Attribute::NonNull}; @@ -182,6 +192,71 @@ RuntimeLibcallsInfo::getFunctionTy(LLVMContext &Ctx, const Triple &TT, return {FunctionType::get(RetTy, {ScalarTy}, false), Attrs}; } + case RTLIB::impl_malloc: + case RTLIB::impl_calloc: { + AttrBuilder FuncAttrBuilder(Ctx); + for (Attribute::AttrKind Attr : MemoryFnAttrs) + FuncAttrBuilder.addAttribute(Attr); + FuncAttrBuilder.addAttribute(Attribute::NoFree); + + AllocFnKind AllocKind = AllocFnKind::Alloc; + if (LibcallImpl == RTLIB::impl_malloc) + AllocKind |= AllocFnKind::Uninitialized; + + // TODO: Set memory attribute + 
FuncAttrBuilder.addAllocKindAttr(AllocKind); + FuncAttrBuilder.addAttribute("alloc-family", "malloc"); + FuncAttrBuilder.addAllocSizeAttr(0, LibcallImpl == RTLIB::impl_malloc + ? std::nullopt + : std::make_optional(1)); + + AttributeList Attrs; + Attrs = Attrs.addFnAttributes(Ctx, FuncAttrBuilder); + + { + AttrBuilder ArgAttrBuilder(Ctx); + for (Attribute::AttrKind AK : CommonPtrArgAttrs) + ArgAttrBuilder.addAttribute(AK); + + Attrs = Attrs.addRetAttribute(Ctx, Attribute::NoUndef); + Attrs = Attrs.addRetAttribute(Ctx, Attribute::NoAlias); + Attrs = Attrs.addParamAttribute(Ctx, 0, Attribute::NoUndef); + if (LibcallImpl == RTLIB::impl_calloc) + Attrs = Attrs.addParamAttribute(Ctx, 1, Attribute::NoUndef); + } + + IntegerType *SizeT = getSizeTType(Ctx, DL); + PointerType *PtrTy = PointerType::get(Ctx, 0); + SmallVector ArgTys = {SizeT}; + if (LibcallImpl == RTLIB::impl_calloc) + ArgTys.push_back(SizeT); + + return {FunctionType::get(PtrTy, ArgTys, false), Attrs}; + } + case RTLIB::impl_free: { + // TODO: Set memory attribute + AttrBuilder FuncAttrBuilder(Ctx); + for (Attribute::AttrKind Attr : MemoryFnAttrs) + FuncAttrBuilder.addAttribute(Attr); + + FuncAttrBuilder.addAllocKindAttr(AllocFnKind::Free); + FuncAttrBuilder.addAttribute("alloc-family", "malloc"); + + AttributeList Attrs; + Attrs = Attrs.addFnAttributes(Ctx, FuncAttrBuilder); + + { + AttrBuilder ArgAttrBuilder(Ctx); + ArgAttrBuilder.addAttribute(Attribute::NoUndef); + ArgAttrBuilder.addAttribute(Attribute::AllocatedPointer); + ArgAttrBuilder.addCapturesAttr(CaptureInfo::none()); + Attrs = Attrs.addParamAttributes(Ctx, 0, ArgAttrBuilder); + } + + return {FunctionType::get(Type::getVoidTy(Ctx), {PointerType::get(Ctx, 0)}, + false), + Attrs}; + } case RTLIB::impl_sqrtf: case RTLIB::impl_sqrt: { AttrBuilder FuncAttrBuilder(Ctx); diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp index 7cc1980d24c33..a1e14d8f25bf7 100644 --- a/llvm/lib/IR/Verifier.cpp +++ b/llvm/lib/IR/Verifier.cpp @@ -2732,6 
+2732,14 @@ void Verifier::visitConstantPtrAuth(const ConstantPtrAuth *CPA) { Check(CPA->getDiscriminator()->getBitWidth() == 64, "signed ptrauth constant discriminator must be i64 constant integer"); + + Check(CPA->getDeactivationSymbol()->getType()->isPointerTy(), + "signed ptrauth constant deactivation symbol must be a pointer"); + + Check(isa(CPA->getDeactivationSymbol()) || + CPA->getDeactivationSymbol()->isNullValue(), + "signed ptrauth constant deactivation symbol must be a global value " + "or null"); } bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) { diff --git a/llvm/lib/MC/MCELFStreamer.cpp b/llvm/lib/MC/MCELFStreamer.cpp index 973e98dd1fa29..8d6f7bf6e642e 100644 --- a/llvm/lib/MC/MCELFStreamer.cpp +++ b/llvm/lib/MC/MCELFStreamer.cpp @@ -364,7 +364,7 @@ void MCELFStreamer::finishImpl() { } finalizeCGProfile(); - emitFrames(nullptr); + emitFrames(); this->MCObjectStreamer::finishImpl(); } diff --git a/llvm/lib/MC/MCMachOStreamer.cpp b/llvm/lib/MC/MCMachOStreamer.cpp index 2b7a248e6d109..cde1d66127f06 100644 --- a/llvm/lib/MC/MCMachOStreamer.cpp +++ b/llvm/lib/MC/MCMachOStreamer.cpp @@ -422,7 +422,7 @@ void MCMachOStreamer::emitTBSSSymbol(MCSection *Section, MCSymbol *Symbol, } void MCMachOStreamer::finishImpl() { - emitFrames(&getAssembler().getBackend()); + emitFrames(); // We have to set the fragment atom associations so we can relax properly for // Mach-O. 
diff --git a/llvm/lib/MC/MCObjectStreamer.cpp b/llvm/lib/MC/MCObjectStreamer.cpp index 701a0836d2c70..94468140a30b9 100644 --- a/llvm/lib/MC/MCObjectStreamer.cpp +++ b/llvm/lib/MC/MCObjectStreamer.cpp @@ -178,10 +178,11 @@ void MCObjectStreamer::reset() { MCStreamer::reset(); } -void MCObjectStreamer::emitFrames(MCAsmBackend *MAB) { +void MCObjectStreamer::emitFrames() { if (!getNumFrameInfos()) return; + auto *MAB = &getAssembler().getBackend(); if (EmitEHFrame) MCDwarfFrameEmitter::Emit(*this, MAB, true); diff --git a/llvm/lib/MC/MCTargetOptionsCommandFlags.cpp b/llvm/lib/MC/MCTargetOptionsCommandFlags.cpp index ff95ff78fd53a..22494fa11eb2a 100644 --- a/llvm/lib/MC/MCTargetOptionsCommandFlags.cpp +++ b/llvm/lib/MC/MCTargetOptionsCommandFlags.cpp @@ -24,6 +24,13 @@ using namespace llvm; return *NAME##View; \ } +#define MCSTROPT(NAME) \ + static cl::opt *NAME##View; \ + StringRef llvm::mc::get##NAME() { \ + assert(NAME##View && "RegisterMCTargetOptionsFlags not created."); \ + return *NAME##View; \ + } + #define MCOPT_EXP(TY, NAME) \ MCOPT(TY, NAME) \ std::optional llvm::mc::getExplicit##NAME() { \ @@ -52,8 +59,8 @@ MCOPT(bool, Crel) MCOPT(bool, ImplicitMapSyms) MCOPT(bool, X86RelaxRelocations) MCOPT(bool, X86Sse2Avx) -MCOPT(std::string, ABIName) -MCOPT(std::string, AsSecureLogFile) +MCSTROPT(ABIName) +MCSTROPT(AsSecureLogFile) llvm::mc::RegisterMCTargetOptionsFlags::RegisterMCTargetOptionsFlags() { #define MCBINDOPT(NAME) \ diff --git a/llvm/lib/MC/MCWasmStreamer.cpp b/llvm/lib/MC/MCWasmStreamer.cpp index 070b3d9f8d2c8..1d3cf38d4bfdb 100644 --- a/llvm/lib/MC/MCWasmStreamer.cpp +++ b/llvm/lib/MC/MCWasmStreamer.cpp @@ -147,7 +147,7 @@ void MCWasmStreamer::emitIdent(StringRef IdentString) { } void MCWasmStreamer::finishImpl() { - emitFrames(nullptr); + emitFrames(); this->MCObjectStreamer::finishImpl(); } diff --git a/llvm/lib/Object/Archive.cpp b/llvm/lib/Object/Archive.cpp index 861c284253f7a..8e4a5ea5fc612 100644 --- a/llvm/lib/Object/Archive.cpp +++ 
b/llvm/lib/Object/Archive.cpp @@ -582,7 +582,8 @@ Expected Archive::Child::getBuffer() const { if (!FullNameOrErr) return FullNameOrErr.takeError(); const std::string &FullName = *FullNameOrErr; - ErrorOr> Buf = MemoryBuffer::getFile(FullName); + ErrorOr> Buf = + MemoryBuffer::getFile(FullName, false, /*RequiresNullTerminator=*/false); if (std::error_code EC = Buf.getError()) return errorCodeToError(EC); Parent->ThinBuffers.push_back(std::move(*Buf)); diff --git a/llvm/lib/Passes/PassBuilder.cpp b/llvm/lib/Passes/PassBuilder.cpp index 0d190ea448931..f5281ea69b512 100644 --- a/llvm/lib/Passes/PassBuilder.cpp +++ b/llvm/lib/Passes/PassBuilder.cpp @@ -1590,24 +1590,31 @@ parseBoundsCheckingOptions(StringRef Params) { Options.Rt = { /*MinRuntime=*/false, /*MayReturn=*/true, + /*HandlerPreserveAllRegs=*/false, }; } else if (ParamName == "rt-abort") { Options.Rt = { /*MinRuntime=*/false, /*MayReturn=*/false, + /*HandlerPreserveAllRegs=*/false, }; } else if (ParamName == "min-rt") { Options.Rt = { /*MinRuntime=*/true, /*MayReturn=*/true, + /*HandlerPreserveAllRegs=*/false, }; } else if (ParamName == "min-rt-abort") { Options.Rt = { /*MinRuntime=*/true, /*MayReturn=*/false, + /*HandlerPreserveAllRegs=*/false, }; } else if (ParamName == "merge") { Options.Merge = true; + } else if (ParamName == "handler-preserve-all-regs") { + if (Options.Rt) + Options.Rt->HandlerPreserveAllRegs = true; } else { StringRef ParamEQ; StringRef Val; diff --git a/llvm/lib/SandboxIR/Constant.cpp b/llvm/lib/SandboxIR/Constant.cpp index 9de88ef2cf0a0..eb14797af081c 100644 --- a/llvm/lib/SandboxIR/Constant.cpp +++ b/llvm/lib/SandboxIR/Constant.cpp @@ -412,10 +412,12 @@ PointerType *NoCFIValue::getType() const { } ConstantPtrAuth *ConstantPtrAuth::get(Constant *Ptr, ConstantInt *Key, - ConstantInt *Disc, Constant *AddrDisc) { + ConstantInt *Disc, Constant *AddrDisc, + Constant *DeactivationSymbol) { auto *LLVMC = llvm::ConstantPtrAuth::get( cast(Ptr->Val), cast(Key->Val), - cast(Disc->Val), 
cast(AddrDisc->Val)); + cast(Disc->Val), cast(AddrDisc->Val), + cast(DeactivationSymbol->Val)); return cast(Ptr->getContext().getOrCreateConstant(LLVMC)); } @@ -439,6 +441,11 @@ Constant *ConstantPtrAuth::getAddrDiscriminator() const { cast(Val)->getAddrDiscriminator()); } +Constant *ConstantPtrAuth::getDeactivationSymbol() const { + return Ctx.getOrCreateConstant( + cast(Val)->getDeactivationSymbol()); +} + ConstantPtrAuth *ConstantPtrAuth::getWithSameSchema(Constant *Pointer) const { auto *LLVMC = cast(Val)->getWithSameSchema( cast(Pointer->Val)); diff --git a/llvm/lib/Support/AllocToken.cpp b/llvm/lib/Support/AllocToken.cpp index daa40d4e9dcc6..cabe52189c4bb 100644 --- a/llvm/lib/Support/AllocToken.cpp +++ b/llvm/lib/Support/AllocToken.cpp @@ -28,6 +28,20 @@ llvm::getAllocTokenModeFromString(StringRef Name) { .Default(std::nullopt); } +StringRef llvm::getAllocTokenModeAsString(AllocTokenMode Mode) { + switch (Mode) { + case AllocTokenMode::Increment: + return "increment"; + case AllocTokenMode::Random: + return "random"; + case AllocTokenMode::TypeHash: + return "typehash"; + case AllocTokenMode::TypeHashPointerSplit: + return "typehashpointersplit"; + } + llvm_unreachable("Unknown AllocTokenMode"); +} + static uint64_t getStableHash(const AllocTokenMetadata &Metadata, uint64_t MaxTokens) { return getStableSipHash(Metadata.TypeName) % MaxTokens; diff --git a/llvm/lib/Target/AArch64/AArch64.td b/llvm/lib/Target/AArch64/AArch64.td index 0f457c2cab61b..1a4367b84353b 100644 --- a/llvm/lib/Target/AArch64/AArch64.td +++ b/llvm/lib/Target/AArch64/AArch64.td @@ -40,6 +40,8 @@ include "AArch64SchedPredExynos.td" include "AArch64SchedPredNeoverse.td" include "AArch64Combine.td" +defm : RemapAllTargetPseudoPointerOperands; + def AArch64InstrInfo : InstrInfo; //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp b/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp index 
5da6181ba36dd..8267414e78955 100644 --- a/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp +++ b/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp @@ -49,12 +49,14 @@ #include "llvm/IR/Module.h" #include "llvm/MC/MCAsmInfo.h" #include "llvm/MC/MCContext.h" +#include "llvm/MC/MCExpr.h" #include "llvm/MC/MCInst.h" #include "llvm/MC/MCInstBuilder.h" #include "llvm/MC/MCSectionELF.h" #include "llvm/MC/MCSectionMachO.h" #include "llvm/MC/MCStreamer.h" #include "llvm/MC/MCSymbol.h" +#include "llvm/MC/MCValue.h" #include "llvm/MC/TargetRegistry.h" #include "llvm/Support/Casting.h" #include "llvm/Support/CommandLine.h" @@ -95,6 +97,7 @@ class AArch64AsmPrinter : public AsmPrinter { bool EnableImportCallOptimization = false; DenseMap>> SectionToImportedFunctionCalls; + unsigned PAuthIFuncNextUniqueID = 1; public: static char ID; @@ -173,7 +176,12 @@ class AArch64AsmPrinter : public AsmPrinter { const MachineOperand *AUTAddrDisc, Register Scratch, std::optional PACKey, - uint64_t PACDisc, Register PACAddrDisc); + uint64_t PACDisc, Register PACAddrDisc, Value *DS); + + // Emit R_AARCH64_PATCHINST, the deactivation symbol relocation. Returns true + // if no instruction should be emitted because the deactivation symbol is + // defined in the current module so this function emitted a NOP instead. + bool emitDeactivationSymbolRelocation(Value *DS); // Emit the sequence for PAC. void emitPtrauthSign(const MachineInstr *MI); @@ -211,6 +219,10 @@ class AArch64AsmPrinter : public AsmPrinter { // authenticating) void LowerLOADgotAUTH(const MachineInstr &MI); + const MCExpr *emitPAuthRelocationAsIRelative( + const MCExpr *Target, uint16_t Disc, AArch64PACKey::ID KeyID, + bool HasAddressDiversity, bool IsDSOLocal, const MCExpr *DSExpr); + /// tblgen'erated driver function for lowering simple MI->MC /// pseudo instructions. 
bool lowerPseudoInstExpansion(const MachineInstr *MI, MCInst &Inst); @@ -2104,11 +2116,31 @@ void AArch64AsmPrinter::emitPtrauthTailCallHardening(const MachineInstr *TC) { LRCheckMethod); } +bool AArch64AsmPrinter::emitDeactivationSymbolRelocation(Value *DS) { + if (!DS) + return false; + + if (isa(DS)) { + // Just emit the nop directly. + EmitToStreamer(MCInstBuilder(AArch64::HINT).addImm(0)); + return true; + } + MCSymbol *Dot = OutContext.createTempSymbol(); + OutStreamer->emitLabel(Dot); + const MCExpr *DeactDotExpr = MCSymbolRefExpr::create(Dot, OutContext); + + const MCExpr *DSExpr = MCSymbolRefExpr::create( + OutContext.getOrCreateSymbol(DS->getName()), OutContext); + OutStreamer->emitRelocDirective(*DeactDotExpr, "R_AARCH64_PATCHINST", DSExpr, + SMLoc()); + return false; +} + void AArch64AsmPrinter::emitPtrauthAuthResign( Register AUTVal, AArch64PACKey::ID AUTKey, uint64_t AUTDisc, const MachineOperand *AUTAddrDisc, Register Scratch, std::optional PACKey, uint64_t PACDisc, - Register PACAddrDisc) { + Register PACAddrDisc, Value *DS) { const bool IsAUTPAC = PACKey.has_value(); // We expand AUT/AUTPAC into a sequence of the form @@ -2155,15 +2187,17 @@ void AArch64AsmPrinter::emitPtrauthAuthResign( bool AUTZero = AUTDiscReg == AArch64::XZR; unsigned AUTOpc = getAUTOpcodeForKey(AUTKey, AUTZero); - // autiza x16 ; if AUTZero - // autia x16, x17 ; if !AUTZero - MCInst AUTInst; - AUTInst.setOpcode(AUTOpc); - AUTInst.addOperand(MCOperand::createReg(AUTVal)); - AUTInst.addOperand(MCOperand::createReg(AUTVal)); - if (!AUTZero) - AUTInst.addOperand(MCOperand::createReg(AUTDiscReg)); - EmitToStreamer(*OutStreamer, AUTInst); + if (!emitDeactivationSymbolRelocation(DS)) { + // autiza x16 ; if AUTZero + // autia x16, x17 ; if !AUTZero + MCInst AUTInst; + AUTInst.setOpcode(AUTOpc); + AUTInst.addOperand(MCOperand::createReg(AUTVal)); + AUTInst.addOperand(MCOperand::createReg(AUTVal)); + if (!AUTZero) + AUTInst.addOperand(MCOperand::createReg(AUTDiscReg)); + 
EmitToStreamer(*OutStreamer, AUTInst); + } // Unchecked or checked-but-non-trapping AUT is just an "AUT": we're done. if (!IsAUTPAC && (!ShouldCheck || !ShouldTrap)) @@ -2227,6 +2261,9 @@ void AArch64AsmPrinter::emitPtrauthSign(const MachineInstr *MI) { bool IsZeroDisc = DiscReg == AArch64::XZR; unsigned Opc = getPACOpcodeForKey(Key, IsZeroDisc); + if (emitDeactivationSymbolRelocation(MI->getDeactivationSymbol())) + return; + // paciza x16 ; if IsZeroDisc // pacia x16, x17 ; if !IsZeroDisc MCInst PACInst; @@ -2299,6 +2336,214 @@ void AArch64AsmPrinter::emitPtrauthBranch(const MachineInstr *MI) { EmitToStreamer(*OutStreamer, BRInst); } +static void emitAddress(MCStreamer &Streamer, MCRegister Reg, + const MCExpr *Expr, bool DSOLocal, + const MCSubtargetInfo &STI) { + MCValue Val; + if (!Expr->evaluateAsRelocatable(Val, nullptr)) + report_fatal_error("emitAddress could not evaluate"); + if (DSOLocal) { + Streamer.emitInstruction( + MCInstBuilder(AArch64::ADRP) + .addReg(Reg) + .addExpr(MCSpecifierExpr::create(Expr, AArch64::S_ABS_PAGE, + Streamer.getContext())), + STI); + Streamer.emitInstruction( + MCInstBuilder(AArch64::ADDXri) + .addReg(Reg) + .addReg(Reg) + .addExpr(MCSpecifierExpr::create(Expr, AArch64::S_LO12, + Streamer.getContext())) + .addImm(0), + STI); + } else { + auto *SymRef = + MCSymbolRefExpr::create(Val.getAddSym(), Streamer.getContext()); + Streamer.emitInstruction( + MCInstBuilder(AArch64::ADRP) + .addReg(Reg) + .addExpr(MCSpecifierExpr::create(SymRef, AArch64::S_GOT_PAGE, + Streamer.getContext())), + STI); + Streamer.emitInstruction( + MCInstBuilder(AArch64::LDRXui) + .addReg(Reg) + .addReg(Reg) + .addExpr(MCSpecifierExpr::create(SymRef, AArch64::S_GOT_LO12, + Streamer.getContext())), + STI); + if (Val.getConstant()) + Streamer.emitInstruction(MCInstBuilder(AArch64::ADDXri) + .addReg(Reg) + .addReg(Reg) + .addImm(Val.getConstant()) + .addImm(0), + STI); + } +} + +static bool targetSupportsPAuthRelocation(const Triple &TT, + const MCExpr *Target, + 
const MCExpr *DSExpr) { + // No released version of glibc supports PAuth relocations. + if (TT.isOSGlibc()) + return false; + + // We emit PAuth constants as IRELATIVE relocations in cases where the + // constant cannot be represented as a PAuth relocation: + // 1) There is a deactivation symbol. + // 2) The signed value is not a symbol. + return !DSExpr && !isa(Target); +} + +static bool targetSupportsIRelativeRelocation(const Triple &TT) { + // IFUNCs are ELF-only. + if (!TT.isOSBinFormatELF()) + return false; + + // musl doesn't support IFUNCs. + if (TT.isMusl()) + return false; + + return true; +} + +// Emit an ifunc resolver that returns a signed pointer to the specified target, +// and return a FUNCINIT reference to the resolver. In the linked binary, this +// function becomes the target of an IRELATIVE relocation. This resolver is used +// to relocate signed pointers in global variable initializers in special cases +// where the standard R_AARCH64_AUTH_ABS64 relocation would not work. 
+// +// Example (signed null pointer, not address discriminated): +// +// .8byte .Lpauth_ifunc0 +// .pushsection .text.startup,"ax",@progbits +// .Lpauth_ifunc0: +// mov x0, #0 +// mov x1, #12345 +// b __emupac_pacda +// +// Example (signed null pointer, address discriminated): +// +// .Ltmp: +// .8byte .Lpauth_ifunc0 +// .pushsection .text.startup,"ax",@progbits +// .Lpauth_ifunc0: +// mov x0, #0 +// adrp x1, .Ltmp +// add x1, x1, :lo12:.Ltmp +// b __emupac_pacda +// .popsection +// +// Example (signed pointer to symbol, not address discriminated): +// +// .Ltmp: +// .8byte .Lpauth_ifunc0 +// .pushsection .text.startup,"ax",@progbits +// .Lpauth_ifunc0: +// adrp x0, symbol +// add x0, x0, :lo12:symbol +// mov x1, #12345 +// b __emupac_pacda +// .popsection +// +// Example (signed null pointer, not address discriminated, with deactivation +// symbol ds): +// +// .8byte .Lpauth_ifunc0 +// .pushsection .text.startup,"ax",@progbits +// .Lpauth_ifunc0: +// mov x0, #0 +// mov x1, #12345 +// .reloc ., R_AARCH64_PATCHINST, ds +// b __emupac_pacda +// ret +// .popsection +const MCExpr *AArch64AsmPrinter::emitPAuthRelocationAsIRelative( + const MCExpr *Target, uint16_t Disc, AArch64PACKey::ID KeyID, + bool HasAddressDiversity, bool IsDSOLocal, const MCExpr *DSExpr) { + const Triple &TT = TM.getTargetTriple(); + + // We only emit an IRELATIVE relocation if the target supports IRELATIVE and + // does not support the kind of PAuth relocation that we are trying to emit. + if (targetSupportsPAuthRelocation(TT, Target, DSExpr) || + !targetSupportsIRelativeRelocation(TT)) + return nullptr; + + // For now, only the DA key is supported. 
+ if (KeyID != AArch64PACKey::DA) + return nullptr; + + std::unique_ptr STI( + TM.getTarget().createMCSubtargetInfo(TT, "", "")); + assert(STI && "Unable to create subtarget info"); + this->STI = static_cast(&*STI); + + MCSymbol *Place = OutStreamer->getContext().createTempSymbol(); + OutStreamer->emitLabel(Place); + OutStreamer->pushSection(); + + OutStreamer->switchSection(OutStreamer->getContext().getELFSection( + ".text.startup", ELF::SHT_PROGBITS, ELF::SHF_ALLOC | ELF::SHF_EXECINSTR, + 0, "", true, PAuthIFuncNextUniqueID++, nullptr)); + + MCSymbol *IRelativeSym = + OutStreamer->getContext().createLinkerPrivateSymbol("pauth_ifunc"); + OutStreamer->emitLabel(IRelativeSym); + if (isa(Target)) { + OutStreamer->emitInstruction(MCInstBuilder(AArch64::MOVZXi) + .addReg(AArch64::X0) + .addExpr(Target) + .addImm(0), + *STI); + } else { + emitAddress(*OutStreamer, AArch64::X0, Target, IsDSOLocal, *STI); + } + if (HasAddressDiversity) { + auto *PlacePlusDisc = MCBinaryExpr::createAdd( + MCSymbolRefExpr::create(Place, OutStreamer->getContext()), + MCConstantExpr::create(static_cast(Disc), + OutStreamer->getContext()), + OutStreamer->getContext()); + emitAddress(*OutStreamer, AArch64::X1, PlacePlusDisc, /*IsDSOLocal=*/true, + *STI); + } else { + emitMOVZ(AArch64::X1, Disc, 0); + } + + if (DSExpr) { + MCSymbol *PrePACInst = OutStreamer->getContext().createTempSymbol(); + OutStreamer->emitLabel(PrePACInst); + + auto *PrePACInstExpr = + MCSymbolRefExpr::create(PrePACInst, OutStreamer->getContext()); + OutStreamer->emitRelocDirective(*PrePACInstExpr, "R_AARCH64_PATCHINST", + DSExpr, SMLoc()); + } + + // We don't know the subtarget because this is being emitted for a global + // initializer. Because the performance of IFUNC resolvers is unimportant, we + // always call the EmuPAC runtime, which will end up using the PAC instruction + // if the target supports PAC. 
+ MCSymbol *EmuPAC = + OutStreamer->getContext().getOrCreateSymbol("__emupac_pacda"); + const MCSymbolRefExpr *EmuPACRef = + MCSymbolRefExpr::create(EmuPAC, OutStreamer->getContext()); + OutStreamer->emitInstruction(MCInstBuilder(AArch64::B).addExpr(EmuPACRef), + *STI); + + // We need a RET despite the above tail call because the deactivation symbol + // may replace the tail call with a NOP. + if (DSExpr) + OutStreamer->emitInstruction( + MCInstBuilder(AArch64::RET).addReg(AArch64::LR), *STI); + OutStreamer->popSection(); + + return MCSymbolRefExpr::create(IRelativeSym, AArch64::S_FUNCINIT, + OutStreamer->getContext()); +} + const MCExpr * AArch64AsmPrinter::lowerConstantPtrAuth(const ConstantPtrAuth &CPA) { MCContext &Ctx = OutContext; @@ -2310,22 +2555,26 @@ AArch64AsmPrinter::lowerConstantPtrAuth(const ConstantPtrAuth &CPA) { auto *BaseGVB = dyn_cast(BaseGV); - // If we can't understand the referenced ConstantExpr, there's nothing - // else we can do: emit an error. - if (!BaseGVB) { - BaseGV->getContext().emitError( - "cannot resolve target base/addend of ptrauth constant"); - return nullptr; + const MCExpr *Sym; + if (BaseGVB) { + // If there is an addend, turn that into the appropriate MCExpr. + Sym = MCSymbolRefExpr::create(getSymbol(BaseGVB), Ctx); + if (Offset.sgt(0)) + Sym = MCBinaryExpr::createAdd( + Sym, MCConstantExpr::create(Offset.getSExtValue(), Ctx), Ctx); + else if (Offset.slt(0)) + Sym = MCBinaryExpr::createSub( + Sym, MCConstantExpr::create((-Offset).getSExtValue(), Ctx), Ctx); + } else { + Sym = MCConstantExpr::create(Offset.getSExtValue(), Ctx); } - // If there is an addend, turn that into the appropriate MCExpr. 
- const MCExpr *Sym = MCSymbolRefExpr::create(getSymbol(BaseGVB), Ctx); - if (Offset.sgt(0)) - Sym = MCBinaryExpr::createAdd( - Sym, MCConstantExpr::create(Offset.getSExtValue(), Ctx), Ctx); - else if (Offset.slt(0)) - Sym = MCBinaryExpr::createSub( - Sym, MCConstantExpr::create((-Offset).getSExtValue(), Ctx), Ctx); + const MCExpr *DSExpr = nullptr; + if (auto *DS = dyn_cast(CPA.getDeactivationSymbol())) { + if (isa(DS)) + return Sym; + DSExpr = MCSymbolRefExpr::create(getSymbol(DS), Ctx); + } uint64_t KeyID = CPA.getKey()->getZExtValue(); // We later rely on valid KeyID value in AArch64PACKeyIDToString call from @@ -2344,6 +2593,16 @@ AArch64AsmPrinter::lowerConstantPtrAuth(const ConstantPtrAuth &CPA) { Disc = 0; } + // Check if we need to represent this with an IRELATIVE and emit it if so. + if (auto *IFuncSym = emitPAuthRelocationAsIRelative( + Sym, Disc, AArch64PACKey::ID(KeyID), CPA.hasAddressDiscriminator(), + BaseGVB && BaseGVB->isDSOLocal(), DSExpr)) + return IFuncSym; + + if (DSExpr) + report_fatal_error("deactivation symbols unsupported in constant " + "expressions on this target"); + // Finally build the complete @AUTH expr. 
return AArch64AuthMCExpr::create(Sym, Disc, AArch64PACKey::ID(KeyID), CPA.hasAddressDiscriminator(), Ctx); @@ -2948,17 +3207,18 @@ void AArch64AsmPrinter::emitInstruction(const MachineInstr *MI) { } case AArch64::AUTx16x17: - emitPtrauthAuthResign(AArch64::X16, - (AArch64PACKey::ID)MI->getOperand(0).getImm(), - MI->getOperand(1).getImm(), &MI->getOperand(2), - AArch64::X17, std::nullopt, 0, 0); + emitPtrauthAuthResign( + AArch64::X16, (AArch64PACKey::ID)MI->getOperand(0).getImm(), + MI->getOperand(1).getImm(), &MI->getOperand(2), AArch64::X17, + std::nullopt, 0, 0, MI->getDeactivationSymbol()); return; case AArch64::AUTxMxN: emitPtrauthAuthResign(MI->getOperand(0).getReg(), (AArch64PACKey::ID)MI->getOperand(3).getImm(), MI->getOperand(4).getImm(), &MI->getOperand(5), - MI->getOperand(1).getReg(), std::nullopt, 0, 0); + MI->getOperand(1).getReg(), std::nullopt, 0, 0, + MI->getDeactivationSymbol()); return; case AArch64::AUTPAC: @@ -2966,7 +3226,8 @@ void AArch64AsmPrinter::emitInstruction(const MachineInstr *MI) { AArch64::X16, (AArch64PACKey::ID)MI->getOperand(0).getImm(), MI->getOperand(1).getImm(), &MI->getOperand(2), AArch64::X17, (AArch64PACKey::ID)MI->getOperand(3).getImm(), - MI->getOperand(4).getImm(), MI->getOperand(5).getReg()); + MI->getOperand(4).getImm(), MI->getOperand(5).getReg(), + MI->getDeactivationSymbol()); return; case AArch64::PAC: @@ -3447,6 +3708,9 @@ void AArch64AsmPrinter::emitInstruction(const MachineInstr *MI) { return; } + if (emitDeactivationSymbolRelocation(MI->getDeactivationSymbol())) + return; + // Finally, do the automated lowerings for everything else. 
MCInst TmpInst; MCInstLowering.Lower(MI, TmpInst); diff --git a/llvm/lib/Target/AArch64/AArch64Features.td b/llvm/lib/Target/AArch64/AArch64Features.td index 72ff8613f01e7..066724bea92c9 100644 --- a/llvm/lib/Target/AArch64/AArch64Features.td +++ b/llvm/lib/Target/AArch64/AArch64Features.td @@ -894,6 +894,11 @@ def FeatureUseFixedOverScalableIfEqualCost : SubtargetFeature<"use-fixed-over-sc "UseFixedOverScalableIfEqualCost", "true", "Prefer fixed width loop vectorization over scalable if the cost-model assigns equal costs">; +def FeatureDisableMaximizeScalableBandwidth : SubtargetFeature< "disable-maximize-scalable-bandwidth", + "DisableMaximizeScalableBandwidth", "true", + "Determine the maximum scalable vector length for a loop by the " + "largest scalar type rather than the smallest">; + // For performance reasons we prefer to use ldapr to ldapur on certain cores. def FeatureAvoidLDAPUR : SubtargetFeature<"avoid-ldapur", "AvoidLDAPUR", "true", "Prefer add+ldapr to offset ldapur">; diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp index 08466667c0fa5..b721c1f533726 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp @@ -1557,7 +1557,10 @@ void AArch64DAGToDAGISel::SelectPtrauthAuth(SDNode *N) { extractPtrauthBlendDiscriminators(AUTDisc, CurDAG); if (!Subtarget->isX16X17Safer()) { - SDValue Ops[] = {Val, AUTKey, AUTConstDisc, AUTAddrDisc}; + std::vector Ops = {Val, AUTKey, AUTConstDisc, AUTAddrDisc}; + // Copy deactivation symbol if present. 
+ if (N->getNumOperands() > 4) + Ops.push_back(N->getOperand(4)); SDNode *AUT = CurDAG->getMachineNode(AArch64::AUTxMxN, DL, MVT::i64, MVT::i64, Ops); diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp index 83ce39fa314d1..dd70d729ffc91 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -10203,6 +10203,9 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI, if (InGlue.getNode()) Ops.push_back(InGlue); + if (CLI.DeactivationSymbol) + Ops.push_back(DAG.getDeactivationSymbol(CLI.DeactivationSymbol)); + // If we're doing a tall call, use a TC_RETURN here rather than an // actual call instruction. if (IsTailCall) { @@ -22586,6 +22589,38 @@ static SDValue performSubWithBorrowCombine(SDNode *N, SelectionDAG &DAG) { Flags); } +// add(trunc(ashr(A, C)), trunc(lshr(A, BW-1))), with C >= BW +// -> +// X = trunc(ashr(A, C)); add(x, lshr(X, BW-1) +// The original converts into ashr+lshr+xtn+xtn+add. The second becomes +// ashr+xtn+usra. The first form has less total latency due to more parallelism, +// but more micro-ops and seems to be slower in practice. 
+static SDValue performAddTruncShiftCombine(SDNode *N, SelectionDAG &DAG) { + using namespace llvm::SDPatternMatch; + EVT VT = N->getValueType(0); + if (VT != MVT::v2i32 && VT != MVT::v4i16 && VT != MVT::v8i8) + return SDValue(); + + SDValue AShr, LShr; + if (!sd_match(N, m_Add(m_Trunc(m_Value(AShr)), m_Trunc(m_Value(LShr))))) + return SDValue(); + if (AShr.getOpcode() != AArch64ISD::VASHR) + std::swap(AShr, LShr); + if (AShr.getOpcode() != AArch64ISD::VASHR || + LShr.getOpcode() != AArch64ISD::VLSHR || + AShr.getOperand(0) != LShr.getOperand(0) || + AShr.getConstantOperandVal(1) < VT.getScalarSizeInBits() || + LShr.getConstantOperandVal(1) != VT.getScalarSizeInBits() * 2 - 1) + return SDValue(); + + SDLoc DL(N); + SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, VT, AShr); + SDValue Shift = DAG.getNode( + AArch64ISD::VLSHR, DL, VT, Trunc, + DAG.getTargetConstant(VT.getScalarSizeInBits() - 1, DL, MVT::i32)); + return DAG.getNode(ISD::ADD, DL, VT, Trunc, Shift); +} + static SDValue performAddSubCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { // Try to change sum of two reductions. 
@@ -22609,6 +22644,8 @@ static SDValue performAddSubCombine(SDNode *N, return Val; if (SDValue Val = performSubWithBorrowCombine(N, DCI.DAG)) return Val; + if (SDValue Val = performAddTruncShiftCombine(N, DCI.DAG)) + return Val; if (SDValue Val = performExtBinopLoadFold(N, DCI.DAG)) return Val; diff --git a/llvm/lib/Target/AArch64/AArch64InstrFormats.td b/llvm/lib/Target/AArch64/AArch64InstrFormats.td index 6871c2d504cf6..61a8f764e39ed 100644 --- a/llvm/lib/Target/AArch64/AArch64InstrFormats.td +++ b/llvm/lib/Target/AArch64/AArch64InstrFormats.td @@ -2347,6 +2347,7 @@ class BImm pattern> let Inst{25-0} = addr; let DecoderMethod = "DecodeUnconditionalBranch"; + let supportsDeactivationSymbol = true; } class BranchImm pattern> @@ -2404,6 +2405,7 @@ class SignAuthOneData opcode_prefix, bits<2> opcode, string asm, let Inst{11-10} = opcode; let Inst{9-5} = Rn; let Inst{4-0} = Rd; + let supportsDeactivationSymbol = true; } class SignAuthZero opcode_prefix, bits<2> opcode, string asm, @@ -2417,6 +2419,7 @@ class SignAuthZero opcode_prefix, bits<2> opcode, string asm, let Inst{11-10} = opcode; let Inst{9-5} = 0b11111; let Inst{4-0} = Rd; + let supportsDeactivationSymbol = true; } class SignAuthTwoOperand opc, string asm, diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td index 03bad8ff8ac8a..b4d8649b31d6d 100644 --- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td +++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td @@ -2215,6 +2215,7 @@ let Predicates = [HasPAuth] in { let Size = 12; let Defs = [X16, X17]; let usesCustomInserter = 1; + let supportsDeactivationSymbol = true; } // A standalone pattern is used, so that literal 0 can be passed as $Disc. 
diff --git a/llvm/lib/Target/AArch64/AArch64Processors.td b/llvm/lib/Target/AArch64/AArch64Processors.td index 11387bb97d29c..120415f91c9ae 100644 --- a/llvm/lib/Target/AArch64/AArch64Processors.td +++ b/llvm/lib/Target/AArch64/AArch64Processors.td @@ -593,6 +593,7 @@ def TuneNeoverseN2 : SubtargetFeature<"neoversen2", "ARMProcFamily", "NeoverseN2 FeatureALULSLFast, FeaturePostRAScheduler, FeatureEnableSelectOptimize, + FeatureDisableMaximizeScalableBandwidth, FeaturePredictableSelectIsExpensive]>; def TuneNeoverseN3 : SubtargetFeature<"neoversen3", "ARMProcFamily", "NeoverseN3", @@ -626,6 +627,7 @@ def TuneNeoverseV1 : SubtargetFeature<"neoversev1", "ARMProcFamily", "NeoverseV1 FeaturePostRAScheduler, FeatureEnableSelectOptimize, FeaturePredictableSelectIsExpensive, + FeatureDisableMaximizeScalableBandwidth, FeatureNoSVEFPLD1R]>; def TuneNeoverseV2 : SubtargetFeature<"neoversev2", "ARMProcFamily", "NeoverseV2", diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp index 0bae00bafee3c..edd61a2db705e 100644 --- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp +++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp @@ -375,8 +375,13 @@ AArch64TTIImpl::getInlineCallPenalty(const Function *F, const CallBase &Call, bool AArch64TTIImpl::shouldMaximizeVectorBandwidth( TargetTransformInfo::RegisterKind K) const { assert(K != TargetTransformInfo::RGK_Scalar); - return (K == TargetTransformInfo::RGK_FixedWidthVector && - ST->isNeonAvailable()); + + if (K == TargetTransformInfo::RGK_FixedWidthVector && ST->isNeonAvailable()) + return true; + + return K == TargetTransformInfo::RGK_ScalableVector && + ST->isSVEorStreamingSVEAvailable() && + !ST->disableMaximizeScalableBandwidth(); } /// Calculate the cost of materializing a 64-bit value. 
This helper diff --git a/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp b/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp index 55694efafeed1..7907a3c283624 100644 --- a/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp +++ b/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp @@ -1421,6 +1421,7 @@ bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, } else if (Info.CFIType) { MIB->setCFIType(MF, Info.CFIType->getZExtValue()); } + MIB->setDeactivationSymbol(MF, Info.DeactivationSymbol); MIB.add(Info.Callee); diff --git a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64WinCOFFStreamer.cpp b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64WinCOFFStreamer.cpp index 64f96c57d2026..942e1bd5b4e0b 100644 --- a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64WinCOFFStreamer.cpp +++ b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64WinCOFFStreamer.cpp @@ -53,7 +53,7 @@ void AArch64WinCOFFStreamer::emitWindowsUnwindTables() { } void AArch64WinCOFFStreamer::finishImpl() { - emitFrames(nullptr); + emitFrames(); emitWindowsUnwindTables(); MCWinCOFFStreamer::finishImpl(); diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp index 8e35ba77d69aa..71ea9ef6fc050 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp @@ -143,14 +143,6 @@ class AMDGPUCodeGenPrepareImpl bool canBreakPHINode(const PHINode &I); - /// \returns True if binary operation \p I is a signed binary operation, false - /// otherwise. - bool isSigned(const BinaryOperator &I) const; - - /// \returns True if the condition of 'select' operation \p I comes from a - /// signed 'icmp' operation, false otherwise. - bool isSigned(const SelectInst &I) const; - /// Return true if \p T is a legal scalar floating point type. 
bool isLegalFloatingTy(const Type *T) const; @@ -304,16 +296,6 @@ bool AMDGPUCodeGenPrepareImpl::run() { return MadeChange; } -bool AMDGPUCodeGenPrepareImpl::isSigned(const BinaryOperator &I) const { - return I.getOpcode() == Instruction::AShr || - I.getOpcode() == Instruction::SDiv || I.getOpcode() == Instruction::SRem; -} - -bool AMDGPUCodeGenPrepareImpl::isSigned(const SelectInst &I) const { - return isa(I.getOperand(0)) && - cast(I.getOperand(0))->isSigned(); -} - bool AMDGPUCodeGenPrepareImpl::isLegalFloatingTy(const Type *Ty) const { return Ty->isFloatTy() || Ty->isDoubleTy() || (Ty->isHalfTy() && ST.has16BitInsts()); diff --git a/llvm/lib/Target/AMDGPU/R600.td b/llvm/lib/Target/AMDGPU/R600.td index 9148edb92b084..bdfaac9f42ea7 100644 --- a/llvm/lib/Target/AMDGPU/R600.td +++ b/llvm/lib/Target/AMDGPU/R600.td @@ -8,15 +8,6 @@ include "llvm/Target/Target.td" -def R600InstrInfo : InstrInfo { - let guessInstructionProperties = 1; -} - -def R600 : Target { - let InstructionSet = R600InstrInfo; - let AllowRegisterRenaming = 1; -} - let Namespace = "R600" in { foreach Index = 0-15 in { @@ -27,6 +18,18 @@ include "R600RegisterInfo.td" } +defm : RemapAllTargetPseudoPointerOperands; + +def R600InstrInfo : InstrInfo { + let guessInstructionProperties = 1; +} + +def R600 : Target { + let InstructionSet = R600InstrInfo; + let AllowRegisterRenaming = 1; +} + + def NullALU : InstrItinClass; def ALU_NULL : FuncUnit; diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td index 3fe37e8217f35..c5f5b7d53cfb1 100644 --- a/llvm/lib/Target/AMDGPU/SIInstructions.td +++ b/llvm/lib/Target/AMDGPU/SIInstructions.td @@ -4751,3 +4751,14 @@ def V_ILLEGAL : Enc32, InstSI<(outs), (ins), "v_illegal"> { let hasSideEffects = 1; let SubtargetPredicate = isGFX10Plus; } + +defvar VGPR32_Ptr_Opcodes = [LOAD_STACK_GUARD]; +defvar VGPR64_Ptr_Opcodes = !listremove(PseudosWithPtrOps, VGPR32_Ptr_Opcodes); + +foreach inst = VGPR32_Ptr_Opcodes in { + def : 
RemapPointerOperands; +} + +foreach inst = VGPR64_Ptr_Opcodes in { + def : RemapPointerOperands; +} diff --git a/llvm/lib/Target/ARM/ARM.td b/llvm/lib/Target/ARM/ARM.td index 570aae9b3c7a7..1f71d810983db 100644 --- a/llvm/lib/Target/ARM/ARM.td +++ b/llvm/lib/Target/ARM/ARM.td @@ -38,6 +38,14 @@ include "ARMSchedule.td" //===----------------------------------------------------------------------===// include "ARMInstrInfo.td" + +def Thumb1OnlyMode : HwMode<[IsThumb1Only]>; +def arm_ptr_rc : RegClassByHwMode< + [DefaultMode, Thumb1OnlyMode], + [GPR, tGPR]>; + +defm : RemapAllTargetPseudoPointerOperands; + def ARMInstrInfo : InstrInfo; //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/ARM/MCTargetDesc/ARMWinCOFFStreamer.cpp b/llvm/lib/Target/ARM/MCTargetDesc/ARMWinCOFFStreamer.cpp index ca366edad89ee..060d1f86f6846 100644 --- a/llvm/lib/Target/ARM/MCTargetDesc/ARMWinCOFFStreamer.cpp +++ b/llvm/lib/Target/ARM/MCTargetDesc/ARMWinCOFFStreamer.cpp @@ -54,7 +54,7 @@ void ARMWinCOFFStreamer::emitWindowsUnwindTables() { } void ARMWinCOFFStreamer::finishImpl() { - emitFrames(nullptr); + emitFrames(); emitWindowsUnwindTables(); MCWinCOFFStreamer::finishImpl(); diff --git a/llvm/lib/Target/AVR/AVR.td b/llvm/lib/Target/AVR/AVR.td index 22ffc4a368ad6..f4ee11984cb73 100644 --- a/llvm/lib/Target/AVR/AVR.td +++ b/llvm/lib/Target/AVR/AVR.td @@ -32,6 +32,8 @@ include "AVRRegisterInfo.td" include "AVRInstrInfo.td" +defm : RemapAllTargetPseudoPointerOperands; + def AVRInstrInfo : InstrInfo; //===---------------------------------------------------------------------===// diff --git a/llvm/lib/Target/BPF/BPF.td b/llvm/lib/Target/BPF/BPF.td index 436b7eef600e7..50f9793fb29a7 100644 --- a/llvm/lib/Target/BPF/BPF.td +++ b/llvm/lib/Target/BPF/BPF.td @@ -13,6 +13,9 @@ include "BPFCallingConv.td" include "BPFInstrInfo.td" include "GISel/BPFRegisterBanks.td" + +defm : RemapAllTargetPseudoPointerOperands; + def BPFInstrInfo : InstrInfo; 
class Proc Features> diff --git a/llvm/lib/Target/CSKY/CSKY.td b/llvm/lib/Target/CSKY/CSKY.td index b5df93a9d464c..45ef9441b0a41 100644 --- a/llvm/lib/Target/CSKY/CSKY.td +++ b/llvm/lib/Target/CSKY/CSKY.td @@ -671,6 +671,8 @@ def : CK860V<"ck860fv", NoSchedModel, // Define the CSKY target. //===----------------------------------------------------------------------===// +defm : RemapAllTargetPseudoPointerOperands; + def CSKYInstrInfo : InstrInfo; diff --git a/llvm/lib/Target/DirectX/DirectX.td b/llvm/lib/Target/DirectX/DirectX.td index 4d1d45b84a683..1717d533d90fa 100644 --- a/llvm/lib/Target/DirectX/DirectX.td +++ b/llvm/lib/Target/DirectX/DirectX.td @@ -22,6 +22,8 @@ include "DXILStubs.td" // DirectX Subtarget features. //===----------------------------------------------------------------------===// +defm : RemapAllTargetPseudoPointerOperands; + def DirectXInstrInfo : InstrInfo; //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/Hexagon/Hexagon.td b/llvm/lib/Target/Hexagon/Hexagon.td index ede8463ff644b..17c72c393b432 100644 --- a/llvm/lib/Target/Hexagon/Hexagon.td +++ b/llvm/lib/Target/Hexagon/Hexagon.td @@ -413,6 +413,8 @@ include "HexagonPatternsV65.td" include "HexagonDepMappings.td" include "HexagonIntrinsics.td" +defm : RemapAllTargetPseudoPointerOperands; + def HexagonInstrInfo : InstrInfo; //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/Lanai/Lanai.td b/llvm/lib/Target/Lanai/Lanai.td index c6d949f42047e..9a5422db5feeb 100644 --- a/llvm/lib/Target/Lanai/Lanai.td +++ b/llvm/lib/Target/Lanai/Lanai.td @@ -21,6 +21,8 @@ include "LanaiRegisterInfo.td" include "LanaiCallingConv.td" include "LanaiInstrInfo.td" +defm : RemapAllTargetPseudoPointerOperands; + def LanaiInstrInfo : InstrInfo; //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/LoongArch/LoongArch.td 
b/llvm/lib/Target/LoongArch/LoongArch.td index 6497ff999f6fa..67f07f0a0370e 100644 --- a/llvm/lib/Target/LoongArch/LoongArch.td +++ b/llvm/lib/Target/LoongArch/LoongArch.td @@ -202,6 +202,8 @@ def : ProcessorModel<"la664", NoSchedModel, [Feature64Bit, // Define the LoongArch target. //===----------------------------------------------------------------------===// +defm : RemapAllTargetPseudoPointerOperands; + def LoongArchInstrInfo : InstrInfo { let guessInstructionProperties = 0; } diff --git a/llvm/lib/Target/M68k/M68k.td b/llvm/lib/Target/M68k/M68k.td index dab66d1022955..dfa44a423ae25 100644 --- a/llvm/lib/Target/M68k/M68k.td +++ b/llvm/lib/Target/M68k/M68k.td @@ -95,6 +95,8 @@ include "GISel/M68kRegisterBanks.td" include "M68kInstrInfo.td" +defm : RemapAllTargetPseudoPointerOperands; + def M68kInstrInfo : InstrInfo; //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/MSP430/MSP430.td b/llvm/lib/Target/MSP430/MSP430.td index 38aa30fcf4dd1..cb3949838f6f2 100644 --- a/llvm/lib/Target/MSP430/MSP430.td +++ b/llvm/lib/Target/MSP430/MSP430.td @@ -61,6 +61,8 @@ include "MSP430CallingConv.td" include "MSP430InstrInfo.td" +defm : RemapAllTargetPseudoPointerOperands; + def MSP430InstrInfo : InstrInfo; //===---------------------------------------------------------------------===// diff --git a/llvm/lib/Target/Mips/Mips.td b/llvm/lib/Target/Mips/Mips.td index e18388c179108..6c8d177093c76 100644 --- a/llvm/lib/Target/Mips/Mips.td +++ b/llvm/lib/Target/Mips/Mips.td @@ -244,6 +244,8 @@ include "MipsScheduleI6400.td" include "MipsScheduleP5600.td" include "MipsScheduleGeneric.td" +defm : RemapAllTargetPseudoPointerOperands; + def MipsInstrInfo : InstrInfo { } diff --git a/llvm/lib/Target/NVPTX/NVPTX.td b/llvm/lib/Target/NVPTX/NVPTX.td index 31c117a8c0fee..d41a43de95098 100644 --- a/llvm/lib/Target/NVPTX/NVPTX.td +++ b/llvm/lib/Target/NVPTX/NVPTX.td @@ -150,6 +150,16 @@ def : Proc<"sm_121", [SM121, PTX88]>; def : 
Proc<"sm_121a", [SM121a, PTX88]>; def : Proc<"sm_121f", [SM121f, PTX88]>; + +def Is64Bit : Predicate<"Subtarget->getTargetTriple().getArch() == Triple::nvptx64">; +def NVPTX64 : HwMode<[Is64Bit]>; + +def nvptx_ptr_rc : RegClassByHwMode< + [DefaultMode, NVPTX64], + [B32, B64]>; + +defm : RemapAllTargetPseudoPointerOperands; + def NVPTXInstrInfo : InstrInfo { } diff --git a/llvm/lib/Target/PowerPC/PPC.td b/llvm/lib/Target/PowerPC/PPC.td index 5d9ec4adf45c7..dc00aebe311f9 100644 --- a/llvm/lib/Target/PowerPC/PPC.td +++ b/llvm/lib/Target/PowerPC/PPC.td @@ -820,6 +820,8 @@ def PPCAsmParserVariant : AsmParserVariant { string BreakCharacters = "."; } +defm : RemapAllTargetPseudoPointerOperands; + def PPC : Target { // Information about the instructions. let InstructionSet = PPCInstrInfo; diff --git a/llvm/lib/Target/PowerPC/PPCRegisterInfo.td b/llvm/lib/Target/PowerPC/PPCRegisterInfo.td index 65d0484805b95..d6b13680a057e 100644 --- a/llvm/lib/Target/PowerPC/PPCRegisterInfo.td +++ b/llvm/lib/Target/PowerPC/PPCRegisterInfo.td @@ -904,6 +904,10 @@ def PPCRegGxRCNoR0Operand : AsmOperandClass { let Name = "RegGxRCNoR0"; let PredicateMethod = "isRegNumber"; } +def ppc_ptr_rc : RegClassByHwMode< + [PPC32, PPC64], + [GPRC, G8RC]>; + def ptr_rc_nor0_by_hwmode : RegClassByHwMode< [PPC32, PPC64], [GPRC_NOR0, G8RC_NOX0]>; diff --git a/llvm/lib/Target/RISCV/RISCV.td b/llvm/lib/Target/RISCV/RISCV.td index b24d8637cb27f..f6f82fd9bb55f 100644 --- a/llvm/lib/Target/RISCV/RISCV.td +++ b/llvm/lib/Target/RISCV/RISCV.td @@ -96,6 +96,8 @@ def RISCVAsmWriter : AsmWriter { int PassSubtarget = 1; } +defm : RemapAllTargetPseudoPointerOperands; + def RISCV : Target { let InstructionSet = RISCVInstrInfo; let AssemblyParsers = [RISCVAsmParser]; diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXAndes.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXAndes.td index b683e895c31c0..bbe3baef36bab 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoXAndes.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXAndes.td @@ 
-838,7 +838,6 @@ def : Pat<(fpextend (bf16 FPR16:$rs)), (NDS_FCVT_S_BF16 (bf16 FPR16:$rs))>; def : Pat<(bf16 (fpround FPR32:$rs)), (NDS_FCVT_BF16_S FPR32:$rs)>; -} // Predicates = [HasVendorXAndesBFHCvt] let isCodeGenOnly = 1 in { def NDS_FMV_BF16_X : FPUnaryOp_r<0b1111000, 0b00000, 0b000, FPR16, GPR, "fmv.w.x">, @@ -847,7 +846,6 @@ def NDS_FMV_X_BF16 : FPUnaryOp_r<0b1110000, 0b00000, 0b000, GPR, FPR16, "fmv.x.w Sched<[WriteFMovF32ToI32, ReadFMovF32ToI32]>; } -let Predicates = [HasVendorXAndesBFHCvt] in { def : Pat<(riscv_nds_fmv_bf16_x GPR:$src), (NDS_FMV_BF16_X GPR:$src)>; def : Pat<(riscv_nds_fmv_x_anyextbf16 (bf16 FPR16:$src)), (NDS_FMV_X_BF16 (bf16 FPR16:$src))>; diff --git a/llvm/lib/Target/SPIRV/SPIRV.td b/llvm/lib/Target/SPIRV/SPIRV.td index 39a4131c7f1bd..cc9c7913af427 100644 --- a/llvm/lib/Target/SPIRV/SPIRV.td +++ b/llvm/lib/Target/SPIRV/SPIRV.td @@ -14,6 +14,8 @@ include "SPIRVInstrInfo.td" include "SPIRVCombine.td" include "SPIRVBuiltins.td" +defm : RemapAllTargetPseudoPointerOperands; + def SPIRVInstrInfo : InstrInfo; class Proc Features> diff --git a/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp b/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp index b2cbdb2ad7375..709f49b0fecc1 100644 --- a/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp @@ -3373,6 +3373,8 @@ SPIRVType *lowerBuiltinType(const Type *OpaqueType, TargetType = getInlineSpirvType(BuiltinType, MIRBuilder, GR); } else if (Name == "spirv.VulkanBuffer") { TargetType = getVulkanBufferType(BuiltinType, MIRBuilder, GR); + } else if (Name == "spirv.Padding") { + TargetType = GR->getOrCreatePaddingType(MIRBuilder); } else if (Name == "spirv.Layout") { TargetType = getLayoutType(BuiltinType, MIRBuilder, GR); } else { diff --git a/llvm/lib/Target/SPIRV/SPIRVCBufferAccess.cpp b/llvm/lib/Target/SPIRV/SPIRVCBufferAccess.cpp index 329774df554f4..227d8716d974a 100644 --- a/llvm/lib/Target/SPIRV/SPIRVCBufferAccess.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVCBufferAccess.cpp @@ -79,15 
+79,20 @@ static bool replaceCBufferAccesses(Module &M) { // The handle definition should dominate all uses of the cbuffer members. // We'll insert our getpointer calls right after it. IRBuilder<> Builder(HandleDef->getNextNode()); + auto *HandleTy = cast(Mapping.Handle->getValueType()); + auto *LayoutTy = cast(HandleTy->getTypeParameter(0)); + const StructLayout *SL = M.getDataLayout().getStructLayout(LayoutTy); - for (uint32_t Index = 0; Index < Mapping.Members.size(); ++Index) { - GlobalVariable *MemberGV = Mapping.Members[Index].GV; + for (const hlsl::CBufferMember &Member : Mapping.Members) { + GlobalVariable *MemberGV = Member.GV; if (MemberGV->use_empty()) { continue; } + uint32_t IndexInStruct = SL->getElementContainingOffset(Member.Offset); + // Create the getpointer intrinsic call. - Value *IndexVal = Builder.getInt32(Index); + Value *IndexVal = Builder.getInt32(IndexInStruct); Type *PtrType = MemberGV->getType(); Value *GetPointerCall = Builder.CreateIntrinsic( PtrType, Intrinsic::spv_resource_getpointer, {HandleDef, IndexVal}); diff --git a/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp b/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp index 8e14fb03127fc..eea49bfdaf04b 100644 --- a/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp @@ -841,6 +841,7 @@ Type *SPIRVEmitIntrinsics::deduceElementTypeHelper( uint32_t Index = cast(II->getOperand(1))->getZExtValue(); Ty = cast(Ty)->getElementType(Index); } + Ty = reconstitutePeeledArrayType(Ty); } else { llvm_unreachable("Unknown handle type for spv_resource_getpointer."); } @@ -1569,16 +1570,57 @@ Instruction *SPIRVEmitIntrinsics::visitSwitchInst(SwitchInst &I) { return BrI; } -Instruction *SPIRVEmitIntrinsics::visitGetElementPtrInst(GetElementPtrInst &I) { - if (I.getSourceElementType() == IntegerType::getInt8Ty(CurrF->getContext()) && - TM->getSubtargetImpl()->isLogicalSPIRV()) { - Instruction *Result = buildLogicalAccessChainFromGEP(I); - if (Result) - return 
Result; +static bool isFirstIndexZero(const GetElementPtrInst *GEP) { + if (GEP->getNumIndices() == 0) + return false; + if (const auto *CI = dyn_cast(GEP->getOperand(1))) { + return CI->getZExtValue() == 0; } + return false; +} +Instruction *SPIRVEmitIntrinsics::visitGetElementPtrInst(GetElementPtrInst &I) { IRBuilder<> B(I.getParent()); B.SetInsertPoint(&I); + + if (TM->getSubtargetImpl()->isLogicalSPIRV() && !isFirstIndexZero(&I)) { + // Logical SPIR-V cannot use the OpPtrAccessChain instruction. If the first + // index of the GEP is not 0, then we need to try to adjust it. + // + // If the GEP is doing byte addressing, try to rebuild the full access chain + // from the type of the pointer. + if (I.getSourceElementType() == + IntegerType::getInt8Ty(CurrF->getContext())) { + return buildLogicalAccessChainFromGEP(I); + } + + // Look for the array-to-pointer decay. If this is the pattern + // we can adjust the types, and prepend a 0 to the indices. + Value *PtrOp = I.getPointerOperand(); + Type *SrcElemTy = I.getSourceElementType(); + Type *DeducedPointeeTy = deduceElementType(PtrOp, true); + + if (auto *ArrTy = dyn_cast(DeducedPointeeTy)) { + if (ArrTy->getElementType() == SrcElemTy) { + SmallVector NewIndices; + Type *FirstIdxType = I.getOperand(1)->getType(); + NewIndices.push_back(ConstantInt::get(FirstIdxType, 0)); + for (Value *Idx : I.indices()) + NewIndices.push_back(Idx); + + SmallVector Types = {I.getType(), I.getPointerOperandType()}; + SmallVector Args; + Args.push_back(B.getInt1(I.isInBounds())); + Args.push_back(I.getPointerOperand()); + Args.append(NewIndices.begin(), NewIndices.end()); + + auto *NewI = B.CreateIntrinsic(Intrinsic::spv_gep, {Types}, {Args}); + replaceAllUsesWithAndErase(B, &I, NewI); + return NewI; + } + } + } + SmallVector Types = {I.getType(), I.getOperand(0)->getType()}; SmallVector Args; Args.push_back(B.getInt1(I.isInBounds())); @@ -1772,16 +1814,12 @@ void SPIRVEmitIntrinsics::insertPtrCastOrAssignTypeInstr(Instruction *I, 
Value *Pointer = GEPI->getPointerOperand(); Type *OpTy = nullptr; - // Knowing the accessed type is mandatory for logical SPIR-V. Sadly, - // the GEP source element type should not be used for this purpose, and - // the alternative type-scavenging method is not working. - // Physical SPIR-V can work around this, but not logical, hence still - // try to rely on the broken type scavenging for logical. - bool IsRewrittenGEP = - GEPI->getSourceElementType() == IntegerType::getInt8Ty(I->getContext()); - if (IsRewrittenGEP && TM->getSubtargetImpl()->isLogicalSPIRV()) { - Value *Src = getPointerRoot(Pointer); - OpTy = GR->findDeducedElementType(Src); + // Logical SPIR-V is not allowed to use Op*PtrAccessChain instructions. If + // the first index is 0, then we can trivially lower to OpAccessChain. If + // not we need to try to rewrite the GEP. We avoid adding a pointer cast at + // this time, and will rewrite the GEP when visiting it. + if (TM->getSubtargetImpl()->isLogicalSPIRV() && !isFirstIndexZero(GEPI)) { + return; } // In all cases, fall back to the GEP type if type scavenging failed. diff --git a/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp b/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp index bd0c7d15afd12..8b1a09caf907d 100644 --- a/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp @@ -22,6 +22,7 @@ #include "llvm/ADT/APInt.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DiagnosticInfo.h" +#include "llvm/IR/Function.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Intrinsics.h" #include "llvm/IR/IntrinsicsSPIRV.h" @@ -224,14 +225,43 @@ SPIRVType *SPIRVGlobalRegistry::getOpTypeVoid(MachineIRBuilder &MIRBuilder) { } void SPIRVGlobalRegistry::invalidateMachineInstr(MachineInstr *MI) { - // TODO: - // - review other data structure wrt. possible issues related to removal - // of a machine instruction during instruction selection. 
+ // Other maps that may hold MachineInstr*: + // - VRegToTypeMap: We cannot remove the definitions of `MI` from + // VRegToTypeMap because some calls to invalidateMachineInstr are replacing MI + // with another instruction defining the same register. We expect that if MI + // is a type instruction, and it is still referenced in VRegToTypeMap, then + // those registers are dead or the VRegToTypeMap is out-of-date. We do not + // expect passes to ask for the SPIR-V type of a dead register. If the + // VRegToTypeMap is out-of-date already, then there was an error before. We + // cannot add an assert to verify this because the VRegToTypeMap can be + // out-of-date. + // - FunctionToInstr & FunctionToInstrRev: At this point, we should not be + // deleting functions. No need to update. + // - AliasInstMDMap: Would require a linear search, and the Intel Alias + // instructions are not instructions instruction selection will be able to + // remove. + + const SPIRVSubtarget &ST = MI->getMF()->getSubtarget(); + [[maybe_unused]] const SPIRVInstrInfo *TII = ST.getInstrInfo(); + assert(!TII->isAliasingInstr(*MI) && + "Cannot invalidate aliasing instructions."); + assert(MI->getOpcode() != SPIRV::OpFunction && + "Cannot invalidate OpFunction."); + + if (MI->getOpcode() == SPIRV::OpFunctionCall) { + if (const auto *F = dyn_cast(MI->getOperand(2).getGlobal())) { + auto It = ForwardCalls.find(F); + if (It != ForwardCalls.end()) { + It->second.erase(MI); + if (It->second.empty()) + ForwardCalls.erase(It); + } + } + } + const MachineFunction *MF = MI->getMF(); auto It = LastInsertedTypeMap.find(MF); - if (It == LastInsertedTypeMap.end()) - return; - if (It->second == MI) + if (It != LastInsertedTypeMap.end() && It->second == MI) LastInsertedTypeMap.erase(MF); // remove from the duplicate tracker to avoid incorrect reuse erase(MI); @@ -314,7 +344,7 @@ Register SPIRVGlobalRegistry::createConstFP(const ConstantFP *CF, LLT LLTy = LLT::scalar(BitWidth); Register Res = 
CurMF->getRegInfo().createGenericVirtualRegister(LLTy); CurMF->getRegInfo().setRegClass(Res, &SPIRV::fIDRegClass); - assignFloatTypeToVReg(BitWidth, Res, I, TII); + assignSPIRVTypeToVReg(SpvType, Res, *CurMF); MachineInstr *DepMI = const_cast(SpvType); MachineIRBuilder MIRBuilder(*DepMI->getParent(), DepMI->getIterator()); @@ -890,6 +920,17 @@ SPIRVType *SPIRVGlobalRegistry::getOpTypeStruct( const StructType *Ty, MachineIRBuilder &MIRBuilder, SPIRV::AccessQualifier::AccessQualifier AccQual, StructOffsetDecorator Decorator, bool EmitIR) { + Type *OriginalElementType = nullptr; + uint64_t TotalSize = 0; + if (matchPeeledArrayPattern(Ty, OriginalElementType, TotalSize)) { + SPIRVType *ElementSPIRVType = findSPIRVType( + OriginalElementType, MIRBuilder, AccQual, + /* ExplicitLayoutRequired= */ Decorator != nullptr, EmitIR); + return getOpTypeArray(TotalSize, ElementSPIRVType, MIRBuilder, + /*ExplicitLayoutRequired=*/Decorator != nullptr, + EmitIR); + } + const SPIRVSubtarget &ST = cast(MIRBuilder.getMF().getSubtarget()); SmallVector FieldTypes; @@ -1414,6 +1455,18 @@ SPIRVType *SPIRVGlobalRegistry::getOrCreateVulkanBufferType( return R; } +SPIRVType * +SPIRVGlobalRegistry::getOrCreatePaddingType(MachineIRBuilder &MIRBuilder) { + auto Key = SPIRV::irhandle_padding(); + if (const MachineInstr *MI = findMI(Key, &MIRBuilder.getMF())) + return MI; + auto *T = Type::getInt8Ty(MIRBuilder.getContext()); + SPIRVType *R = getOrCreateSPIRVIntegerType(8, MIRBuilder); + finishCreatingSPIRVType(T, R); + add(Key, R); + return R; +} + SPIRVType *SPIRVGlobalRegistry::getOrCreateLayoutType( MachineIRBuilder &MIRBuilder, const TargetExtType *T, bool EmitIr) { auto Key = SPIRV::handle(T); diff --git a/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.h b/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.h index 09c77f0cfd4f5..e5a1a2aa8d70f 100644 --- a/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.h +++ b/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.h @@ -611,6 +611,8 @@ class SPIRVGlobalRegistry : public 
SPIRVIRMapping { SPIRV::StorageClass::StorageClass SC, bool IsWritable, bool EmitIr = false); + SPIRVType *getOrCreatePaddingType(MachineIRBuilder &MIRBuilder); + SPIRVType *getOrCreateLayoutType(MachineIRBuilder &MIRBuilder, const TargetExtType *T, bool EmitIr = false); diff --git a/llvm/lib/Target/SPIRV/SPIRVIRMapping.h b/llvm/lib/Target/SPIRV/SPIRVIRMapping.h index c99d603d340ea..47c7676d5631c 100644 --- a/llvm/lib/Target/SPIRV/SPIRVIRMapping.h +++ b/llvm/lib/Target/SPIRV/SPIRVIRMapping.h @@ -64,6 +64,7 @@ enum SpecialTypeKind { STK_Value, STK_MachineInstr, STK_VkBuffer, + STK_Padding, STK_ExplictLayoutType, STK_Last = -1 }; @@ -149,6 +150,10 @@ inline IRHandle irhandle_vkbuffer(const Type *ElementType, SpecialTypeKind::STK_VkBuffer); } +inline IRHandle irhandle_padding() { + return std::make_tuple(nullptr, 0, SpecialTypeKind::STK_Padding); +} + inline IRHandle irhandle_explict_layout_type(const Type *Ty) { const Type *WrpTy = unifyPtrType(Ty); return irhandle_ptr(WrpTy, Ty->getTypeID(), STK_ExplictLayoutType); diff --git a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp index d3fc08eb56cb3..2c27289e759eb 100644 --- a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp @@ -94,6 +94,8 @@ class SPIRVInstructionSelector : public InstructionSelector { private: void resetVRegsType(MachineFunction &MF); + void removeDeadInstruction(MachineInstr &MI) const; + void removeOpNamesForDeadMI(MachineInstr &MI) const; // tblgen-erated 'select' implementation, used as the initial selector for // the patterns that don't require complex C++. 
@@ -149,6 +151,9 @@ class SPIRVInstructionSelector : public InstructionSelector { bool selectStackRestore(MachineInstr &I) const; bool selectMemOperation(Register ResVReg, MachineInstr &I) const; + Register getOrCreateMemSetGlobal(MachineInstr &I) const; + bool selectCopyMemory(MachineInstr &I, Register SrcReg) const; + bool selectCopyMemorySized(MachineInstr &I, Register SrcReg) const; bool selectAtomicRMW(Register ResVReg, const SPIRVType *ResType, MachineInstr &I, unsigned NewOpcode, @@ -467,6 +472,7 @@ static bool isConstReg(MachineRegisterInfo *MRI, MachineInstr *OpDef, switch (Opcode) { case TargetOpcode::G_CONSTANT: case TargetOpcode::G_FCONSTANT: + case TargetOpcode::G_IMPLICIT_DEF: return true; case TargetOpcode::G_INTRINSIC: case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS: @@ -509,22 +515,202 @@ static bool isConstReg(MachineRegisterInfo *MRI, Register OpReg) { return false; } +// TODO(168736): We should make this either a flag in tablegen +// or reduce our dependence on the global registry, so we can remove this +// function. It can easily be missed when new intrinsics are added. + +// Most SPIR-V intrinsics are considered to have side-effects in their tablegen +// definition because they are referenced in the global registry. This is a list +// of intrinsics that have no side effects other than their references in the +// global registry. +static bool intrinsicHasSideEffects(Intrinsic::ID ID) { + switch (ID) { + // This is not an exhaustive list and may need to be updated.
+ case Intrinsic::spv_all: + case Intrinsic::spv_alloca: + case Intrinsic::spv_any: + case Intrinsic::spv_bitcast: + case Intrinsic::spv_const_composite: + case Intrinsic::spv_cross: + case Intrinsic::spv_degrees: + case Intrinsic::spv_distance: + case Intrinsic::spv_extractelt: + case Intrinsic::spv_extractv: + case Intrinsic::spv_faceforward: + case Intrinsic::spv_fdot: + case Intrinsic::spv_firstbitlow: + case Intrinsic::spv_firstbitshigh: + case Intrinsic::spv_firstbituhigh: + case Intrinsic::spv_frac: + case Intrinsic::spv_gep: + case Intrinsic::spv_global_offset: + case Intrinsic::spv_global_size: + case Intrinsic::spv_group_id: + case Intrinsic::spv_insertelt: + case Intrinsic::spv_insertv: + case Intrinsic::spv_isinf: + case Intrinsic::spv_isnan: + case Intrinsic::spv_lerp: + case Intrinsic::spv_length: + case Intrinsic::spv_normalize: + case Intrinsic::spv_num_subgroups: + case Intrinsic::spv_num_workgroups: + case Intrinsic::spv_ptrcast: + case Intrinsic::spv_radians: + case Intrinsic::spv_reflect: + case Intrinsic::spv_refract: + case Intrinsic::spv_resource_getpointer: + case Intrinsic::spv_resource_handlefrombinding: + case Intrinsic::spv_resource_handlefromimplicitbinding: + case Intrinsic::spv_resource_nonuniformindex: + case Intrinsic::spv_rsqrt: + case Intrinsic::spv_saturate: + case Intrinsic::spv_sdot: + case Intrinsic::spv_sign: + case Intrinsic::spv_smoothstep: + case Intrinsic::spv_step: + case Intrinsic::spv_subgroup_id: + case Intrinsic::spv_subgroup_local_invocation_id: + case Intrinsic::spv_subgroup_max_size: + case Intrinsic::spv_subgroup_size: + case Intrinsic::spv_thread_id: + case Intrinsic::spv_thread_id_in_group: + case Intrinsic::spv_udot: + case Intrinsic::spv_undef: + case Intrinsic::spv_value_md: + case Intrinsic::spv_workgroup_size: + return false; + default: + return true; + } +} + +// TODO(168736): We should make this either a flag in tablegen +// or reduce our dependence on the global registry, so we can remove this +// 
function. It can easily be missed when new intrinsics are added. +static bool isOpcodeWithNoSideEffects(unsigned Opcode) { + switch (Opcode) { + case SPIRV::OpTypeVoid: + case SPIRV::OpTypeBool: + case SPIRV::OpTypeInt: + case SPIRV::OpTypeFloat: + case SPIRV::OpTypeVector: + case SPIRV::OpTypeMatrix: + case SPIRV::OpTypeImage: + case SPIRV::OpTypeSampler: + case SPIRV::OpTypeSampledImage: + case SPIRV::OpTypeArray: + case SPIRV::OpTypeRuntimeArray: + case SPIRV::OpTypeStruct: + case SPIRV::OpTypeOpaque: + case SPIRV::OpTypePointer: + case SPIRV::OpTypeFunction: + case SPIRV::OpTypeEvent: + case SPIRV::OpTypeDeviceEvent: + case SPIRV::OpTypeReserveId: + case SPIRV::OpTypeQueue: + case SPIRV::OpTypePipe: + case SPIRV::OpTypeForwardPointer: + case SPIRV::OpTypePipeStorage: + case SPIRV::OpTypeNamedBarrier: + case SPIRV::OpTypeAccelerationStructureNV: + case SPIRV::OpTypeCooperativeMatrixNV: + case SPIRV::OpTypeCooperativeMatrixKHR: + return true; + default: + return false; + } +} + bool isDead(const MachineInstr &MI, const MachineRegisterInfo &MRI) { + // If there are no definitions, then assume there is some other + // side-effect that makes this instruction live. 
+ if (MI.getNumDefs() == 0) + return false; + for (const auto &MO : MI.all_defs()) { Register Reg = MO.getReg(); - if (Reg.isPhysical() || !MRI.use_nodbg_empty(Reg)) + if (Reg.isPhysical()) { + LLVM_DEBUG(dbgs() << "Not dead: def of physical register " << Reg); return false; + } + for (const auto &UseMI : MRI.use_nodbg_instructions(Reg)) { + if (UseMI.getOpcode() != SPIRV::OpName) { + LLVM_DEBUG(dbgs() << "Not dead: def " << MO << " has use in " << UseMI); + return false; + } + } } + if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE || MI.isFakeUse() || - MI.isLifetimeMarker()) + MI.isLifetimeMarker()) { + LLVM_DEBUG( + dbgs() + << "Not dead: Opcode is LOCAL_ESCAPE, fake use, or lifetime marker.\n"); return false; - if (MI.isPHI()) + } + if (MI.isPHI()) { + LLVM_DEBUG(dbgs() << "Dead: Phi instruction with no uses.\n"); return true; + } + + // It is possible that the only side effect is that the instruction is + // referenced in the global registry. If that is the only side effect, the + // intrinsic is dead. + if (MI.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS || + MI.getOpcode() == TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS) { + const auto &Intr = cast(MI); + if (!intrinsicHasSideEffects(Intr.getIntrinsicID())) { + LLVM_DEBUG(dbgs() << "Dead: Intrinsic with no real side effects.\n"); + return true; + } + } + if (MI.mayStore() || MI.isCall() || (MI.mayLoad() && MI.hasOrderedMemoryRef()) || MI.isPosition() || - MI.isDebugInstr() || MI.isTerminator() || MI.isJumpTableDebugInfo()) + MI.isDebugInstr() || MI.isTerminator() || MI.isJumpTableDebugInfo()) { + LLVM_DEBUG(dbgs() << "Not dead: instruction has side effects.\n"); return false; - return true; + } + + if (isPreISelGenericOpcode(MI.getOpcode())) { + // TODO: Is there a generic way to check if the opcode has side effects? 
+ LLVM_DEBUG(dbgs() << "Dead: Generic opcode with no uses.\n"); + return true; + } + + if (isOpcodeWithNoSideEffects(MI.getOpcode())) { + LLVM_DEBUG(dbgs() << "Dead: known opcode with no side effects\n"); + return true; + } + + return false; +} + +void SPIRVInstructionSelector::removeOpNamesForDeadMI(MachineInstr &MI) const { + // Delete the OpName that uses the result if there is one. + for (const auto &MO : MI.all_defs()) { + Register Reg = MO.getReg(); + if (Reg.isPhysical()) + continue; + SmallVector UselessOpNames; + for (MachineInstr &UseMI : MRI->use_nodbg_instructions(Reg)) { + assert(UseMI.getOpcode() == SPIRV::OpName && + "There is still a use of the dead function."); + UselessOpNames.push_back(&UseMI); + } + for (MachineInstr *OpNameMI : UselessOpNames) { + GR.invalidateMachineInstr(OpNameMI); + OpNameMI->eraseFromParent(); + } + } +} + +void SPIRVInstructionSelector::removeDeadInstruction(MachineInstr &MI) const { + salvageDebugInfo(*MRI, MI); + GR.invalidateMachineInstr(&MI); + removeOpNamesForDeadMI(MI); + MI.eraseFromParent(); } bool SPIRVInstructionSelector::select(MachineInstr &I) { @@ -533,6 +719,13 @@ bool SPIRVInstructionSelector::select(MachineInstr &I) { assert(I.getParent() && "Instruction should be in a basic block!"); assert(I.getParent()->getParent() && "Instruction should be in a function!"); + LLVM_DEBUG(dbgs() << "Checking if instruction is dead: " << I;); + if (isDead(I, *MRI)) { + LLVM_DEBUG(dbgs() << "Instruction is dead.\n"); + removeDeadInstruction(I); + return true; + } + Register Opcode = I.getOpcode(); // If it's not a GMIR instruction, we've selected it already. 
if (!isPreISelGenericOpcode(Opcode)) { @@ -584,9 +777,7 @@ bool SPIRVInstructionSelector::select(MachineInstr &I) { // if the instruction has been already made dead by folding it away // erase it LLVM_DEBUG(dbgs() << "Instruction is folded and dead.\n"); - salvageDebugInfo(*MRI, I); - GR.invalidateMachineInstr(&I); - I.eraseFromParent(); + removeDeadInstruction(I); return true; } @@ -1435,50 +1626,79 @@ bool SPIRVInstructionSelector::selectStackRestore(MachineInstr &I) const { .constrainAllUses(TII, TRI, RBI); } -bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg, - MachineInstr &I) const { +Register +SPIRVInstructionSelector::getOrCreateMemSetGlobal(MachineInstr &I) const { + MachineIRBuilder MIRBuilder(I); + assert(I.getOperand(1).isReg() && I.getOperand(2).isReg()); + + // TODO: check if we have such GV, add init, use buildGlobalVariable. + unsigned Num = getIConstVal(I.getOperand(2).getReg(), MRI); + Function &CurFunction = GR.CurMF->getFunction(); + Type *LLVMArrTy = + ArrayType::get(IntegerType::get(CurFunction.getContext(), 8), Num); + GlobalVariable *GV = new GlobalVariable(*CurFunction.getParent(), LLVMArrTy, + true, GlobalValue::InternalLinkage, + Constant::getNullValue(LLVMArrTy)); + + Type *ValTy = Type::getInt8Ty(I.getMF()->getFunction().getContext()); + Type *ArrTy = ArrayType::get(ValTy, Num); + SPIRVType *VarTy = GR.getOrCreateSPIRVPointerType( + ArrTy, MIRBuilder, SPIRV::StorageClass::UniformConstant); + + SPIRVType *SpvArrTy = GR.getOrCreateSPIRVType( + ArrTy, MIRBuilder, SPIRV::AccessQualifier::None, false); + + unsigned Val = getIConstVal(I.getOperand(1).getReg(), MRI); + Register Const = GR.getOrCreateConstIntArray(Val, Num, I, SpvArrTy, TII); + + Register VarReg = MRI->createGenericVirtualRegister(LLT::scalar(64)); + auto MIBVar = + BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable)) + .addDef(VarReg) + .addUse(GR.getSPIRVTypeID(VarTy)) + .addImm(SPIRV::StorageClass::UniformConstant) + .addUse(Const); + if 
(!MIBVar.constrainAllUses(TII, TRI, RBI)) + return Register(); + + GR.add(GV, MIBVar); + GR.addGlobalObject(GV, GR.CurMF, VarReg); + + buildOpDecorate(VarReg, I, TII, SPIRV::Decoration::Constant, {}); + return VarReg; +} + +bool SPIRVInstructionSelector::selectCopyMemory(MachineInstr &I, + Register SrcReg) const { MachineBasicBlock &BB = *I.getParent(); - Register SrcReg = I.getOperand(1).getReg(); - bool Result = true; - if (I.getOpcode() == TargetOpcode::G_MEMSET) { + Register DstReg = I.getOperand(0).getReg(); + SPIRVType *DstTy = GR.getSPIRVTypeForVReg(DstReg); + SPIRVType *SrcTy = GR.getSPIRVTypeForVReg(SrcReg); + if (GR.getPointeeType(DstTy) != GR.getPointeeType(SrcTy)) + report_fatal_error("OpCopyMemory requires operands to have the same type"); + uint64_t CopySize = getIConstVal(I.getOperand(2).getReg(), MRI); + SPIRVType *PointeeTy = GR.getPointeeType(DstTy); + const Type *LLVMPointeeTy = GR.getTypeForSPIRVType(PointeeTy); + if (!LLVMPointeeTy) + report_fatal_error( + "Unable to determine pointee type size for OpCopyMemory"); + const DataLayout &DL = I.getMF()->getFunction().getDataLayout(); + if (CopySize != DL.getTypeStoreSize(const_cast(LLVMPointeeTy))) + report_fatal_error( + "OpCopyMemory requires the size to match the pointee type size"); + auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCopyMemory)) + .addUse(DstReg) + .addUse(SrcReg); + if (I.getNumMemOperands()) { MachineIRBuilder MIRBuilder(I); - assert(I.getOperand(1).isReg() && I.getOperand(2).isReg()); - unsigned Val = getIConstVal(I.getOperand(1).getReg(), MRI); - unsigned Num = getIConstVal(I.getOperand(2).getReg(), MRI); - Type *ValTy = Type::getInt8Ty(I.getMF()->getFunction().getContext()); - Type *ArrTy = ArrayType::get(ValTy, Num); - SPIRVType *VarTy = GR.getOrCreateSPIRVPointerType( - ArrTy, MIRBuilder, SPIRV::StorageClass::UniformConstant); - - SPIRVType *SpvArrTy = GR.getOrCreateSPIRVType( - ArrTy, MIRBuilder, SPIRV::AccessQualifier::None, false); - Register Const = 
GR.getOrCreateConstIntArray(Val, Num, I, SpvArrTy, TII); - // TODO: check if we have such GV, add init, use buildGlobalVariable. - Function &CurFunction = GR.CurMF->getFunction(); - Type *LLVMArrTy = - ArrayType::get(IntegerType::get(CurFunction.getContext(), 8), Num); - // Module takes ownership of the global var. - GlobalVariable *GV = new GlobalVariable(*CurFunction.getParent(), LLVMArrTy, - true, GlobalValue::InternalLinkage, - Constant::getNullValue(LLVMArrTy)); - Register VarReg = MRI->createGenericVirtualRegister(LLT::scalar(64)); - auto MIBVar = - BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable)) - .addDef(VarReg) - .addUse(GR.getSPIRVTypeID(VarTy)) - .addImm(SPIRV::StorageClass::UniformConstant) - .addUse(Const); - Result &= MIBVar.constrainAllUses(TII, TRI, RBI); - - GR.add(GV, MIBVar); - GR.addGlobalObject(GV, GR.CurMF, VarReg); - - buildOpDecorate(VarReg, I, TII, SPIRV::Decoration::Constant, {}); - SPIRVType *SourceTy = GR.getOrCreateSPIRVPointerType( - ValTy, I, SPIRV::StorageClass::UniformConstant); - SrcReg = MRI->createGenericVirtualRegister(LLT::scalar(64)); - selectOpWithSrcs(SrcReg, SourceTy, I, {VarReg}, SPIRV::OpBitcast); + addMemoryOperands(*I.memoperands_begin(), MIB, MIRBuilder, GR); } + return MIB.constrainAllUses(TII, TRI, RBI); +} + +bool SPIRVInstructionSelector::selectCopyMemorySized(MachineInstr &I, + Register SrcReg) const { + MachineBasicBlock &BB = *I.getParent(); auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCopyMemorySized)) .addUse(I.getOperand(0).getReg()) .addUse(SrcReg) @@ -1487,9 +1707,30 @@ bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg, MachineIRBuilder MIRBuilder(I); addMemoryOperands(*I.memoperands_begin(), MIB, MIRBuilder, GR); } - Result &= MIB.constrainAllUses(TII, TRI, RBI); - if (ResVReg.isValid() && ResVReg != MIB->getOperand(0).getReg()) - Result &= BuildCOPY(ResVReg, MIB->getOperand(0).getReg(), I); + return MIB.constrainAllUses(TII, TRI, RBI); +} + +bool 
SPIRVInstructionSelector::selectMemOperation(Register ResVReg, + MachineInstr &I) const { + Register SrcReg = I.getOperand(1).getReg(); + bool Result = true; + if (I.getOpcode() == TargetOpcode::G_MEMSET) { + Register VarReg = getOrCreateMemSetGlobal(I); + if (!VarReg.isValid()) + return false; + Type *ValTy = Type::getInt8Ty(I.getMF()->getFunction().getContext()); + SPIRVType *SourceTy = GR.getOrCreateSPIRVPointerType( + ValTy, I, SPIRV::StorageClass::UniformConstant); + SrcReg = MRI->createGenericVirtualRegister(LLT::scalar(64)); + Result &= selectOpWithSrcs(SrcReg, SourceTy, I, {VarReg}, SPIRV::OpBitcast); + } + if (STI.isLogicalSPIRV()) { + Result &= selectCopyMemory(I, SrcReg); + } else { + Result &= selectCopyMemorySized(I, SrcReg); + } + if (ResVReg.isValid() && ResVReg != I.getOperand(0).getReg()) + Result &= BuildCOPY(ResVReg, I.getOperand(0).getReg(), I); return Result; } @@ -3088,6 +3329,11 @@ bool SPIRVInstructionSelector::selectGEP(Register ResVReg, .addUse(GR.getSPIRVTypeID(ResType)) // Object to get a pointer to. .addUse(I.getOperand(3).getReg()); + assert(Opcode == SPIRV::OpPtrAccessChain || + Opcode == SPIRV::OpInBoundsPtrAccessChain || + (getImm(I.getOperand(4), MRI) && foldImm(I.getOperand(4), MRI) == 0) && + "Cannot translate GEP to OpAccessChain. First index must be 0."); + // Adding indices. 
const unsigned StartingIndex = (Opcode == SPIRV::OpAccessChain || Opcode == SPIRV::OpInBoundsAccessChain) diff --git a/llvm/lib/Target/SPIRV/SPIRVLegalizePointerCast.cpp b/llvm/lib/Target/SPIRV/SPIRVLegalizePointerCast.cpp index 4ce871b6f5e5d..81c7596530ee2 100644 --- a/llvm/lib/Target/SPIRV/SPIRVLegalizePointerCast.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVLegalizePointerCast.cpp @@ -104,9 +104,13 @@ class SPIRVLegalizePointerCast : public FunctionPass { Value *loadFirstValueFromAggregate(IRBuilder<> &B, Type *ElementType, Value *Source, LoadInst *BadLoad) { SmallVector Types = {BadLoad->getPointerOperandType(), - BadLoad->getPointerOperandType()}; - SmallVector Args{/* isInBounds= */ B.getInt1(false), Source, - B.getInt32(0), B.getInt32(0)}; + Source->getType()}; + SmallVector Args{/* isInBounds= */ B.getInt1(false), Source}; + + Type *AggregateType = GR->findDeducedElementType(Source); + assert(AggregateType && "Could not deduce aggregate type"); + buildGEPIndexChain(B, ElementType, AggregateType, Args); + auto *GEP = B.CreateIntrinsic(Intrinsic::spv_gep, {Types}, {Args}); GR->buildAssignPtr(B, ElementType, GEP); @@ -201,34 +205,20 @@ class SPIRVLegalizePointerCast : public FunctionPass { auto *SAT = dyn_cast(FromTy); auto *SVT = dyn_cast(FromTy); - auto *SST = dyn_cast(FromTy); auto *DVT = dyn_cast(ToTy); B.SetInsertPoint(LI); - // Destination is the element type of Source, and source is an array -> - // Loading 1st element. + // Destination is the element type of some member of FromTy. For example, + // loading the 1st element of an array: // - float a = array[0]; - if (SAT && SAT->getElementType() == ToTy) - Output = loadFirstValueFromAggregate(B, SAT->getElementType(), - OriginalOperand, LI); - // Destination is the element type of Source, and source is a vector -> - // Vector to scalar. 
- // - float a = vector.x; - else if (!DVT && SVT && SVT->getElementType() == ToTy) { - Output = loadFirstValueFromAggregate(B, SVT->getElementType(), - OriginalOperand, LI); - } + if (isTypeFirstElementAggregate(ToTy, FromTy)) + Output = loadFirstValueFromAggregate(B, ToTy, OriginalOperand, LI); // Destination is a smaller vector than source or different vector type. // - float3 v3 = vector4; // - float4 v2 = int4; else if (SVT && DVT) Output = loadVectorFromVector(B, SVT, DVT, OriginalOperand); - // Destination is the scalar type stored at the start of an aggregate. - // - struct S { float m }; - // - float v = s.m; - else if (SST && SST->getTypeAtIndex(0u) == ToTy) - Output = loadFirstValueFromAggregate(B, ToTy, OriginalOperand, LI); else if (SAT && DVT && SAT->getElementType() == DVT->getElementType()) Output = loadVectorFromArray(B, DVT, OriginalOperand); else @@ -334,7 +324,7 @@ class SPIRVLegalizePointerCast : public FunctionPass { Value *storeToFirstValueAggregate(IRBuilder<> &B, Value *Src, Value *Dst, Type *DstPointeeType, Align Alignment) { SmallVector Types = {Dst->getType(), Dst->getType()}; - SmallVector Args{/* isInBounds= */ B.getInt1(true), Dst}; + SmallVector Args{/* isInBounds= */ B.getInt1(true), Dst}; buildGEPIndexChain(B, Src->getType(), DstPointeeType, Args); auto *GEP = B.CreateIntrinsic(Intrinsic::spv_gep, {Types}, {Args}); GR->buildAssignPtr(B, Src->getType(), GEP); diff --git a/llvm/lib/Target/SPIRV/SPIRVUtils.cpp b/llvm/lib/Target/SPIRV/SPIRVUtils.cpp index 8f2fc01da476f..7fdb0fafa3719 100644 --- a/llvm/lib/Target/SPIRV/SPIRVUtils.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVUtils.cpp @@ -1042,6 +1042,75 @@ getFirstValidInstructionInsertPoint(MachineBasicBlock &BB) { : VarPos; } +bool matchPeeledArrayPattern(const StructType *Ty, Type *&OriginalElementType, + uint64_t &TotalSize) { + // An array of N padded structs is represented as {[N-1 x <{T, pad}>], T}. 
+ if (Ty->getStructNumElements() != 2) + return false; + + Type *FirstElement = Ty->getStructElementType(0); + Type *SecondElement = Ty->getStructElementType(1); + + if (!FirstElement->isArrayTy()) + return false; + + Type *ArrayElementType = FirstElement->getArrayElementType(); + if (!ArrayElementType->isStructTy() || + ArrayElementType->getStructNumElements() != 2) + return false; + + Type *T_in_struct = ArrayElementType->getStructElementType(0); + if (T_in_struct != SecondElement) + return false; + + auto *Padding_in_struct = + dyn_cast(ArrayElementType->getStructElementType(1)); + if (!Padding_in_struct || Padding_in_struct->getName() != "spirv.Padding") + return false; + + const uint64_t ArraySize = FirstElement->getArrayNumElements(); + TotalSize = ArraySize + 1; + OriginalElementType = ArrayElementType; + return true; +} + +Type *reconstitutePeeledArrayType(Type *Ty) { + if (!Ty->isStructTy()) + return Ty; + + auto *STy = cast(Ty); + Type *OriginalElementType = nullptr; + uint64_t TotalSize = 0; + if (matchPeeledArrayPattern(STy, OriginalElementType, TotalSize)) { + Type *ResultTy = ArrayType::get( + reconstitutePeeledArrayType(OriginalElementType), TotalSize); + return ResultTy; + } + + SmallVector NewElementTypes; + bool Changed = false; + for (Type *ElementTy : STy->elements()) { + Type *NewElementTy = reconstitutePeeledArrayType(ElementTy); + if (NewElementTy != ElementTy) + Changed = true; + NewElementTypes.push_back(NewElementTy); + } + + if (!Changed) + return Ty; + + Type *ResultTy; + if (STy->isLiteral()) + ResultTy = + StructType::get(STy->getContext(), NewElementTypes, STy->isPacked()); + else { + auto *NewTy = StructType::create(STy->getContext(), STy->getName()); + NewTy->setBody(NewElementTypes, STy->isPacked()); + ResultTy = NewTy; + } + return ResultTy; +} + std::optional getSpirvLinkageTypeFor(const SPIRVSubtarget &ST, const GlobalValue &GV) { if (GV.hasLocalLinkage() || GV.hasHiddenVisibility()) diff --git 
a/llvm/lib/Target/SPIRV/SPIRVUtils.h b/llvm/lib/Target/SPIRV/SPIRVUtils.h index 99d9d403ea70c..45e211a1e5d2a 100644 --- a/llvm/lib/Target/SPIRV/SPIRVUtils.h +++ b/llvm/lib/Target/SPIRV/SPIRVUtils.h @@ -321,6 +321,21 @@ Type *parseBasicTypeName(StringRef &TypeName, LLVMContext &Ctx); // Returns true if the function was changed. bool sortBlocks(Function &F); +// Check for peeled array structs and recursively reconstitute them. In HLSL +// CBuffers, arrays may have padding between the elements, but not after the +// last element. To represent this in LLVM IR an array [N x T] will be +// represented as {[N-1 x {T, spirv.Padding}], T}. The function +// matchPeeledArrayPattern recognizes this pattern retrieving the type {T, +// spirv.Padding}, and the size N. +bool matchPeeledArrayPattern(const StructType *Ty, Type *&OriginalElementType, + uint64_t &TotalSize); + +// This function will turn the type {[N-1 x {T, spirv.Padding}], T} back into +// [N x {T, spirv.Padding}]. So it can be translated into SPIR-V. The offset +// decorations will be such that there will be no padding after the array when +// relevant. 
+Type *reconstitutePeeledArrayType(Type *Ty); + inline bool hasInitializer(const GlobalVariable *GV) { return GV->hasInitializer() && !isa(GV->getInitializer()); } diff --git a/llvm/lib/Target/Sparc/Sparc.td b/llvm/lib/Target/Sparc/Sparc.td index 38b0508885069..ecf82fab5cc41 100644 --- a/llvm/lib/Target/Sparc/Sparc.td +++ b/llvm/lib/Target/Sparc/Sparc.td @@ -126,6 +126,8 @@ include "SparcCallingConv.td" include "SparcSchedule.td" include "SparcInstrInfo.td" +defm : RemapAllTargetPseudoPointerOperands; + def SparcInstrInfo : InstrInfo; def SparcAsmParser : AsmParser { diff --git a/llvm/lib/Target/SystemZ/SystemZ.td b/llvm/lib/Target/SystemZ/SystemZ.td index ec110645c62dd..95f039d6328f3 100644 --- a/llvm/lib/Target/SystemZ/SystemZ.td +++ b/llvm/lib/Target/SystemZ/SystemZ.td @@ -57,6 +57,9 @@ include "SystemZInstrHFP.td" include "SystemZInstrDFP.td" include "SystemZInstrSystem.td" + +defm : RemapAllTargetPseudoPointerOperands; + def SystemZInstrInfo : InstrInfo { let guessInstructionProperties = 0; } //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp b/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp index e31d7c6a86476..f061272d3fad4 100644 --- a/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp +++ b/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp @@ -1270,7 +1270,7 @@ void SystemZAsmPrinter::emitFunctionBodyEnd() { static void emitPPA1Flags(std::unique_ptr &OutStreamer, bool VarArg, bool StackProtector, bool FPRMask, bool VRMask, - bool EHBlock, bool HasName) { + bool EHBlock, bool HasArgAreaLength, bool HasName) { enum class PPA1Flag1 : uint8_t { DSA64Bit = (0x80 >> 0), VarArg = (0x80 >> 7), @@ -1282,8 +1282,9 @@ static void emitPPA1Flags(std::unique_ptr &OutStreamer, bool VarArg, LLVM_MARK_AS_BITMASK_ENUM(ExternalProcedure) }; enum class PPA1Flag3 : uint8_t { + HasArgAreaLength = (0x80 >> 1), FPRMask = (0x80 >> 2), - LLVM_MARK_AS_BITMASK_ENUM(FPRMask) + 
LLVM_MARK_AS_BITMASK_ENUM(HasArgAreaLength) }; enum class PPA1Flag4 : uint8_t { EPMOffsetPresent = (0x80 >> 0), @@ -1307,6 +1308,9 @@ static void emitPPA1Flags(std::unique_ptr &OutStreamer, bool VarArg, if (StackProtector) Flags2 |= PPA1Flag2::STACKPROTECTOR; + if (HasArgAreaLength) + Flags3 |= PPA1Flag3::HasArgAreaLength; // Add emit ArgAreaLength flag. + // SavedGPRMask, SavedFPRMask, and SavedVRMask are precomputed in. if (FPRMask) Flags3 |= PPA1Flag3::FPRMask; // Add emit FPR mask flag. @@ -1339,6 +1343,9 @@ static void emitPPA1Flags(std::unique_ptr &OutStreamer, bool VarArg, OutStreamer->emitInt8(static_cast(Flags2)); // Flags 2. OutStreamer->AddComment("PPA1 Flags 3"); + if ((Flags3 & PPA1Flag3::HasArgAreaLength) == PPA1Flag3::HasArgAreaLength) + OutStreamer->AddComment( + " Bit 1: 1 = Argument Area Length is in optional area"); if ((Flags3 & PPA1Flag3::FPRMask) == PPA1Flag3::FPRMask) OutStreamer->AddComment(" Bit 2: 1 = FP Reg Mask is in optional area"); OutStreamer->emitInt8( @@ -1477,12 +1484,26 @@ void SystemZAsmPrinter::emitPPA1(MCSymbol *FnEndSym) { bool NeedEmitEHBlock = !MF->getLandingPads().empty(); + // Optional Argument Area Length. + // Note: This represents the length of the argument area that we reserve + // in our stack for setting up arguments for calls to other + // routines. If this optional field is not set, LE will reserve + // 128 bytes for the argument area. This optional field is + // created if greater than 128 bytes is required - to guarantee + // the required space is reserved on stack extension in the new + // extension. This optional field is also created if the + // routine has alloca(). This may reduce stack space + // if alloca() call causes a stack extension. 
+ bool HasArgAreaLength = + (AllocaReg != 0) || (MFFrame.getMaxCallFrameSize() > 128); + bool HasName = MF->getFunction().hasName() && MF->getFunction().getName().size() > 0; emitPPA1Flags(OutStreamer, MF->getFunction().isVarArg(), MFFrame.hasStackProtectorIndex(), SavedFPRMask != 0, - TargetHasVector && SavedVRMask != 0, NeedEmitEHBlock, HasName); + TargetHasVector && SavedVRMask != 0, NeedEmitEHBlock, + HasArgAreaLength, HasName); OutStreamer->AddComment("Length/4 of Parms"); OutStreamer->emitInt16( @@ -1490,6 +1511,11 @@ void SystemZAsmPrinter::emitPPA1(MCSymbol *FnEndSym) { OutStreamer->AddComment("Length of Code"); OutStreamer->emitAbsoluteSymbolDiff(FnEndSym, CurrentFnEPMarkerSym, 4); + if (HasArgAreaLength) { + OutStreamer->AddComment("Argument Area Length"); + OutStreamer->emitInt32(MFFrame.getMaxCallFrameSize()); + } + // Emit saved FPR mask and offset to FPR save area (0x20 of flags 3). if (SavedFPRMask) { OutStreamer->AddComment("FPR mask"); diff --git a/llvm/lib/Target/VE/VE.td b/llvm/lib/Target/VE/VE.td index bb076bd9f6d41..aedce0f4ebc8f 100644 --- a/llvm/lib/Target/VE/VE.td +++ b/llvm/lib/Target/VE/VE.td @@ -30,6 +30,7 @@ include "VERegisterInfo.td" include "VECallingConv.td" include "VEInstrInfo.td" +defm : RemapAllTargetPseudoPointerOperands; def VEInstrInfo : InstrInfo {} def VEAsmParser : AsmParser { diff --git a/llvm/lib/Target/WebAssembly/WebAssembly.td b/llvm/lib/Target/WebAssembly/WebAssembly.td index 089be5f1dc70e..67015ffcfc760 100644 --- a/llvm/lib/Target/WebAssembly/WebAssembly.td +++ b/llvm/lib/Target/WebAssembly/WebAssembly.td @@ -108,6 +108,14 @@ include "WebAssemblyRegisterInfo.td" include "WebAssemblyInstrInfo.td" +def WASM64 : HwMode<[HasAddr64]>; + +def wasm_ptr_rc : RegClassByHwMode< + [DefaultMode, WASM64], + [I32, I64]>; + +defm : RemapAllTargetPseudoPointerOperands; + def WebAssemblyInstrInfo : InstrInfo; //===----------------------------------------------------------------------===// diff --git 
a/llvm/lib/Target/X86/MCTargetDesc/X86WinCOFFStreamer.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86WinCOFFStreamer.cpp index 1ef10928c05d8..abbb0c2466e7d 100644 --- a/llvm/lib/Target/X86/MCTargetDesc/X86WinCOFFStreamer.cpp +++ b/llvm/lib/Target/X86/MCTargetDesc/X86WinCOFFStreamer.cpp @@ -65,7 +65,7 @@ void X86WinCOFFStreamer::emitCVFPOData(const MCSymbol *ProcSym, SMLoc Loc) { } void X86WinCOFFStreamer::finishImpl() { - emitFrames(nullptr); + emitFrames(); emitWindowsUnwindTables(); MCWinCOFFStreamer::finishImpl(); diff --git a/llvm/lib/Target/X86/X86.td b/llvm/lib/Target/X86/X86.td index 27ec052cfda40..8f29a64d58194 100644 --- a/llvm/lib/Target/X86/X86.td +++ b/llvm/lib/Target/X86/X86.td @@ -795,6 +795,8 @@ include "X86Schedule.td" include "X86InstrInfo.td" include "X86SchedPredicates.td" +defm : RemapAllTargetPseudoPointerOperands; + def X86InstrInfo : InstrInfo; //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/XCore/XCore.td b/llvm/lib/Target/XCore/XCore.td index a97b3dd1d0a2b..fa8b9fe26bbe1 100644 --- a/llvm/lib/Target/XCore/XCore.td +++ b/llvm/lib/Target/XCore/XCore.td @@ -24,6 +24,8 @@ include "XCoreRegisterInfo.td" include "XCoreInstrInfo.td" include "XCoreCallingConv.td" +defm : RemapAllTargetPseudoPointerOperands; + def XCoreInstrInfo : InstrInfo; //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/Xtensa/Xtensa.td b/llvm/lib/Target/Xtensa/Xtensa.td index 4ef885e19101e..eecf42f46f88b 100644 --- a/llvm/lib/Target/Xtensa/Xtensa.td +++ b/llvm/lib/Target/Xtensa/Xtensa.td @@ -44,6 +44,8 @@ include "XtensaCallingConv.td" include "XtensaInstrInfo.td" +defm : RemapAllTargetPseudoPointerOperands; + def XtensaInstrInfo : InstrInfo; //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp index 
8e4edefec42fd..9543d97616ae3 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp @@ -3077,6 +3077,11 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) { } case Intrinsic::ptrauth_auth: case Intrinsic::ptrauth_resign: { + // We don't support this optimization on intrinsic calls with deactivation + // symbols, which are represented using operand bundles. + if (II->hasOperandBundles()) + break; + // (sign|resign) + (auth|resign) can be folded by omitting the middle // sign+auth component if the key and discriminator match. bool NeedSign = II->getIntrinsicID() == Intrinsic::ptrauth_resign; @@ -3088,6 +3093,11 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) { // whatever we replace this sequence with. Value *AuthKey = nullptr, *AuthDisc = nullptr, *BasePtr; if (const auto *CI = dyn_cast(Ptr)) { + // We don't support this optimization on intrinsic calls with deactivation + // symbols, which are represented using operand bundles. 
+ if (CI->hasOperandBundles()) + break; + BasePtr = CI->getArgOperand(0); if (CI->getIntrinsicID() == Intrinsic::ptrauth_sign) { if (CI->getArgOperand(1) != Key || CI->getArgOperand(2) != Disc) @@ -3110,9 +3120,10 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) { if (NeedSign && isa(II->getArgOperand(4))) { auto *SignKey = cast(II->getArgOperand(3)); auto *SignDisc = cast(II->getArgOperand(4)); - auto *SignAddrDisc = ConstantPointerNull::get(Builder.getPtrTy()); + auto *Null = ConstantPointerNull::get(Builder.getPtrTy()); auto *NewCPA = ConstantPtrAuth::get(CPA->getPointer(), SignKey, - SignDisc, SignAddrDisc); + SignDisc, /*AddrDisc=*/Null, + /*DeactivationSymbol=*/Null); replaceInstUsesWith( *II, ConstantExpr::getPointerCast(NewCPA, II->getType())); return eraseInstFromFunction(*II); diff --git a/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp b/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp index 9239ae8741afb..b5a8f79e26436 100644 --- a/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp +++ b/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp @@ -178,6 +178,8 @@ getRuntimeCallName(const BoundsCheckingPass::Options::Runtime &Opts) { Name += "_minimal"; if (!Opts.MayReturn) Name += "_abort"; + else if (Opts.HandlerPreserveAllRegs) + Name += "_preserve"; return Name; } @@ -267,7 +269,10 @@ static bool addBoundsChecking(Function &F, TargetLibraryInfo &TLI, TrapCall->setDoesNotReturn(); IRB.CreateUnreachable(); } - + // The preserve-all logic is somewhat duplicated in CGExpr.cpp for + // local-bounds. Make sure to change that too. 
+ if (Opts.Rt && Opts.Rt->HandlerPreserveAllRegs && MayReturn) + TrapCall->setCallingConv(CallingConv::PreserveAll); if (!MayReturn && SingleTrapBB && !DebugTrapBB) ReuseTrapBB = TrapBB; diff --git a/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp b/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp index b46527eb1057b..19d801acd928e 100644 --- a/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp +++ b/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp @@ -1855,7 +1855,7 @@ bool IndVarSimplify::predicateLoopExits(Loop *L, SCEVExpander &Rewriter) { // is that enough for *all* side effects? bool HasThreadLocalSideEffects = false; for (BasicBlock *BB : L->blocks()) - for (auto &I : *BB) + for (auto &I : *BB) { // TODO:isGuaranteedToTransfer if (I.mayHaveSideEffects()) { if (!LoopPredicationTraps) @@ -1873,6 +1873,18 @@ bool IndVarSimplify::predicateLoopExits(Loop *L, SCEVExpander &Rewriter) { } } + // Skip if the loop has tokens referenced outside the loop to avoid + // changing convergence behavior. + if (I.getType()->isTokenTy()) { + for (User *U : I.users()) { + Instruction *UserInst = dyn_cast(U); + if (UserInst && !L->contains(UserInst)) { + return false; + } + } + } + } + bool Changed = false; // Finally, do the actual predication for all predicatable blocks. A couple // of notes here: diff --git a/llvm/lib/Transforms/Utils/ValueMapper.cpp b/llvm/lib/Transforms/Utils/ValueMapper.cpp index 9021d8b289baf..6e36006890df4 100644 --- a/llvm/lib/Transforms/Utils/ValueMapper.cpp +++ b/llvm/lib/Transforms/Utils/ValueMapper.cpp @@ -526,8 +526,9 @@ Value *Mapper::mapValue(const Value *V) { if (isa(C)) return getVM()[V] = ConstantVector::get(Ops); if (isa(C)) - return getVM()[V] = ConstantPtrAuth::get(Ops[0], cast(Ops[1]), - cast(Ops[2]), Ops[3]); + return getVM()[V] = + ConstantPtrAuth::get(Ops[0], cast(Ops[1]), + cast(Ops[2]), Ops[3], Ops[4]); // If this is a no-operand constant, it must be because the type was remapped. 
if (isa(C)) return getVM()[V] = PoisonValue::get(NewTy); diff --git a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp index 6d24c407eb5f4..c28314f6ab124 100644 --- a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp +++ b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp @@ -953,15 +953,15 @@ bool Vectorizer::vectorizeChain(Chain &C) { unsigned EOffset = (E.OffsetFromLeader - C[0].OffsetFromLeader).getZExtValue(); unsigned VecIdx = 8 * EOffset / DL.getTypeSizeInBits(VecElemTy); - if (auto *VT = dyn_cast(T)) { + if (!VecTy->isVectorTy()) { + V = VecInst; + } else if (auto *VT = dyn_cast(T)) { auto Mask = llvm::to_vector<8>( llvm::seq(VecIdx, VecIdx + VT->getNumElements())); V = Builder.CreateShuffleVector(VecInst, Mask, I->getName()); - } else if (VecTy != VecElemTy) { + } else { V = Builder.CreateExtractElement(VecInst, Builder.getInt32(VecIdx), I->getName()); - } else { - V = VecInst; } if (V->getType() != I->getType()) V = Builder.CreateBitOrPointerCast(V, I->getType()); diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp index ba21bbbe112e6..379f4e6602a7d 100644 --- a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp @@ -2095,24 +2095,6 @@ bool LoopVectorizationLegality::canFoldTailByMasking() const { for (const auto &Reduction : getReductionVars()) ReductionLiveOuts.insert(Reduction.second.getLoopExitInstr()); - // TODO: handle non-reduction outside users when tail is folded by masking. - for (auto *AE : AllowedExit) { - // Check that all users of allowed exit values are inside the loop or - // are the live-out of a reduction. 
- if (ReductionLiveOuts.count(AE)) - continue; - for (User *U : AE->users()) { - Instruction *UI = cast(U); - if (TheLoop->contains(UI)) - continue; - LLVM_DEBUG( - dbgs() - << "LV: Cannot fold tail by masking, loop has an outside user for " - << *UI << "\n"); - return false; - } - } - for (const auto &Entry : getInductionVars()) { PHINode *OrigPhi = Entry.first; for (User *U : OrigPhi->users()) { diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp index a63956c0cba6b..7319e4f74433d 100644 --- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -7021,10 +7021,11 @@ static bool planContainsAdditionalSimplifications(VPlan &Plan, VPInstruction::FirstOrderRecurrenceSplice>()))) return true; } - // The VPlan-based cost model is more accurate for partial reduction and + // The VPlan-based cost model is more accurate for partial reductions and // comparing against the legacy cost isn't desirable. - if (isa(&R)) - return true; + if (auto *VPR = dyn_cast(&R)) + if (VPR->isPartialReduction()) + return true; // The VPlan-based cost model can analyze if recipes are scalar // recursively, but the legacy cost model cannot. @@ -8207,11 +8208,15 @@ VPRecipeBase *VPRecipeBuilder::tryToCreateWidenRecipe(VPSingleDefRecipe *R, Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())); // If the PHI is used by a partial reduction, set the scale factor. 
+ bool UseInLoopReduction = CM.isInLoopReduction(Phi); + bool UseOrderedReductions = CM.useOrderedReductions(RdxDesc); unsigned ScaleFactor = getScalingForReduction(RdxDesc.getLoopExitInstr()).value_or(1); + PhiRecipe = new VPReductionPHIRecipe( - Phi, RdxDesc.getRecurrenceKind(), *StartV, CM.isInLoopReduction(Phi), - CM.useOrderedReductions(RdxDesc), ScaleFactor); + Phi, RdxDesc.getRecurrenceKind(), *StartV, + getReductionStyle(UseInLoopReduction, UseOrderedReductions, + ScaleFactor)); } else { // TODO: Currently fixed-order recurrences are modeled as chains of // first-order recurrences. If there are no users of the intermediate @@ -8280,16 +8285,18 @@ VPRecipeBuilder::tryToCreatePartialReduction(VPInstruction *Reduction, VPValue *BinOp = Reduction->getOperand(0); VPValue *Accumulator = Reduction->getOperand(1); - if (isa(BinOp) || isa(BinOp)) + VPRecipeBase *BinOpRecipe = BinOp->getDefiningRecipe(); + if (isa(BinOpRecipe) || + (isa(BinOpRecipe) && + cast(BinOpRecipe)->isPartialReduction())) std::swap(BinOp, Accumulator); assert(ScaleFactor == vputils::getVFScaleFactor(Accumulator->getDefiningRecipe()) && "all accumulators in chain must have same scale factor"); - unsigned ReductionOpcode = Reduction->getOpcode(); auto *ReductionI = Reduction->getUnderlyingInstr(); - if (ReductionOpcode == Instruction::Sub) { + if (Reduction->getOpcode() == Instruction::Sub) { auto *const Zero = ConstantInt::get(ReductionI->getType(), 0); SmallVector Ops; Ops.push_back(Plan.getOrAddLiveIn(Zero)); @@ -8297,14 +8304,15 @@ VPRecipeBuilder::tryToCreatePartialReduction(VPInstruction *Reduction, BinOp = new VPWidenRecipe(*ReductionI, Ops, VPIRFlags(*ReductionI), VPIRMetadata(), ReductionI->getDebugLoc()); Builder.insert(BinOp->getDefiningRecipe()); - ReductionOpcode = Instruction::Add; } VPValue *Cond = nullptr; if (CM.blockNeedsPredicationForAnyReason(ReductionI->getParent())) Cond = getBlockInMask(Builder.getInsertBlock()); - return new VPPartialReductionRecipe(ReductionOpcode, 
Accumulator, BinOp, Cond, - ScaleFactor, ReductionI); + + return new VPReductionRecipe( + RecurKind::Add, FastMathFlags(), ReductionI, Accumulator, BinOp, Cond, + RdxUnordered{/*VFScaleFactor=*/ScaleFactor}, ReductionI->getDebugLoc()); } void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF, @@ -8336,6 +8344,7 @@ void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF, if (auto Plan = tryToBuildVPlanWithVPRecipes( std::unique_ptr(VPlan0->duplicate()), SubRange, &LVer)) { // Now optimize the initial VPlan. + VPlanTransforms::hoistPredicatedLoads(*Plan, *PSE.getSE(), OrigLoop); VPlanTransforms::runPass(VPlanTransforms::truncateToMinimalBitwidths, *Plan, CM.getMinimalBitwidths()); VPlanTransforms::runPass(VPlanTransforms::optimize, *Plan); @@ -8794,9 +8803,10 @@ void LoopVectorizationPlanner::adjustRecipesForReductions( if (CM.blockNeedsPredicationForAnyReason(CurrentLinkI->getParent())) CondOp = RecipeBuilder.getBlockInMask(CurrentLink->getParent()); - auto *RedRecipe = new VPReductionRecipe( - Kind, FMFs, CurrentLinkI, PreviousLink, VecOp, CondOp, - PhiR->isOrdered(), CurrentLinkI->getDebugLoc()); + ReductionStyle Style = getReductionStyle(true, PhiR->isOrdered(), 1); + auto *RedRecipe = + new VPReductionRecipe(Kind, FMFs, CurrentLinkI, PreviousLink, VecOp, + CondOp, Style, CurrentLinkI->getDebugLoc()); // Append the recipe to the end of the VPBasicBlock because we need to // ensure that it comes after all of it's inputs, including CondOp. // Delete CurrentLink as it will be invalid if its operand is replaced @@ -8831,8 +8841,9 @@ void LoopVectorizationPlanner::adjustRecipesForReductions( // Don't output selects for partial reductions because they have an output // with fewer lanes than the VF. So the operands of the select would have // different numbers of lanes. Partial reductions mask the input instead. 
+ auto *RR = dyn_cast(OrigExitingVPV->getDefiningRecipe()); if (!PhiR->isInLoop() && CM.foldTailByMasking() && - !isa(OrigExitingVPV)) { + (!RR || !RR->isPartialReduction())) { VPValue *Cond = RecipeBuilder.getBlockInMask(PhiR->getParent()); std::optional FMFs = PhiTy->isFloatingPointTy() @@ -8929,7 +8940,8 @@ void LoopVectorizationPlanner::adjustRecipesForReductions( if (FinalReductionResult == U || Parent->getParent()) continue; U->replaceUsesOfWith(OrigExitingVPV, FinalReductionResult); - if (match(U, m_ExtractLastElement(m_VPValue()))) + if (match(U, m_CombineOr(m_ExtractLastElement(m_VPValue()), + m_ExtractLane(m_VPValue(), m_VPValue())))) cast(U)->replaceAllUsesWith(FinalReductionResult); } diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h index 0c7d9c0193a03..a464d019754ba 100644 --- a/llvm/lib/Transforms/Vectorize/VPlan.h +++ b/llvm/lib/Transforms/Vectorize/VPlan.h @@ -44,6 +44,7 @@ #include #include #include +#include namespace llvm { @@ -566,7 +567,6 @@ class VPSingleDefRecipe : public VPRecipeBase, public VPValue { case VPRecipeBase::VPWidenIntOrFpInductionSC: case VPRecipeBase::VPWidenPointerInductionSC: case VPRecipeBase::VPReductionPHISC: - case VPRecipeBase::VPPartialReductionSC: return true; case VPRecipeBase::VPBranchOnMaskSC: case VPRecipeBase::VPInterleaveEVLSC: @@ -1099,6 +1099,13 @@ class LLVM_ABI_FOR_TEST VPInstruction : public VPRecipeWithIRFlags, // Implemented with @llvm.experimental.cttz.elts, but returns the expected // result even with operands that are all zeroes. FirstActiveLane, + // Calculates the last active lane index of the vector predicate operands. + // The predicates must be prefix-masks (all 1s before all 0s). Used when + // tail-folding to extract the correct live-out value from the last active + // iteration. It produces the lane index across all unrolled iterations. + // Unrolling will add all copies of its original operand as additional + // operands. 
+ LastActiveLane, // The opcodes below are used for VPInstructionWithType. // @@ -2392,6 +2399,29 @@ struct VPFirstOrderRecurrencePHIRecipe : public VPHeaderPHIRecipe { #endif }; +/// Possible variants of a reduction. + +/// This reduction is ordered and in-loop. +struct RdxOrdered {}; +/// This reduction is in-loop. +struct RdxInLoop {}; +/// This reduction is unordered with the partial result scaled down by some +/// factor. +struct RdxUnordered { + unsigned VFScaleFactor; +}; +using ReductionStyle = std::variant; + +inline ReductionStyle getReductionStyle(bool InLoop, bool Ordered, + unsigned ScaleFactor) { + assert((!Ordered || InLoop) && "Ordered implies in-loop"); + if (Ordered) + return RdxOrdered{}; + if (InLoop) + return RdxInLoop{}; + return RdxUnordered{/*VFScaleFactor=*/ScaleFactor}; +} + /// A recipe for handling reduction phis. The start value is the first operand /// of the recipe and the incoming value from the backedge is the second /// operand. @@ -2400,32 +2430,21 @@ class VPReductionPHIRecipe : public VPHeaderPHIRecipe, /// The recurrence kind of the reduction. const RecurKind Kind; - /// The phi is part of an in-loop reduction. - bool IsInLoop; - - /// The phi is part of an ordered reduction. Requires IsInLoop to be true. - bool IsOrdered; - - /// When expanding the reduction PHI, the plan's VF element count is divided - /// by this factor to form the reduction phi's VF. - unsigned VFScaleFactor = 1; + ReductionStyle Style; public: /// Create a new VPReductionPHIRecipe for the reduction \p Phi. 
VPReductionPHIRecipe(PHINode *Phi, RecurKind Kind, VPValue &Start, - bool IsInLoop = false, bool IsOrdered = false, - unsigned VFScaleFactor = 1) + ReductionStyle Style) : VPHeaderPHIRecipe(VPDef::VPReductionPHISC, Phi, &Start), Kind(Kind), - IsInLoop(IsInLoop), IsOrdered(IsOrdered), VFScaleFactor(VFScaleFactor) { - assert((!IsOrdered || IsInLoop) && "IsOrdered requires IsInLoop"); - } + Style(Style) {} ~VPReductionPHIRecipe() override = default; VPReductionPHIRecipe *clone() override { auto *R = new VPReductionPHIRecipe( dyn_cast_or_null(getUnderlyingValue()), getRecurrenceKind(), - *getOperand(0), IsInLoop, IsOrdered, VFScaleFactor); + *getOperand(0), Style); R->addOperand(getBackedgeValue()); return R; } @@ -2435,8 +2454,12 @@ class VPReductionPHIRecipe : public VPHeaderPHIRecipe, /// Generate the phi/select nodes. void execute(VPTransformState &State) override; - /// Get the factor that the VF of this recipe's output should be scaled by. - unsigned getVFScaleFactor() const { return VFScaleFactor; } + /// Get the factor that the VF of this recipe's output should be scaled by, or + /// 1 if it isn't scaled. + unsigned getVFScaleFactor() const { + auto *Partial = std::get_if(&Style); + return Partial ? Partial->VFScaleFactor : 1; + } /// Returns the number of incoming values, also number of incoming blocks. /// Note that at the moment, VPWidenPointerInductionRecipe only has a single @@ -2447,10 +2470,16 @@ class VPReductionPHIRecipe : public VPHeaderPHIRecipe, RecurKind getRecurrenceKind() const { return Kind; } /// Returns true, if the phi is part of an ordered reduction. - bool isOrdered() const { return IsOrdered; } + bool isOrdered() const { return std::holds_alternative(Style); } - /// Returns true, if the phi is part of an in-loop reduction. - bool isInLoop() const { return IsInLoop; } + /// Returns true if the phi is part of an in-loop reduction. 
+ bool isInLoop() const { + return std::holds_alternative(Style) || + std::holds_alternative(Style); + } + + /// Returns true if the reduction outputs a vector with a scaled down VF. + bool isPartialReduction() const { return getVFScaleFactor() > 1; } /// Returns true if the recipe only uses the first lane of operand \p Op. bool usesFirstLaneOnly(const VPValue *Op) const override { @@ -2732,23 +2761,25 @@ class LLVM_ABI_FOR_TEST VPInterleaveEVLRecipe final : public VPInterleaveBase { #endif }; -/// A recipe to represent inloop reduction operations, performing a reduction on -/// a vector operand into a scalar value, and adding the result to a chain. -/// The Operands are {ChainOp, VecOp, [Condition]}. +/// A recipe to represent inloop, ordered or partial reduction operations. It +/// performs a reduction on a vector operand into a scalar (vector in the case +/// of a partial reduction) value, and adds the result to a chain. The Operands +/// are {ChainOp, VecOp, [Condition]}. class LLVM_ABI_FOR_TEST VPReductionRecipe : public VPRecipeWithIRFlags { + /// The recurrence kind for the reduction in question. RecurKind RdxKind; - bool IsOrdered; /// Whether the reduction is conditional. 
bool IsConditional = false; + ReductionStyle Style; protected: VPReductionRecipe(const unsigned char SC, RecurKind RdxKind, FastMathFlags FMFs, Instruction *I, ArrayRef Operands, VPValue *CondOp, - bool IsOrdered, DebugLoc DL) + ReductionStyle Style, DebugLoc DL) : VPRecipeWithIRFlags(SC, Operands, FMFs, DL), RdxKind(RdxKind), - IsOrdered(IsOrdered) { + Style(Style) { if (CondOp) { IsConditional = true; addOperand(CondOp); @@ -2759,30 +2790,29 @@ class LLVM_ABI_FOR_TEST VPReductionRecipe : public VPRecipeWithIRFlags { public: VPReductionRecipe(RecurKind RdxKind, FastMathFlags FMFs, Instruction *I, VPValue *ChainOp, VPValue *VecOp, VPValue *CondOp, - bool IsOrdered, DebugLoc DL = DebugLoc::getUnknown()) + ReductionStyle Style, DebugLoc DL = DebugLoc::getUnknown()) : VPReductionRecipe(VPDef::VPReductionSC, RdxKind, FMFs, I, - ArrayRef({ChainOp, VecOp}), CondOp, - IsOrdered, DL) {} + ArrayRef({ChainOp, VecOp}), CondOp, Style, + DL) {} VPReductionRecipe(const RecurKind RdxKind, FastMathFlags FMFs, VPValue *ChainOp, VPValue *VecOp, VPValue *CondOp, - bool IsOrdered, DebugLoc DL = DebugLoc::getUnknown()) + ReductionStyle Style, DebugLoc DL = DebugLoc::getUnknown()) : VPReductionRecipe(VPDef::VPReductionSC, RdxKind, FMFs, nullptr, - ArrayRef({ChainOp, VecOp}), CondOp, - IsOrdered, DL) {} + ArrayRef({ChainOp, VecOp}), CondOp, Style, + DL) {} ~VPReductionRecipe() override = default; VPReductionRecipe *clone() override { return new VPReductionRecipe(RdxKind, getFastMathFlags(), getUnderlyingInstr(), getChainOp(), getVecOp(), - getCondOp(), IsOrdered, getDebugLoc()); + getCondOp(), Style, getDebugLoc()); } static inline bool classof(const VPRecipeBase *R) { return R->getVPDefID() == VPRecipeBase::VPReductionSC || - R->getVPDefID() == VPRecipeBase::VPReductionEVLSC || - R->getVPDefID() == VPRecipeBase::VPPartialReductionSC; + R->getVPDefID() == VPRecipeBase::VPReductionEVLSC; } static inline bool classof(const VPUser *U) { @@ -2809,9 +2839,16 @@ class LLVM_ABI_FOR_TEST 
VPReductionRecipe : public VPRecipeWithIRFlags { /// Return the recurrence kind for the in-loop reduction. RecurKind getRecurrenceKind() const { return RdxKind; } /// Return true if the in-loop reduction is ordered. - bool isOrdered() const { return IsOrdered; }; + bool isOrdered() const { return std::holds_alternative(Style); }; /// Return true if the in-loop reduction is conditional. bool isConditional() const { return IsConditional; }; + /// Returns true if the reduction outputs a vector with a scaled down VF. + bool isPartialReduction() const { return getVFScaleFactor() > 1; } + /// Returns true if the reduction is in-loop. + bool isInLoop() const { + return std::holds_alternative(Style) || + std::holds_alternative(Style); + } /// The VPValue of the scalar Chain being accumulated. VPValue *getChainOp() const { return getOperand(0); } /// The VPValue of the vector value to be reduced. @@ -2820,69 +2857,12 @@ class LLVM_ABI_FOR_TEST VPReductionRecipe : public VPRecipeWithIRFlags { VPValue *getCondOp() const { return isConditional() ? getOperand(getNumOperands() - 1) : nullptr; } - -protected: -#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) - /// Print the recipe. - void printRecipe(raw_ostream &O, const Twine &Indent, - VPSlotTracker &SlotTracker) const override; -#endif -}; - -/// A recipe for forming partial reductions. In the loop, an accumulator and -/// vector operand are added together and passed to the next iteration as the -/// next accumulator. After the loop body, the accumulator is reduced to a -/// scalar value. -class VPPartialReductionRecipe : public VPReductionRecipe { - unsigned Opcode; - - /// The divisor by which the VF of this recipe's output should be divided - /// during execution. 
- unsigned VFScaleFactor; - -public: - VPPartialReductionRecipe(Instruction *ReductionInst, VPValue *Op0, - VPValue *Op1, VPValue *Cond, unsigned VFScaleFactor) - : VPPartialReductionRecipe(ReductionInst->getOpcode(), Op0, Op1, Cond, - VFScaleFactor, ReductionInst) {} - VPPartialReductionRecipe(unsigned Opcode, VPValue *Op0, VPValue *Op1, - VPValue *Cond, unsigned ScaleFactor, - Instruction *ReductionInst = nullptr) - : VPReductionRecipe(VPDef::VPPartialReductionSC, RecurKind::Add, - FastMathFlags(), ReductionInst, - ArrayRef({Op0, Op1}), Cond, false, {}), - Opcode(Opcode), VFScaleFactor(ScaleFactor) { - [[maybe_unused]] auto *AccumulatorRecipe = - getChainOp()->getDefiningRecipe(); - // When cloning as part of a VPExpressionRecipe the chain op could have - // replaced by a temporary VPValue, so it doesn't have a defining recipe. - assert((!AccumulatorRecipe || - isa(AccumulatorRecipe) || - isa(AccumulatorRecipe)) && - "Unexpected operand order for partial reduction recipe"); - } - ~VPPartialReductionRecipe() override = default; - - VPPartialReductionRecipe *clone() override { - return new VPPartialReductionRecipe(Opcode, getOperand(0), getOperand(1), - getCondOp(), VFScaleFactor, - getUnderlyingInstr()); - } - - VP_CLASSOF_IMPL(VPDef::VPPartialReductionSC) - - /// Generate the reduction in the loop. - void execute(VPTransformState &State) override; - - /// Return the cost of this VPPartialReductionRecipe. - InstructionCost computeCost(ElementCount VF, - VPCostContext &Ctx) const override; - - /// Get the binary op's opcode. - unsigned getOpcode() const { return Opcode; } - - /// Get the factor that the VF of this recipe's output should be scaled by. - unsigned getVFScaleFactor() const { return VFScaleFactor; } + /// Get the factor that the VF of this recipe's output should be scaled by, or + /// 1 if it isn't scaled. + unsigned getVFScaleFactor() const { + auto *Partial = std::get_if(&Style); + return Partial ? 
Partial->VFScaleFactor : 1; + } protected: #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) @@ -2905,7 +2885,7 @@ class LLVM_ABI_FOR_TEST VPReductionEVLRecipe : public VPReductionRecipe { R.getFastMathFlags(), cast_or_null(R.getUnderlyingValue()), ArrayRef({R.getChainOp(), R.getVecOp(), &EVL}), CondOp, - R.isOrdered(), DL) {} + getReductionStyle(/*InLoop=*/true, R.isOrdered(), 1), DL) {} ~VPReductionEVLRecipe() override = default; @@ -3173,7 +3153,7 @@ class VPExpressionRecipe : public VPSingleDefRecipe { void decompose(); unsigned getVFScaleFactor() const { - auto *PR = dyn_cast(ExpressionRecipes.back()); + auto *PR = dyn_cast(ExpressionRecipes.back()); return PR ? PR->getVFScaleFactor() : 1; } diff --git a/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp b/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp index 80a2e4bc3f754..ee2a2a1de761e 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp @@ -115,6 +115,7 @@ Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPInstruction *R) { case VPInstruction::ExtractLane: return inferScalarType(R->getOperand(1)); case VPInstruction::FirstActiveLane: + case VPInstruction::LastActiveLane: return Type::getIntNTy(Ctx, 64); case VPInstruction::ExtractLastElement: case VPInstruction::ExtractLastLanePerPart: @@ -288,10 +289,10 @@ Type *VPTypeAnalysis::inferScalarType(const VPValue *V) { [](const auto *R) { return R->getScalarType(); }) .Case([this](const VPRecipeBase *R) { - return inferScalarType(R->getOperand(0)); - }) + VPVectorEndPointerRecipe, VPWidenCanonicalIVRecipe>( + [this](const VPRecipeBase *R) { + return inferScalarType(R->getOperand(0)); + }) // VPInstructionWithType must be handled before VPInstruction. .Case( @@ -561,11 +562,12 @@ SmallVector llvm::calculateRegisterUsageForPlan( // fewer lanes than the VF. 
unsigned ScaleFactor = vputils::getVFScaleFactor(VPV->getDefiningRecipe()); - ElementCount VF = VFs[J].divideCoefficientBy(ScaleFactor); - LLVM_DEBUG(if (VF != VFs[J]) { - dbgs() << "LV(REG): Scaled down VF from " << VFs[J] << " to " << VF - << " for " << *R << "\n"; - }); + ElementCount VF = VFs[J]; + if (ScaleFactor > 1) { + VF = VFs[J].divideCoefficientBy(ScaleFactor); + LLVM_DEBUG(dbgs() << "LV(REG): Scaled down VF from " << VFs[J] + << " to " << VF << " for " << *R << "\n";); + } Type *ScalarTy = TypeInfo.inferScalarType(VPV); unsigned ClassID = TTI.getRegisterClassForType(true, ScalarTy); diff --git a/llvm/lib/Transforms/Vectorize/VPlanPatternMatch.h b/llvm/lib/Transforms/Vectorize/VPlanPatternMatch.h index 91a392cccc1e3..79275e7bffdd0 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanPatternMatch.h +++ b/llvm/lib/Transforms/Vectorize/VPlanPatternMatch.h @@ -398,12 +398,24 @@ m_ExtractElement(const Op0_t &Op0, const Op1_t &Op1) { return m_VPInstruction(Op0, Op1); } +template +inline VPInstruction_match +m_ExtractLane(const Op0_t &Op0, const Op1_t &Op1) { + return m_VPInstruction(Op0, Op1); +} + template inline VPInstruction_match m_ExtractLastLanePerPart(const Op0_t &Op0) { return m_VPInstruction(Op0); } +template +inline VPInstruction_match +m_ExtractPenultimateElement(const Op0_t &Op0) { + return m_VPInstruction(Op0); +} + template inline VPInstruction_match m_ActiveLaneMask(const Op0_t &Op0, const Op1_t &Op1, const Op2_t &Op2) { @@ -436,6 +448,16 @@ m_FirstActiveLane(const Op0_t &Op0) { return m_VPInstruction(Op0); } +template +inline VPInstruction_match +m_LastActiveLane(const Op0_t &Op0) { + return m_VPInstruction(Op0); +} + +inline VPInstruction_match m_StepVector() { + return m_VPInstruction(); +} + template inline AllRecipe_match m_Unary(const Op0_t &Op0) { return AllRecipe_match(Op0); diff --git a/llvm/lib/Transforms/Vectorize/VPlanPredicator.cpp b/llvm/lib/Transforms/Vectorize/VPlanPredicator.cpp index fb17d5dd62b9d..3579af21d8b07 100644 --- 
a/llvm/lib/Transforms/Vectorize/VPlanPredicator.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanPredicator.cpp @@ -44,11 +44,6 @@ class VPPredicator { /// possibly inserting new recipes at \p Dst (using Builder's insertion point) VPValue *createEdgeMask(VPBasicBlock *Src, VPBasicBlock *Dst); - /// Returns the *entry* mask for \p VPBB. - VPValue *getBlockInMask(VPBasicBlock *VPBB) const { - return BlockMaskCache.lookup(VPBB); - } - /// Record \p Mask as the *entry* mask of \p VPBB, which is expected to not /// already have a mask. void setBlockInMask(VPBasicBlock *VPBB, VPValue *Mask) { @@ -68,6 +63,11 @@ class VPPredicator { } public: + /// Returns the *entry* mask for \p VPBB. + VPValue *getBlockInMask(VPBasicBlock *VPBB) const { + return BlockMaskCache.lookup(VPBB); + } + /// Returns the precomputed predicate of the edge from \p Src to \p Dst. VPValue *getEdgeMask(const VPBasicBlock *Src, const VPBasicBlock *Dst) const { return EdgeMaskCache.lookup({Src, Dst}); @@ -301,5 +301,34 @@ VPlanTransforms::introduceMasksAndLinearize(VPlan &Plan, bool FoldTail) { PrevVPBB = VPBB; } + + // If we folded the tail and introduced a header mask, any extract of the + // last element must be updated to extract from the last active lane of the + // header mask instead (i.e., the lane corresponding to the last active + // iteration). + if (FoldTail) { + assert(Plan.getExitBlocks().size() == 1 && + "only a single-exit block is supported currently"); + VPBasicBlock *EB = Plan.getExitBlocks().front(); + assert(EB->getSinglePredecessor() == Plan.getMiddleBlock() && + "the exit block must have middle block as single predecessor"); + + VPBuilder B(Plan.getMiddleBlock()->getTerminator()); + for (auto &P : EB->phis()) { + auto *ExitIRI = cast(&P); + VPValue *Inc = ExitIRI->getIncomingValue(0); + VPValue *Op; + if (!match(Inc, m_ExtractLastElement(m_VPValue(Op)))) + continue; + + // Compute the index of the last active lane. 
+ VPValue *HeaderMask = Predicator.getBlockInMask(Header); + VPValue *LastActiveLane = + B.createNaryOp(VPInstruction::LastActiveLane, HeaderMask); + auto *Ext = + B.createNaryOp(VPInstruction::ExtractLane, {LastActiveLane, Op}); + Inc->replaceAllUsesWith(Ext); + } + } return Predicator.getBlockMaskCache(); } diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp index e41f67103e096..0ba8cec008555 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp @@ -180,7 +180,6 @@ bool VPRecipeBase::mayHaveSideEffects() const { return cast(this)->mayHaveSideEffects(); case VPBlendSC: case VPReductionEVLSC: - case VPPartialReductionSC: case VPReductionSC: case VPScalarIVStepsSC: case VPVectorPointerSC: @@ -314,134 +313,6 @@ bool VPRecipeBase::isScalarCast() const { return VPI && Instruction::isCast(VPI->getOpcode()); } -InstructionCost -VPPartialReductionRecipe::computeCost(ElementCount VF, - VPCostContext &Ctx) const { - std::optional Opcode; - VPValue *Op = getVecOp(); - uint64_t MulConst; - - InstructionCost CondCost = 0; - if (isConditional()) { - CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE; - auto *VecTy = Ctx.Types.inferScalarType(Op); - auto *CondTy = Ctx.Types.inferScalarType(getCondOp()); - CondCost = Ctx.TTI.getCmpSelInstrCost(Instruction::Select, VecTy, CondTy, - Pred, Ctx.CostKind); - } - - // If the partial reduction is predicated, a select will be operand 1. - // If it isn't predicated and the mul isn't operating on a constant, then it - // should have been turned into a VPExpressionRecipe. - // FIXME: Replace the entire function with this once all partial reduction - // variants are bundled into VPExpressionRecipe. 
- if (!match(Op, m_Mul(m_VPValue(), m_ConstantInt(MulConst)))) { - auto *PhiType = Ctx.Types.inferScalarType(getChainOp()); - auto *InputType = Ctx.Types.inferScalarType(getVecOp()); - return CondCost + Ctx.TTI.getPartialReductionCost( - getOpcode(), InputType, InputType, PhiType, VF, - TTI::PR_None, TTI::PR_None, {}, Ctx.CostKind); - } - - VPRecipeBase *OpR = Op->getDefiningRecipe(); - Type *InputTypeA = nullptr, *InputTypeB = nullptr; - TTI::PartialReductionExtendKind ExtAType = TTI::PR_None, - ExtBType = TTI::PR_None; - - auto GetExtendKind = [](VPRecipeBase *R) { - if (!R) - return TTI::PR_None; - auto *WidenCastR = dyn_cast(R); - if (!WidenCastR) - return TTI::PR_None; - if (WidenCastR->getOpcode() == Instruction::CastOps::ZExt) - return TTI::PR_ZeroExtend; - if (WidenCastR->getOpcode() == Instruction::CastOps::SExt) - return TTI::PR_SignExtend; - return TTI::PR_None; - }; - - // Pick out opcode, type/ext information and use sub side effects from a widen - // recipe. - auto HandleWiden = [&](VPWidenRecipe *Widen) { - if (match(Widen, m_Sub(m_ZeroInt(), m_VPValue(Op)))) { - Widen = dyn_cast(Op); - } - Opcode = Widen->getOpcode(); - VPRecipeBase *ExtAR = Widen->getOperand(0)->getDefiningRecipe(); - VPRecipeBase *ExtBR = Widen->getOperand(1)->getDefiningRecipe(); - InputTypeA = Ctx.Types.inferScalarType(ExtAR ? ExtAR->getOperand(0) - : Widen->getOperand(0)); - InputTypeB = Ctx.Types.inferScalarType(ExtBR ? 
ExtBR->getOperand(0) - : Widen->getOperand(1)); - ExtAType = GetExtendKind(ExtAR); - ExtBType = GetExtendKind(ExtBR); - - using namespace VPlanPatternMatch; - const APInt *C; - if (!ExtBR && match(Widen->getOperand(1), m_APInt(C)) && - canConstantBeExtended(C, InputTypeA, ExtAType)) { - InputTypeB = InputTypeA; - ExtBType = ExtAType; - } - }; - - if (isa(OpR)) { - InputTypeA = Ctx.Types.inferScalarType(OpR->getOperand(0)); - ExtAType = GetExtendKind(OpR); - } else if (isa(OpR)) { - if (auto RedPhiOp1R = dyn_cast_or_null(getOperand(1))) { - InputTypeA = Ctx.Types.inferScalarType(RedPhiOp1R->getOperand(0)); - ExtAType = GetExtendKind(RedPhiOp1R); - } else if (auto Widen = dyn_cast_or_null(getOperand(1))) - HandleWiden(Widen); - } else if (auto Widen = dyn_cast(OpR)) { - HandleWiden(Widen); - } else if (auto Reduction = dyn_cast(OpR)) { - return CondCost + Reduction->computeCost(VF, Ctx); - } - auto *PhiType = Ctx.Types.inferScalarType(getOperand(1)); - return CondCost + Ctx.TTI.getPartialReductionCost( - getOpcode(), InputTypeA, InputTypeB, PhiType, VF, - ExtAType, ExtBType, Opcode, Ctx.CostKind); - ; -} - -void VPPartialReductionRecipe::execute(VPTransformState &State) { - auto &Builder = State.Builder; - - assert(getOpcode() == Instruction::Add && - "Unhandled partial reduction opcode"); - - Value *BinOpVal = State.get(getVecOp()); - Value *PhiVal = State.get(getChainOp()); - assert(PhiVal && BinOpVal && "Phi and Mul must be set"); - - Type *RetTy = PhiVal->getType(); - - if (isConditional()) { - Value *Cond = State.get(getCondOp()); - Value *Zero = ConstantInt::get(BinOpVal->getType(), 0); - BinOpVal = Builder.CreateSelect(Cond, BinOpVal, Zero); - } - - CallInst *V = - Builder.CreateIntrinsic(RetTy, Intrinsic::vector_partial_reduce_add, - {PhiVal, BinOpVal}, nullptr, "partial.reduce"); - - State.set(this, V); -} - -#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) -void VPPartialReductionRecipe::printRecipe(raw_ostream &O, const Twine &Indent, - VPSlotTracker 
&SlotTracker) const { - O << Indent << "PARTIAL-REDUCE "; - printAsOperand(O, SlotTracker); - O << " = " << Instruction::getOpcodeName(getOpcode()) << " "; - printOperands(O, SlotTracker); -} -#endif - void VPIRFlags::intersectFlags(const VPIRFlags &Other) { assert(OpType == Other.OpType && "OpType must match"); switch (OpType) { @@ -569,7 +440,6 @@ unsigned VPInstruction::getNumOperandsForOpcode(unsigned Opcode) { case VPInstruction::ExtractLastElement: case VPInstruction::ExtractLastLanePerPart: case VPInstruction::ExtractPenultimateElement: - case VPInstruction::FirstActiveLane: case VPInstruction::Not: case VPInstruction::ResumeForEpilogue: case VPInstruction::Unpack: @@ -599,6 +469,8 @@ unsigned VPInstruction::getNumOperandsForOpcode(unsigned Opcode) { case Instruction::PHI: case Instruction::Switch: case VPInstruction::AnyOf: + case VPInstruction::FirstActiveLane: + case VPInstruction::LastActiveLane: case VPInstruction::SLPLoad: case VPInstruction::SLPStore: // Cannot determine the number of operands from the opcode. @@ -1184,6 +1056,29 @@ InstructionCost VPInstruction::computeCost(ElementCount VF, {PredTy, Type::getInt1Ty(Ctx.LLVMCtx)}); return Ctx.TTI.getIntrinsicInstrCost(Attrs, Ctx.CostKind); } + case VPInstruction::LastActiveLane: { + Type *ScalarTy = Ctx.Types.inferScalarType(getOperand(0)); + if (VF.isScalar()) + return Ctx.TTI.getCmpSelInstrCost(Instruction::ICmp, ScalarTy, + CmpInst::makeCmpResultType(ScalarTy), + CmpInst::ICMP_EQ, Ctx.CostKind); + // Calculate the cost of determining the lane index: NOT + cttz_elts + SUB. + auto *PredTy = toVectorTy(ScalarTy, VF); + IntrinsicCostAttributes Attrs(Intrinsic::experimental_cttz_elts, + Type::getInt64Ty(Ctx.LLVMCtx), + {PredTy, Type::getInt1Ty(Ctx.LLVMCtx)}); + InstructionCost Cost = Ctx.TTI.getIntrinsicInstrCost(Attrs, Ctx.CostKind); + // Add cost of NOT operation on the predicate. 
+ Cost += Ctx.TTI.getArithmeticInstrCost( + Instruction::Xor, PredTy, Ctx.CostKind, + {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None}, + {TargetTransformInfo::OK_UniformConstantValue, + TargetTransformInfo::OP_None}); + // Add cost of SUB operation on the index. + Cost += Ctx.TTI.getArithmeticInstrCost( + Instruction::Sub, Type::getInt64Ty(Ctx.LLVMCtx), Ctx.CostKind); + return Cost; + } case VPInstruction::FirstOrderRecurrenceSplice: { assert(VF.isVector() && "Scalar FirstOrderRecurrenceSplice?"); SmallVector Mask(VF.getKnownMinValue()); @@ -1238,6 +1133,7 @@ bool VPInstruction::isVectorToScalar() const { getOpcode() == Instruction::ExtractElement || getOpcode() == VPInstruction::ExtractLane || getOpcode() == VPInstruction::FirstActiveLane || + getOpcode() == VPInstruction::LastActiveLane || getOpcode() == VPInstruction::ComputeAnyOfResult || getOpcode() == VPInstruction::ComputeFindIVResult || getOpcode() == VPInstruction::ComputeReductionResult || @@ -1305,6 +1201,7 @@ bool VPInstruction::opcodeMayReadOrWriteFromMemory() const { case VPInstruction::ActiveLaneMask: case VPInstruction::ExplicitVectorLength: case VPInstruction::FirstActiveLane: + case VPInstruction::LastActiveLane: case VPInstruction::FirstOrderRecurrenceSplice: case VPInstruction::LogicalAnd: case VPInstruction::Not: @@ -1481,6 +1378,9 @@ void VPInstruction::printRecipe(raw_ostream &O, const Twine &Indent, case VPInstruction::FirstActiveLane: O << "first-active-lane"; break; + case VPInstruction::LastActiveLane: + O << "last-active-lane"; + break; case VPInstruction::ReductionStartVector: O << "reduction-start-vector"; break; @@ -2593,22 +2493,11 @@ void VPWidenGEPRecipe::printRecipe(raw_ostream &O, const Twine &Indent, } #endif -static Type *getGEPIndexTy(bool IsScalable, bool IsReverse, bool IsUnitStride, - unsigned CurrentPart, IRBuilderBase &Builder) { - // Use i32 for the gep index type when the value is constant, - // or query DataLayout for a more suitable index type 
otherwise. - const DataLayout &DL = Builder.GetInsertBlock()->getDataLayout(); - return !IsUnitStride || (IsScalable && (IsReverse || CurrentPart > 0)) - ? DL.getIndexType(Builder.getPtrTy(0)) - : Builder.getInt32Ty(); -} - void VPVectorEndPointerRecipe::execute(VPTransformState &State) { auto &Builder = State.Builder; unsigned CurrentPart = getUnrollPart(*this); - bool IsUnitStride = Stride == 1 || Stride == -1; - Type *IndexTy = getGEPIndexTy(State.VF.isScalable(), /*IsReverse*/ true, - IsUnitStride, CurrentPart, Builder); + const DataLayout &DL = Builder.GetInsertBlock()->getDataLayout(); + Type *IndexTy = DL.getIndexType(State.TypeAnalysis.inferScalarType(this)); // The wide store needs to start at the last vector element. Value *RunTimeVF = State.get(getVFValue(), VPLane(0)); @@ -2644,8 +2533,8 @@ void VPVectorEndPointerRecipe::printRecipe(raw_ostream &O, const Twine &Indent, void VPVectorPointerRecipe::execute(VPTransformState &State) { auto &Builder = State.Builder; unsigned CurrentPart = getUnrollPart(*this); - Type *IndexTy = getGEPIndexTy(State.VF.isScalable(), /*IsReverse*/ false, - /*IsUnitStride*/ true, CurrentPart, Builder); + const DataLayout &DL = Builder.GetInsertBlock()->getDataLayout(); + Type *IndexTy = DL.getIndexType(State.TypeAnalysis.inferScalarType(this)); Value *Ptr = State.get(getOperand(0), VPLane(0)); Value *Increment = createStepForVF(Builder, IndexTy, State.VF, CurrentPart); @@ -2706,7 +2595,6 @@ void VPBlendRecipe::printRecipe(raw_ostream &O, const Twine &Indent, void VPReductionRecipe::execute(VPTransformState &State) { assert(!State.Lane && "Reduction being replicated."); - Value *PrevInChain = State.get(getChainOp(), /*IsScalar*/ true); RecurKind Kind = getRecurrenceKind(); assert(!RecurrenceDescriptor::isAnyOfRecurrenceKind(Kind) && "In-loop AnyOf reductions aren't currently supported"); @@ -2728,7 +2616,8 @@ void VPReductionRecipe::execute(VPTransformState &State) { } Value *NewRed; Value *NextInChain; - if (IsOrdered) { + if 
(isOrdered()) { + Value *PrevInChain = State.get(getChainOp(), /*IsScalar*/ true); if (State.VF.isVector()) NewRed = createOrderedReduction(State.Builder, Kind, NewVecOp, PrevInChain); @@ -2738,8 +2627,18 @@ void VPReductionRecipe::execute(VPTransformState &State) { PrevInChain, NewVecOp); PrevInChain = NewRed; NextInChain = NewRed; + } else if (isPartialReduction()) { + assert(Kind == RecurKind::Add && "Unexpected partial reduction kind"); + Value *PrevInChain = State.get(getChainOp(), /*IsScalar*/ false); + NewRed = State.Builder.CreateIntrinsic( + PrevInChain->getType(), Intrinsic::vector_partial_reduce_add, + {PrevInChain, NewVecOp}, nullptr, "partial.reduce"); + PrevInChain = NewRed; + NextInChain = NewRed; } else { - PrevInChain = State.get(getChainOp(), /*IsScalar*/ true); + assert(isInLoop() && + "The reduction must either be ordered, partial or in-loop"); + Value *PrevInChain = State.get(getChainOp(), /*IsScalar*/ true); NewRed = createSimpleReduction(State.Builder, NewVecOp, Kind); if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) NextInChain = createMinMaxOp(State.Builder, Kind, NewRed, PrevInChain); @@ -2748,7 +2647,7 @@ void VPReductionRecipe::execute(VPTransformState &State) { (Instruction::BinaryOps)RecurrenceDescriptor::getOpcode(Kind), PrevInChain, NewRed); } - State.set(this, NextInChain, /*IsScalar*/ true); + State.set(this, NextInChain, /*IsScalar*/ !isPartialReduction()); } void VPReductionEVLRecipe::execute(VPTransformState &State) { @@ -2795,6 +2694,22 @@ InstructionCost VPReductionRecipe::computeCost(ElementCount VF, std::optional OptionalFMF = ElementTy->isFloatingPointTy() ? 
std::make_optional(FMFs) : std::nullopt; + if (isPartialReduction()) { + InstructionCost CondCost = 0; + if (isConditional()) { + CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE; + auto *CondTy = cast( + toVectorTy(Ctx.Types.inferScalarType(getCondOp()), VF)); + CondCost = Ctx.TTI.getCmpSelInstrCost(Instruction::Select, VectorTy, + CondTy, Pred, Ctx.CostKind); + } + return CondCost + Ctx.TTI.getPartialReductionCost( + Opcode, ElementTy, ElementTy, ElementTy, VF, + TargetTransformInfo::PR_None, + TargetTransformInfo::PR_None, std::nullopt, + Ctx.CostKind); + } + // TODO: Support any-of reductions. assert( (!RecurrenceDescriptor::isAnyOfRecurrenceKind(RdxKind) || @@ -2900,7 +2815,9 @@ InstructionCost VPExpressionRecipe::computeCost(ElementCount VF, unsigned Opcode = RecurrenceDescriptor::getOpcode( cast(ExpressionRecipes[1])->getRecurrenceKind()); auto *ExtR = cast(ExpressionRecipes[0]); - return isa(ExpressionRecipes.back()) + + return cast(ExpressionRecipes.back()) + ->isPartialReduction() ? Ctx.TTI.getPartialReductionCost( Opcode, Ctx.Types.inferScalarType(getOperand(0)), nullptr, RedTy, VF, @@ -2920,7 +2837,8 @@ InstructionCost VPExpressionRecipe::computeCost(ElementCount VF, Opcode = Instruction::Sub; [[fallthrough]]; case ExpressionTypes::ExtMulAccReduction: { - if (isa(ExpressionRecipes.back())) { + auto *RedR = cast(ExpressionRecipes.back()); + if (RedR->isPartialReduction()) { auto *Ext0R = cast(ExpressionRecipes[0]); auto *Ext1R = cast(ExpressionRecipes[1]); auto *Mul = cast(ExpressionRecipes[2]); @@ -2959,8 +2877,8 @@ bool VPExpressionRecipe::mayHaveSideEffects() const { bool VPExpressionRecipe::isSingleScalar() const { // Cannot use vputils::isSingleScalar(), because all external operands // of the expression will be live-ins while bundled. 
- return isa(ExpressionRecipes.back()) && - !isa(ExpressionRecipes.back()); + auto *RR = dyn_cast(ExpressionRecipes.back()); + return RR && !RR->isPartialReduction(); } #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) @@ -2972,12 +2890,11 @@ void VPExpressionRecipe::printRecipe(raw_ostream &O, const Twine &Indent, O << " = "; auto *Red = cast(ExpressionRecipes.back()); unsigned Opcode = RecurrenceDescriptor::getOpcode(Red->getRecurrenceKind()); - bool IsPartialReduction = isa(Red); switch (ExpressionType) { case ExpressionTypes::ExtendedReduction: { getOperand(1)->printAsOperand(O, SlotTracker); - O << " + " << (IsPartialReduction ? "partial." : "") << "reduce."; + O << " + " << (Red->isPartialReduction() ? "partial." : "") << "reduce."; O << Instruction::getOpcodeName(Opcode) << " ("; getOperand(0)->printAsOperand(O, SlotTracker); Red->printFlags(O); @@ -2994,7 +2911,7 @@ void VPExpressionRecipe::printRecipe(raw_ostream &O, const Twine &Indent, } case ExpressionTypes::ExtNegatedMulAccReduction: { getOperand(getNumOperands() - 1)->printAsOperand(O, SlotTracker); - O << " + " << (IsPartialReduction ? "partial." : "") << "reduce."; + O << " + " << (Red->isPartialReduction() ? "partial." : "") << "reduce."; O << Instruction::getOpcodeName( RecurrenceDescriptor::getOpcode(Red->getRecurrenceKind())) << " (sub (0, mul"; @@ -3019,7 +2936,7 @@ void VPExpressionRecipe::printRecipe(raw_ostream &O, const Twine &Indent, case ExpressionTypes::MulAccReduction: case ExpressionTypes::ExtMulAccReduction: { getOperand(getNumOperands() - 1)->printAsOperand(O, SlotTracker); - O << " + " << (IsPartialReduction ? "partial." : "") << "reduce."; + O << " + " << (Red->isPartialReduction() ? "partial." 
: "") << "reduce."; O << Instruction::getOpcodeName( RecurrenceDescriptor::getOpcode(Red->getRecurrenceKind())) << " ("; @@ -3056,7 +2973,10 @@ void VPExpressionRecipe::printRecipe(raw_ostream &O, const Twine &Indent, void VPReductionRecipe::printRecipe(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const { - O << Indent << "REDUCE "; + if (isPartialReduction()) + O << Indent << "PARTIAL-REDUCE "; + else + O << Indent << "REDUCE "; printAsOperand(O, SlotTracker); O << " = "; getChainOp()->printAsOperand(O, SlotTracker); @@ -4444,7 +4364,7 @@ void VPReductionPHIRecipe::execute(VPTransformState &State) { // this value when we vectorize all of the instructions that use the PHI. BasicBlock *VectorPH = State.CFG.VPBB2IRBB.at(getParent()->getCFGPredecessor(0)); - bool ScalarPHI = State.VF.isScalar() || IsInLoop; + bool ScalarPHI = State.VF.isScalar() || isInLoop(); Value *StartV = State.get(StartVPV, ScalarPHI); Type *VecTy = StartV->getType(); @@ -4453,7 +4373,7 @@ void VPReductionPHIRecipe::execute(VPTransformState &State) { "recipe must be in the vector loop header"); auto *Phi = PHINode::Create(VecTy, 2, "vec.phi"); Phi->insertBefore(HeaderBB->getFirstInsertionPt()); - State.set(this, Phi, IsInLoop); + State.set(this, Phi, isInLoop()); Phi->addIncoming(StartV, VectorPH); } @@ -4466,8 +4386,8 @@ void VPReductionPHIRecipe::printRecipe(raw_ostream &O, const Twine &Indent, printAsOperand(O, SlotTracker); O << " = phi "; printOperands(O, SlotTracker); - if (VFScaleFactor != 1) - O << " (VF scaled by 1/" << VFScaleFactor << ")"; + if (getVFScaleFactor() > 1) + O << " (VF scaled by 1/" << getVFScaleFactor() << ")"; } #endif diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp index 9174058baad65..ef82eeba8b9a5 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp @@ -139,6 +139,41 @@ bool 
VPlanTransforms::tryToConvertVPInstructionsToVPRecipes( return true; } +// Check if a load can be hoisted by verifying it doesn't alias with any stores +// in blocks between FirstBB and LastBB using scoped noalias metadata. +static bool canHoistLoadWithNoAliasCheck(VPReplicateRecipe *Load, + VPBasicBlock *FirstBB, + VPBasicBlock *LastBB) { + // Get the load's memory location and check if it aliases with any stores + // using scoped noalias metadata. + auto LoadLoc = vputils::getMemoryLocation(*Load); + if (!LoadLoc || !LoadLoc->AATags.Scope) + return false; + + const AAMDNodes &LoadAA = LoadLoc->AATags; + for (VPBlockBase *Block = FirstBB; Block; + Block = Block->getSingleSuccessor()) { + // This function assumes a simple linear chain of blocks. If there are + // multiple successors, we would need more complex analysis. + assert(Block->getNumSuccessors() <= 1 && + "Expected at most one successor in block chain"); + auto *VPBB = cast(Block); + for (VPRecipeBase &R : *VPBB) { + if (R.mayWriteToMemory()) { + auto Loc = vputils::getMemoryLocation(R); + // Bail out if we can't get the location or if the scoped noalias + // metadata indicates potential aliasing. + if (!Loc || ScopedNoAliasAAResult::mayAliasInScopes( + LoadAA.Scope, Loc->AATags.NoAlias)) + return false; + } + } + if (Block == LastBB) + break; + } + return true; +} + /// Return true if we do not know how to (mechanically) hoist or sink \p R out /// of a loop region. 
static bool cannotHoistOrSinkRecipe(const VPRecipeBase &R) { @@ -826,8 +861,8 @@ static VPValue *optimizeEarlyExitInductionUser(VPlan &Plan, VPValue *Op, ScalarEvolution &SE) { VPValue *Incoming, *Mask; - if (!match(Op, m_VPInstruction( - m_FirstActiveLane(m_VPValue(Mask)), m_VPValue(Incoming)))) + if (!match(Op, m_ExtractLane(m_FirstActiveLane(m_VPValue(Mask)), + m_VPValue(Incoming)))) return nullptr; auto *WideIV = getOptimizableIVOf(Incoming, SE); @@ -1327,8 +1362,7 @@ static void simplifyRecipe(VPSingleDefRecipe *Def, VPTypeAnalysis &TypeInfo) { } // Look through ExtractPenultimateElement (BuildVector ....). - if (match(Def, m_VPInstruction( - m_BuildVector()))) { + if (match(Def, m_ExtractPenultimateElement(m_BuildVector()))) { auto *BuildVector = cast(Def->getOperand(0)); Def->replaceAllUsesWith( BuildVector->getOperand(BuildVector->getNumOperands() - 2)); @@ -2140,6 +2174,32 @@ bool VPlanTransforms::adjustFixedOrderRecurrences(VPlan &Plan, // Set the first operand of RecurSplice to FOR again, after replacing // all users. RecurSplice->setOperand(0, FOR); + + // Check for users extracting at the penultimate active lane of the FOR. + // If only a single lane is active in the current iteration, we need to + // select the last element from the previous iteration (from the FOR phi + // directly). 
+ for (VPUser *U : RecurSplice->users()) { + if (!match(U, m_ExtractLane(m_LastActiveLane(m_VPValue()), + m_Specific(RecurSplice)))) + continue; + + VPBuilder B(cast(U)); + VPValue *LastActiveLane = cast(U)->getOperand(0); + Type *I64Ty = Type::getInt64Ty(Plan.getContext()); + VPValue *Zero = Plan.getOrAddLiveIn(ConstantInt::get(I64Ty, 0)); + VPValue *One = Plan.getOrAddLiveIn(ConstantInt::get(I64Ty, 1)); + VPValue *PenultimateIndex = + B.createNaryOp(Instruction::Sub, {LastActiveLane, One}); + VPValue *PenultimateLastIter = + B.createNaryOp(VPInstruction::ExtractLane, + {PenultimateIndex, FOR->getBackedgeValue()}); + VPValue *LastPrevIter = + B.createNaryOp(VPInstruction::ExtractLastElement, FOR); + VPValue *Cmp = B.createICmp(CmpInst::ICMP_EQ, LastActiveLane, Zero); + VPValue *Sel = B.createSelect(Cmp, LastPrevIter, PenultimateLastIter); + cast(U)->replaceAllUsesWith(Sel); + } } return true; } @@ -3528,6 +3588,34 @@ void VPlanTransforms::convertToConcreteRecipes(VPlan &Plan) { ToRemove.push_back(Expr); } + // Expand LastActiveLane into Not + FirstActiveLane + Sub. + auto *LastActiveL = dyn_cast(&R); + if (LastActiveL && + LastActiveL->getOpcode() == VPInstruction::LastActiveLane) { + // Create Not(Mask) for all operands. + SmallVector NotMasks; + for (VPValue *Op : LastActiveL->operands()) { + VPValue *NotMask = Builder.createNot(Op, LastActiveL->getDebugLoc()); + NotMasks.push_back(NotMask); + } + + // Create FirstActiveLane on the inverted masks. + VPValue *FirstInactiveLane = Builder.createNaryOp( + VPInstruction::FirstActiveLane, NotMasks, + LastActiveL->getDebugLoc(), "first.inactive.lane"); + + // Subtract 1 to get the last active lane. 
+ VPValue *One = Plan.getOrAddLiveIn( + ConstantInt::get(Type::getInt64Ty(Plan.getContext()), 1)); + VPValue *LastLane = Builder.createNaryOp( + Instruction::Sub, {FirstInactiveLane, One}, + LastActiveL->getDebugLoc(), "last.active.lane"); + + LastActiveL->replaceAllUsesWith(LastLane); + ToRemove.push_back(LastActiveL); + continue; + } + VPValue *VectorStep; VPValue *ScalarStep; if (!match(&R, m_VPInstruction( @@ -3675,7 +3763,7 @@ tryToMatchAndCreateExtendedReduction(VPReductionRecipe *Red, VPCostContext &Ctx, cast(VecOp)->computeCost(VF, Ctx); InstructionCost RedCost = Red->computeCost(VF, Ctx); - if (isa(Red)) { + if (Red->isPartialReduction()) { TargetTransformInfo::PartialReductionExtendKind ExtKind = TargetTransformInfo::getPartialReductionExtendKind(ExtOpc); // FIXME: Move partial reduction creation, costing and clamping @@ -3716,8 +3804,6 @@ tryToMatchAndCreateExtendedReduction(VPReductionRecipe *Red, VPCostContext &Ctx, static VPExpressionRecipe * tryToMatchAndCreateMulAccumulateReduction(VPReductionRecipe *Red, VPCostContext &Ctx, VFRange &Range) { - bool IsPartialReduction = isa(Red); - unsigned Opcode = RecurrenceDescriptor::getOpcode(Red->getRecurrenceKind()); if (Opcode != Instruction::Add && Opcode != Instruction::Sub) return nullptr; @@ -3735,7 +3821,7 @@ tryToMatchAndCreateMulAccumulateReduction(VPReductionRecipe *Red, Ext0 ? Ctx.Types.inferScalarType(Ext0->getOperand(0)) : RedTy; InstructionCost MulAccCost; - if (IsPartialReduction) { + if (Red->isPartialReduction()) { Type *SrcTy2 = Ext1 ? Ctx.Types.inferScalarType(Ext1->getOperand(0)) : nullptr; // FIXME: Move partial reduction creation, costing and clamping @@ -4010,6 +4096,122 @@ void VPlanTransforms::hoistInvariantLoads(VPlan &Plan) { } } +// Returns the intersection of metadata from a group of loads. 
+static VPIRMetadata getCommonLoadMetadata(ArrayRef Loads) { + VPIRMetadata CommonMetadata = *Loads.front(); + for (VPReplicateRecipe *Load : drop_begin(Loads)) + CommonMetadata.intersect(*Load); + return CommonMetadata; +} + +void VPlanTransforms::hoistPredicatedLoads(VPlan &Plan, ScalarEvolution &SE, + const Loop *L) { + VPRegionBlock *LoopRegion = Plan.getVectorLoopRegion(); + VPTypeAnalysis TypeInfo(Plan); + VPDominatorTree VPDT(Plan); + + // Group predicated loads by their address SCEV. + DenseMap> LoadsByAddress; + for (VPBlockBase *Block : vp_depth_first_shallow(LoopRegion->getEntry())) { + auto *VPBB = cast(Block); + for (VPRecipeBase &R : *VPBB) { + auto *RepR = dyn_cast(&R); + if (!RepR || RepR->getOpcode() != Instruction::Load || + !RepR->isPredicated()) + continue; + + VPValue *Addr = RepR->getOperand(0); + const SCEV *AddrSCEV = vputils::getSCEVExprForVPValue(Addr, SE, L); + if (!isa(AddrSCEV)) + LoadsByAddress[AddrSCEV].push_back(RepR); + } + } + + // For each address, collect loads with complementary masks, sort by + // dominance, and use the earliest load. + for (auto &[Addr, Loads] : LoadsByAddress) { + if (Loads.size() < 2) + continue; + + // Collect groups of loads with complementary masks. + SmallVector> LoadGroups; + for (VPReplicateRecipe *&LoadI : Loads) { + if (!LoadI) + continue; + + VPValue *MaskI = LoadI->getMask(); + Type *TypeI = TypeInfo.inferScalarType(LoadI); + SmallVector Group; + Group.push_back(LoadI); + LoadI = nullptr; + + // Find all loads with the same type. + for (VPReplicateRecipe *&LoadJ : Loads) { + if (!LoadJ) + continue; + + Type *TypeJ = TypeInfo.inferScalarType(LoadJ); + if (TypeI == TypeJ) { + Group.push_back(LoadJ); + LoadJ = nullptr; + } + } + + // Check if any load in the group has a complementary mask with another, + // that is M1 == NOT(M2) or M2 == NOT(M1). 
+ bool HasComplementaryMask = + any_of(drop_begin(Group), [MaskI](VPReplicateRecipe *Load) { + VPValue *MaskJ = Load->getMask(); + return match(MaskI, m_Not(m_Specific(MaskJ))) || + match(MaskJ, m_Not(m_Specific(MaskI))); + }); + + if (HasComplementaryMask) + LoadGroups.push_back(std::move(Group)); + } + + // For each group, check memory dependencies and hoist the earliest load. + for (auto &Group : LoadGroups) { + // Sort loads by dominance order, with earliest (most dominating) first. + sort(Group, [&VPDT](VPReplicateRecipe *A, VPReplicateRecipe *B) { + return VPDT.properlyDominates(A, B); + }); + + VPReplicateRecipe *EarliestLoad = Group.front(); + VPBasicBlock *FirstBB = EarliestLoad->getParent(); + VPBasicBlock *LastBB = Group.back()->getParent(); + + // Check that the load doesn't alias with stores between first and last. + if (!canHoistLoadWithNoAliasCheck(EarliestLoad, FirstBB, LastBB)) + continue; + + // Find the load with minimum alignment to use. + auto *LoadWithMinAlign = + *min_element(Group, [](VPReplicateRecipe *A, VPReplicateRecipe *B) { + return cast(A->getUnderlyingInstr())->getAlign() < + cast(B->getUnderlyingInstr())->getAlign(); + }); + + // Collect common metadata from all loads in the group. + VPIRMetadata CommonMetadata = getCommonLoadMetadata(Group); + + // Create an unpredicated load with minimum alignment using the earliest + // dominating address and common metadata. + auto *UnpredicatedLoad = new VPReplicateRecipe( + LoadWithMinAlign->getUnderlyingInstr(), EarliestLoad->getOperand(0), + /*IsSingleScalar=*/false, /*Mask=*/nullptr, /*Flags=*/{}, + CommonMetadata); + UnpredicatedLoad->insertBefore(EarliestLoad); + + // Replace all loads in the group with the unpredicated load. 
+ for (VPReplicateRecipe *Load : Group) { + Load->replaceAllUsesWith(UnpredicatedLoad); + Load->eraseFromParent(); + } + } + } +} + void VPlanTransforms::materializeConstantVectorTripCount( VPlan &Plan, ElementCount BestVF, unsigned BestUF, PredicatedScalarEvolution &PSE) { diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.h b/llvm/lib/Transforms/Vectorize/VPlanTransforms.h index 5fd3f756c55e3..6245a5107a5d0 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.h +++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.h @@ -314,6 +314,12 @@ struct VPlanTransforms { /// plan using noalias metadata. static void hoistInvariantLoads(VPlan &Plan); + /// Hoist predicated loads from the same address to the loop entry block, if + /// they are guaranteed to execute on both paths (i.e., in replicate regions + /// with complementary masks P and NOT P). + static void hoistPredicatedLoads(VPlan &Plan, ScalarEvolution &SE, + const Loop *L); + // Materialize vector trip counts for constants early if it can simply be // computed as (Original TC / VF * UF) * VF * UF. 
static void diff --git a/llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp b/llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp index 4af00f986aab0..c497c5925c277 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp @@ -352,6 +352,7 @@ void UnrollState::unrollBlock(VPBlockBase *VPB) { VPValue *Op1; if (match(&R, m_VPInstruction(m_VPValue(Op1))) || match(&R, m_FirstActiveLane(m_VPValue(Op1))) || + match(&R, m_LastActiveLane(m_VPValue(Op1))) || match(&R, m_VPInstruction( m_VPValue(), m_VPValue(), m_VPValue(Op1))) || match(&R, m_VPInstruction( @@ -364,17 +365,21 @@ void UnrollState::unrollBlock(VPBlockBase *VPB) { continue; } VPValue *Op0; - if (match(&R, m_VPInstruction( - m_VPValue(Op0), m_VPValue(Op1)))) { + if (match(&R, m_ExtractLane(m_VPValue(Op0), m_VPValue(Op1)))) { addUniformForAllParts(cast(&R)); for (unsigned Part = 1; Part != UF; ++Part) R.addOperand(getValueForPart(Op1, Part)); continue; } if (match(&R, m_ExtractLastElement(m_VPValue(Op0))) || - match(&R, m_VPInstruction( - m_VPValue(Op0)))) { + match(&R, m_ExtractPenultimateElement(m_VPValue(Op0)))) { addUniformForAllParts(cast(&R)); + if (isa(Op0)) { + assert(match(&R, m_ExtractLastElement(m_VPValue())) && + "can only extract last element of FOR"); + continue; + } + if (Plan.hasScalarVFOnly()) { auto *I = cast(&R); // Extracting from end with VF = 1 implies retrieving the last or diff --git a/llvm/lib/Transforms/Vectorize/VPlanUtils.cpp b/llvm/lib/Transforms/Vectorize/VPlanUtils.cpp index 839a304904e8b..c7a0fd7407a4e 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanUtils.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanUtils.cpp @@ -195,10 +195,9 @@ bool vputils::isSingleScalar(const VPValue *VPV) { return VPI->isSingleScalar() || VPI->isVectorToScalar() || (preservesUniformity(VPI->getOpcode()) && all_of(VPI->operands(), isSingleScalar)); - if (isa(VPV)) - return false; - if (isa( - VPV)) + if (auto *RR = dyn_cast(VPV)) + return !RR->isPartialReduction(); + 
if (isa(VPV)) return true; if (auto *Expr = dyn_cast(VPV)) return Expr->isSingleScalar(); @@ -270,7 +269,7 @@ unsigned vputils::getVFScaleFactor(VPRecipeBase *R) { return 1; if (auto *RR = dyn_cast(R)) return RR->getVFScaleFactor(); - if (auto *RR = dyn_cast(R)) + if (auto *RR = dyn_cast(R)) return RR->getVFScaleFactor(); if (auto *ER = dyn_cast(R)) return ER->getVFScaleFactor(); diff --git a/llvm/lib/Transforms/Vectorize/VPlanValue.h b/llvm/lib/Transforms/Vectorize/VPlanValue.h index 63eacd3d75721..b9f5847ec731c 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanValue.h +++ b/llvm/lib/Transforms/Vectorize/VPlanValue.h @@ -349,7 +349,6 @@ class VPDef { VPInterleaveSC, VPReductionEVLSC, VPReductionSC, - VPPartialReductionSC, VPReplicateSC, VPScalarIVStepsSC, VPVectorPointerSC, diff --git a/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp b/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp index 34754a1ea3992..2d63d2a787f88 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp @@ -18,6 +18,7 @@ #include "VPlanDominatorTree.h" #include "VPlanHelpers.h" #include "VPlanPatternMatch.h" +#include "VPlanUtils.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/TypeSwitch.h" @@ -44,6 +45,9 @@ class VPlanVerifier { /// incoming value into EVL's recipe. bool verifyEVLRecipe(const VPInstruction &EVL) const; + /// Verify that \p LastActiveLane's operand is guaranteed to be a prefix-mask. 
+ bool verifyLastActiveLaneRecipe(const VPInstruction &LastActiveLane) const; + bool verifyVPBasicBlock(const VPBasicBlock *VPBB); bool verifyBlock(const VPBlockBase *VPB); @@ -221,6 +225,44 @@ bool VPlanVerifier::verifyEVLRecipe(const VPInstruction &EVL) const { }); } +bool VPlanVerifier::verifyLastActiveLaneRecipe( + const VPInstruction &LastActiveLane) const { + assert(LastActiveLane.getOpcode() == VPInstruction::LastActiveLane && + "must be called with VPInstruction::LastActiveLane"); + + if (LastActiveLane.getNumOperands() < 1) { + errs() << "LastActiveLane must have at least one operand\n"; + return false; + } + + const VPlan &Plan = *LastActiveLane.getParent()->getPlan(); + // All operands must be prefix-mask. Currently we check for header masks or + // EVL-derived masks, as those are currently the only operands in practice, + // but this may need updating in the future. + for (VPValue *Op : LastActiveLane.operands()) { + if (vputils::isHeaderMask(Op, Plan)) + continue; + + // Masks derived from EVL are also fine. 
+ auto BroadcastOrEVL = + m_CombineOr(m_Broadcast(m_EVL(m_VPValue())), m_EVL(m_VPValue())); + if (match(Op, m_CombineOr(m_ICmp(m_StepVector(), BroadcastOrEVL), + m_ICmp(BroadcastOrEVL, m_StepVector())))) + continue; + + errs() << "LastActiveLane operand "; +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) + VPSlotTracker Tracker(&Plan); + Op->printAsOperand(errs(), Tracker); +#endif + errs() << " must be prefix mask (a header mask or an " + "EVL-derived mask currently)\n"; + return false; + } + + return true; +} + bool VPlanVerifier::verifyVPBasicBlock(const VPBasicBlock *VPBB) { if (!verifyPhiRecipes(VPBB)) return false; @@ -313,6 +355,10 @@ bool VPlanVerifier::verifyVPBasicBlock(const VPBasicBlock *VPBB) { return false; } break; + case VPInstruction::LastActiveLane: + if (!verifyLastActiveLaneRecipe(*VPI)) + return false; + break; default: break; } diff --git a/llvm/test/Analysis/LoopCacheAnalysis/crash-after-pr164798.ll b/llvm/test/Analysis/LoopCacheAnalysis/crash-after-pr164798.ll new file mode 100644 index 0000000000000..e6b6d1753adb7 --- /dev/null +++ b/llvm/test/Analysis/LoopCacheAnalysis/crash-after-pr164798.ll @@ -0,0 +1,33 @@ +; RUN: opt < %s -passes='print' -disable-output + +; Ensure no crash happens after PR #164798 + +target datalayout = "p21:32:16" + +define i16 @f() { +entry: + br label %for.cond1.preheader + +for.cond1.preheader: + %i.02 = phi i16 [ 0, %entry ], [ %inc8, %for.cond.cleanup3 ] + %idxprom = zext i16 %i.02 to i32 + %arrayidx = getelementptr [18 x i16], ptr addrspace(21) null, i32 %idxprom + br label %for.body4 + +for.cond.cleanup: + ret i16 0 + +for.cond.cleanup3: + %inc8 = add i16 %i.02, 1 + %exitcond3.not = icmp eq i16 %inc8, 0 + br i1 %exitcond3.not, label %for.cond.cleanup, label %for.cond1.preheader + +for.body4: + %j.01 = phi i16 [ 0, %for.cond1.preheader ], [ %inc.2, %for.body4 ] + %idxprom5 = zext i16 %j.01 to i32 + %arrayidx6 = getelementptr i16, ptr addrspace(21) %arrayidx, i32 %idxprom5 + store i16 0, ptr addrspace(21) 
%arrayidx6, align 1 + %inc.2 = add i16 %j.01, 1 + %exitcond.not.2 = icmp eq i16 %inc.2, 18 + br i1 %exitcond.not.2, label %for.cond.cleanup3, label %for.body4 +} diff --git a/llvm/test/Assembler/invalid-ptrauth-const6.ll b/llvm/test/Assembler/invalid-ptrauth-const6.ll new file mode 100644 index 0000000000000..6e8e1d386acc8 --- /dev/null +++ b/llvm/test/Assembler/invalid-ptrauth-const6.ll @@ -0,0 +1,6 @@ +; RUN: not llvm-as < %s 2>&1 | FileCheck %s + +@var = global i32 0 + +; CHECK: error: constant ptrauth deactivation symbol must be a pointer +@ptr = global ptr ptrauth (ptr @var, i32 0, i64 65535, ptr null, i64 0) diff --git a/llvm/test/Bitcode/compatibility.ll b/llvm/test/Bitcode/compatibility.ll index e21786e5ee330..53cbe2d6ffd37 100644 --- a/llvm/test/Bitcode/compatibility.ll +++ b/llvm/test/Bitcode/compatibility.ll @@ -217,9 +217,13 @@ declare void @g.f1() ; CHECK: @g.sanitize_address_dyninit = global i32 0, sanitize_address_dyninit ; CHECK: @g.sanitize_multiple = global i32 0, sanitize_memtag, sanitize_address_dyninit +@ds = external global i32 + ; ptrauth constant @auth_var = global ptr ptrauth (ptr @g1, i32 0, i64 65535, ptr null) ; CHECK: @auth_var = global ptr ptrauth (ptr @g1, i32 0, i64 65535) +@auth_var.ds = global ptr ptrauth (ptr @g1, i32 0, i64 65535, ptr null, ptr @ds) +; CHECK: @auth_var.ds = global ptr ptrauth (ptr @g1, i32 0, i64 65535, ptr null, ptr @ds) ;; Aliases ; Format: @ = [Linkage] [Visibility] [DLLStorageClass] [ThreadLocal] diff --git a/llvm/test/Bitcode/operand-bundles-bc-analyzer.ll b/llvm/test/Bitcode/operand-bundles-bc-analyzer.ll index 5628e17b4936e..01e5b3f6673ae 100644 --- a/llvm/test/Bitcode/operand-bundles-bc-analyzer.ll +++ b/llvm/test/Bitcode/operand-bundles-bc-analyzer.ll @@ -14,6 +14,7 @@ ; CHECK-NEXT: @test_v2i64(<2 x i64> %n) { +; CHECK-SD-LABEL: test_v2i64: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: sshr v0.2d, v0.2d, #35 +; CHECK-SD-NEXT: xtn v0.2s, v0.2d +; CHECK-SD-NEXT: usra v0.2s, v0.2s, #31 +; 
CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_v2i64: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: ushr v1.2d, v0.2d, #63 +; CHECK-GI-NEXT: sshr v0.2d, v0.2d, #35 +; CHECK-GI-NEXT: xtn v1.2s, v1.2d +; CHECK-GI-NEXT: xtn v0.2s, v0.2d +; CHECK-GI-NEXT: add v0.2s, v1.2s, v0.2s +; CHECK-GI-NEXT: ret +entry: + %shr = lshr <2 x i64> %n, splat (i64 63) + %vmovn.i4 = trunc nuw nsw <2 x i64> %shr to <2 x i32> + %shr1 = ashr <2 x i64> %n, splat (i64 35) + %vmovn.i = trunc nsw <2 x i64> %shr1 to <2 x i32> + %add = add nsw <2 x i32> %vmovn.i4, %vmovn.i + ret <2 x i32> %add +} + +define <4 x i16> @test_v4i32(<4 x i32> %n) { +; CHECK-SD-LABEL: test_v4i32: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: sshr v0.4s, v0.4s, #17 +; CHECK-SD-NEXT: xtn v0.4h, v0.4s +; CHECK-SD-NEXT: usra v0.4h, v0.4h, #15 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_v4i32: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: ushr v1.4s, v0.4s, #31 +; CHECK-GI-NEXT: sshr v0.4s, v0.4s, #17 +; CHECK-GI-NEXT: xtn v1.4h, v1.4s +; CHECK-GI-NEXT: xtn v0.4h, v0.4s +; CHECK-GI-NEXT: add v0.4h, v1.4h, v0.4h +; CHECK-GI-NEXT: ret +entry: + %shr = lshr <4 x i32> %n, splat (i32 31) + %vmovn.i4 = trunc nuw nsw <4 x i32> %shr to <4 x i16> + %shr1 = ashr <4 x i32> %n, splat (i32 17) + %vmovn.i = trunc nsw <4 x i32> %shr1 to <4 x i16> + %add = add nsw <4 x i16> %vmovn.i4, %vmovn.i + ret <4 x i16> %add +} + +define <8 x i8> @test_v8i16(<8 x i16> %n) { +; CHECK-SD-LABEL: test_v8i16: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: sshr v0.8h, v0.8h, #9 +; CHECK-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-SD-NEXT: usra v0.8b, v0.8b, #7 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_v8i16: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: ushr v1.8h, v0.8h, #15 +; CHECK-GI-NEXT: sshr v0.8h, v0.8h, #9 +; CHECK-GI-NEXT: xtn v1.8b, v1.8h +; CHECK-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-GI-NEXT: add v0.8b, v1.8b, v0.8b +; CHECK-GI-NEXT: ret +entry: + %shr = lshr <8 x i16> %n, splat (i16 15) + %vmovn.i4 = trunc nuw nsw <8 x 
i16> %shr to <8 x i8> + %shr1 = ashr <8 x i16> %n, splat (i16 9) + %vmovn.i = trunc nsw <8 x i16> %shr1 to <8 x i8> + %add = add nsw <8 x i8> %vmovn.i4, %vmovn.i + ret <8 x i8> %add +} + +define <2 x i32> @test_v2i64_smallsrl(<2 x i64> %n) { +; CHECK-LABEL: test_v2i64_smallsrl: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ushr v1.2d, v0.2d, #62 +; CHECK-NEXT: sshr v0.2d, v0.2d, #35 +; CHECK-NEXT: xtn v1.2s, v1.2d +; CHECK-NEXT: xtn v0.2s, v0.2d +; CHECK-NEXT: add v0.2s, v1.2s, v0.2s +; CHECK-NEXT: ret +entry: + %shr = lshr <2 x i64> %n, splat (i64 62) + %vmovn.i4 = trunc nuw nsw <2 x i64> %shr to <2 x i32> + %shr1 = ashr <2 x i64> %n, splat (i64 35) + %vmovn.i = trunc nsw <2 x i64> %shr1 to <2 x i32> + %add = add nsw <2 x i32> %vmovn.i4, %vmovn.i + ret <2 x i32> %add +} + +define <2 x i32> @test_v2i64_smallsra(<2 x i64> %n) { +; CHECK-LABEL: test_v2i64_smallsra: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ushr v1.2d, v0.2d, #63 +; CHECK-NEXT: shrn v0.2s, v0.2d, #27 +; CHECK-NEXT: xtn v1.2s, v1.2d +; CHECK-NEXT: add v0.2s, v1.2s, v0.2s +; CHECK-NEXT: ret +entry: + %shr = lshr <2 x i64> %n, splat (i64 63) + %vmovn.i4 = trunc nuw nsw <2 x i64> %shr to <2 x i32> + %shr1 = ashr <2 x i64> %n, splat (i64 27) + %vmovn.i = trunc nsw <2 x i64> %shr1 to <2 x i32> + %add = add nsw <2 x i32> %vmovn.i4, %vmovn.i + ret <2 x i32> %add +} + diff --git a/llvm/test/CodeGen/AArch64/deactivation-symbols.ll b/llvm/test/CodeGen/AArch64/deactivation-symbols.ll new file mode 100644 index 0000000000000..571b1067134b8 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/deactivation-symbols.ll @@ -0,0 +1,73 @@ +; RUN: llc < %s -O0 -mtriple=aarch64-none-linux-gnu -mattr=+pauth | FileCheck --check-prefixes=CHECK,O0 %s +; RUN: llc < %s -O2 -mtriple=aarch64-none-linux-gnu -mattr=+pauth | FileCheck --check-prefixes=CHECK,O2 %s + +@ds = external global i8 + +declare void @f(ptr %p) + +; CHECK: call: +define void @call(ptr %p) { + ; CHECK: [[LABEL:.L.*]]: + ; CHECK-NEXT: .reloc [[LABEL]], 
R_AARCH64_PATCHINST, ds + ; CHECK-NEXT: bl f + notail call void @f(ptr %p) [ "deactivation-symbol"(ptr @ds) ] + ret void +} + +; CHECK: pauth_sign_zero: +define i64 @pauth_sign_zero(i64 %p) { + ; O0: mov x8, xzr + ; CHECK: [[LABEL:.L.*]]: + ; CHECK-NEXT: .reloc [[LABEL]], R_AARCH64_PATCHINST, ds + ; O0-NEXT: pacia x0, x8 + ; O2-NEXT: paciza x0 + %signed = call i64 @llvm.ptrauth.sign(i64 %p, i32 0, i64 0) [ "deactivation-symbol"(ptr @ds) ] + ret i64 %signed +} + +; CHECK: pauth_sign_const: +define i64 @pauth_sign_const(i64 %p) { + ; CHECK: mov x16, #12345 + ; CHECK-NEXT: [[LABEL:.L.*]]: + ; CHECK-NEXT: .reloc [[LABEL]], R_AARCH64_PATCHINST, ds + ; CHECK-NEXT: pacia x0, x16 + %signed = call i64 @llvm.ptrauth.sign(i64 %p, i32 0, i64 12345) [ "deactivation-symbol"(ptr @ds) ] + ret i64 %signed +} + +; CHECK: pauth_sign: +define i64 @pauth_sign(i64 %p, i64 %d) { + ; CHECK: [[LABEL:.L.*]]: + ; CHECK-NEXT: .reloc [[LABEL]], R_AARCH64_PATCHINST, ds + ; CHECK-NEXT: pacia x0, x1 + %signed = call i64 @llvm.ptrauth.sign(i64 %p, i32 0, i64 %d) [ "deactivation-symbol"(ptr @ds) ] + ret i64 %signed +} + +; CHECK: pauth_auth_zero: +define i64 @pauth_auth_zero(i64 %p) { + ; CHECK: [[LABEL:.L.*]]: + ; CHECK-NEXT: .reloc [[LABEL]], R_AARCH64_PATCHINST, ds + ; CHECK-NEXT: autiza x0 + %authed = call i64 @llvm.ptrauth.auth(i64 %p, i32 0, i64 0) [ "deactivation-symbol"(ptr @ds) ] + ret i64 %authed +} + +; CHECK: pauth_auth_const: +define i64 @pauth_auth_const(i64 %p) { + ; CHECK: mov x8, #12345 + ; CHECK-NEXT: [[LABEL:.L.*]]: + ; CHECK-NEXT: .reloc [[LABEL]], R_AARCH64_PATCHINST, ds + ; CHECK-NEXT: autia x0, x8 + %authed = call i64 @llvm.ptrauth.auth(i64 %p, i32 0, i64 12345) [ "deactivation-symbol"(ptr @ds) ] + ret i64 %authed +} + +; CHECK: pauth_auth: +define i64 @pauth_auth(i64 %p, i64 %d) { + ; CHECK: [[LABEL:.L.*]]: + ; CHECK-NEXT: .reloc [[LABEL]], R_AARCH64_PATCHINST, ds + ; CHECK-NEXT: autia x0, x1 + %authed = call i64 @llvm.ptrauth.auth(i64 %p, i32 0, i64 %d) [ 
"deactivation-symbol"(ptr @ds) ] + ret i64 %authed +} diff --git a/llvm/test/CodeGen/AArch64/ptrauth-irelative.ll b/llvm/test/CodeGen/AArch64/ptrauth-irelative.ll new file mode 100644 index 0000000000000..4ee1c19a86490 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/ptrauth-irelative.ll @@ -0,0 +1,95 @@ +; RUN: llc -mtriple aarch64-linux-gnu -mattr=+pauth -filetype=asm -o - %s | FileCheck %s + +; CHECK: nullref: +; CHECK-NEXT: [[PLACE:.*]]: +; CHECK-NEXT: .section .text.startup +; CHECK-NEXT: [[FUNC:.*]]: +; CHECK-NEXT: movz x0, #0 +; CHECK-NEXT: mov x1, #1 +; CHECK-NEXT: b __emupac_pacda +; CHECK-NEXT: .section .rodata +; CHECK-NEXT: .xword [[FUNC]]@FUNCINIT +@nullref = constant ptr ptrauth (ptr null, i32 2, i64 1, ptr null), align 8 + +@dsolocal = external dso_local global i8 + +; CHECK: dsolocalref: +; CHECK-NEXT: [[PLACE:.*]]: +; CHECK-NEXT: .section .text.startup +; CHECK-NEXT: [[FUNC:.*]]: +; CHECK-NEXT: adrp x0, dsolocal +; CHECK-NEXT: add x0, x0, :lo12:dsolocal +; CHECK-NEXT: mov x1, #2 +; CHECK-NEXT: b __emupac_pacda +; CHECK-NEXT: .section .rodata +; CHECK-NEXT: .xword [[FUNC]]@FUNCINIT +@dsolocalref = constant ptr ptrauth (ptr @dsolocal, i32 2, i64 2, ptr null), align 8 + +@ds = external global i8 + +; CHECK: dsolocalrefds: +; CHECK-NEXT: [[PLACE:.*]]: +; CHECK-NEXT: .section .text.startup +; CHECK-NEXT: [[FUNC:.*]]: +; CHECK-NEXT: adrp x0, dsolocal +; CHECK-NEXT: add x0, x0, :lo12:dsolocal +; CHECK-NEXT: mov x1, #2 +; CHECK-NEXT: [[LABEL:.L.*]]: +; CHECK-NEXT: .reloc [[LABEL]], R_AARCH64_PATCHINST, ds +; CHECK-NEXT: b __emupac_pacda +; CHECK-NEXT: ret +; CHECK-NEXT: .section .rodata +; CHECK-NEXT: .xword [[FUNC]]@FUNCINIT +@dsolocalrefds = constant ptr ptrauth (ptr @dsolocal, i32 2, i64 2, ptr null, ptr @ds), align 8 + +; CHECK: dsolocalref8: +; CHECK-NEXT: [[PLACE:.*]]: +; CHECK-NEXT: .section .text.startup +; CHECK-NEXT: [[FUNC:.*]]: +; CHECK-NEXT: adrp x0, dsolocal+8 +; CHECK-NEXT: add x0, x0, :lo12:dsolocal+8 +; CHECK-NEXT: mov x1, #3 +; CHECK-NEXT: b 
__emupac_pacda +; CHECK-NEXT: .section .rodata +; CHECK-NEXT: .xword [[FUNC]]@FUNCINIT +@dsolocalref8 = constant ptr ptrauth (ptr getelementptr (i8, ptr @dsolocal, i64 8), i32 2, i64 3, ptr null), align 8 + +; CHECK: disc: +; CHECK-NEXT: [[PLACE:.*]]: +; CHECK-NEXT: .section .text.startup +; CHECK-NEXT: [[FUNC:.*]]: +; CHECK-NEXT: adrp x0, dsolocal +; CHECK-NEXT: add x0, x0, :lo12:dsolocal +; CHECK-NEXT: adrp x1, [[PLACE]] +; CHECK-NEXT: add x1, x1, :lo12:[[PLACE]] +; CHECK-NEXT: b __emupac_pacda +; CHECK-NEXT: .section .rodata +; CHECK-NEXT: .xword [[FUNC]]@FUNCINIT +@disc = constant ptr ptrauth (ptr @dsolocal, i32 2, i64 0, ptr @disc), align 8 + +@global = external global i8 + +; CHECK: globalref: +; CHECK-NEXT: [[PLACE:.*]]: +; CHECK-NEXT: .section .text.startup +; CHECK-NEXT: [[FUNC:.*]]: +; CHECK-NEXT: adrp x0, :got:global +; CHECK-NEXT: ldr x0, [x0, :got_lo12:global] +; CHECK-NEXT: mov x1, #4 +; CHECK-NEXT: b __emupac_pacda +; CHECK-NEXT: .section .rodata +; CHECK-NEXT: .xword [[FUNC]]@FUNCINIT +@globalref = constant ptr ptrauth (ptr @global, i32 2, i64 4, ptr null), align 8 + +; CHECK: globalref8: +; CHECK-NEXT: [[PLACE:.*]]: +; CHECK-NEXT: .section .text.startup +; CHECK-NEXT: [[FUNC:.*]]: +; CHECK-NEXT: adrp x0, :got:global +; CHECK-NEXT: ldr x0, [x0, :got_lo12:global] +; CHECK-NEXT: add x0, x0, #8 +; CHECK-NEXT: mov x1, #5 +; CHECK-NEXT: b __emupac_pacda +; CHECK-NEXT: .section .rodata +; CHECK-NEXT: .xword [[FUNC]]@FUNCINIT +@globalref8 = constant ptr ptrauth (ptr getelementptr (i8, ptr @global, i64 8), i32 2, i64 5, ptr null), align 8 diff --git a/llvm/test/CodeGen/AArch64/ptrauth-type-info-vptr-discr.ll b/llvm/test/CodeGen/AArch64/ptrauth-type-info-vptr-discr.ll index fbd777911aecb..31ef6cba6fbdd 100644 --- a/llvm/test/CodeGen/AArch64/ptrauth-type-info-vptr-discr.ll +++ b/llvm/test/CodeGen/AArch64/ptrauth-type-info-vptr-discr.ll @@ -1,4 +1,4 @@ -; RUN: llc -mtriple aarch64-linux-gnu -mattr=+pauth -filetype=asm -o - %s | FileCheck --check-prefix=ELF %s 
+; RUN: llc -mtriple aarch64-linux-musl -mattr=+pauth -filetype=asm -o - %s | FileCheck --check-prefix=ELF %s ; RUN: llc -mtriple aarch64-apple-darwin -mattr=+pauth -filetype=asm -o - %s | FileCheck --check-prefix=MACHO %s ; ELF-LABEL: _ZTI10Disc: diff --git a/llvm/test/CodeGen/AMDGPU/lower-kernel-lds-constexpr.ll b/llvm/test/CodeGen/AMDGPU/lower-kernel-lds-constexpr.ll index 4fef9624d8ad6..459615139d745 100644 --- a/llvm/test/CodeGen/AMDGPU/lower-kernel-lds-constexpr.ll +++ b/llvm/test/CodeGen/AMDGPU/lower-kernel-lds-constexpr.ll @@ -14,13 +14,13 @@ ; Use constant from different kernels ;. -; CHECK: @llvm.amdgcn.kernel.k0.lds = internal addrspace(3) global %llvm.amdgcn.kernel.k0.lds.t poison, align 2 -; CHECK: @llvm.amdgcn.kernel.k1.lds = internal addrspace(3) global %llvm.amdgcn.kernel.k1.lds.t poison, align 2 -; CHECK: @llvm.amdgcn.kernel.k2.lds = internal addrspace(3) global %llvm.amdgcn.kernel.k2.lds.t poison, align 4 -; CHECK: @llvm.amdgcn.kernel.k3.lds = internal addrspace(3) global %llvm.amdgcn.kernel.k3.lds.t poison, align 16 -; CHECK: @llvm.amdgcn.kernel.k4.lds = internal addrspace(3) global %llvm.amdgcn.kernel.k4.lds.t poison, align 2 -; CHECK: @llvm.amdgcn.kernel.k5.lds = internal addrspace(3) global %llvm.amdgcn.kernel.k5.lds.t poison, align 16 -; CHECK: @llvm.amdgcn.kernel.k6.lds = internal addrspace(3) global %llvm.amdgcn.kernel.k6.lds.t poison, align 16 +; CHECK: @llvm.amdgcn.kernel.k0.lds = internal addrspace(3) global %llvm.amdgcn.kernel.k0.lds.t poison, align 2, !absolute_symbol !0 +; CHECK: @llvm.amdgcn.kernel.k1.lds = internal addrspace(3) global %llvm.amdgcn.kernel.k1.lds.t poison, align 2, !absolute_symbol !0 +; CHECK: @llvm.amdgcn.kernel.k2.lds = internal addrspace(3) global %llvm.amdgcn.kernel.k2.lds.t poison, align 4, !absolute_symbol !0 +; CHECK: @llvm.amdgcn.kernel.k3.lds = internal addrspace(3) global %llvm.amdgcn.kernel.k3.lds.t poison, align 16, !absolute_symbol !0 +; CHECK: @llvm.amdgcn.kernel.k4.lds = internal addrspace(3) global 
%llvm.amdgcn.kernel.k4.lds.t poison, align 2, !absolute_symbol !0 +; CHECK: @llvm.amdgcn.kernel.k5.lds = internal addrspace(3) global %llvm.amdgcn.kernel.k5.lds.t poison, align 16, !absolute_symbol !0 +; CHECK: @llvm.amdgcn.kernel.k6.lds = internal addrspace(3) global %llvm.amdgcn.kernel.k6.lds.t poison, align 16, !absolute_symbol !0 ;. define amdgpu_kernel void @k0(i64 %x) { ; CHECK-LABEL: @k0( @@ -67,7 +67,7 @@ define amdgpu_kernel void @k3(i64 %x) { ; CHECK-LABEL: @k3( ; CHECK-NEXT: %1 = getelementptr inbounds [32 x i8], ptr addrspace(3) @llvm.amdgcn.kernel.k3.lds, i32 0, i32 16 ; CHECK-NEXT: %ptr1 = addrspacecast ptr addrspace(3) %1 to ptr -; CHECK-NEXT: store i64 1, ptr %ptr1, align 1 +; CHECK-NEXT: store i64 1, ptr %ptr1, align 16 ; CHECK-NEXT: %2 = getelementptr inbounds [32 x i8], ptr addrspace(3) @llvm.amdgcn.kernel.k3.lds, i32 0, i32 24 ; CHECK-NEXT: %ptr2 = addrspacecast ptr addrspace(3) %2 to ptr ; CHECK-NEXT: store i64 2, ptr %ptr2, align 8 @@ -98,9 +98,9 @@ define amdgpu_kernel void @k4(i64 %x) { ; Multiple constexpr use in a same instruction. define amdgpu_kernel void @k5() { ; CHECK-LABEL: @k5( -; CHECK-NEXT: %1 = addrspacecast ptr addrspace(3) @llvm.amdgcn.kernel.k5.lds to ptr -; CHECK-NEXT: %2 = addrspacecast ptr addrspace(3) @llvm.amdgcn.kernel.k5.lds to ptr -; CHECK-NEXT: call void poison(ptr %1, ptr %2) +; CHECK-NEXT: %1 = addrspacecast ptr addrspace(3) @llvm.amdgcn.kernel.k5.lds to ptr +; CHECK-NEXT: call void poison(ptr %1, ptr %1) +; CHECK-NEXT: ret void ; call void poison(ptr addrspacecast (ptr addrspace(3) @lds.4 to ptr), ptr addrspacecast (ptr addrspace(3) @lds.4 to ptr)) ret void @@ -113,13 +113,22 @@ define amdgpu_kernel void @k5() { ; expression operands of store should be replaced by equivalent instruction sequences. 
define amdgpu_kernel void @k6() { ; CHECK-LABEL: @k6( - -; CHECK-NEXT: %1 = getelementptr inbounds [4 x i32], ptr addrspace(3) @llvm.amdgcn.kernel.k6.lds, i32 0, i32 2 -; CHECK-NEXT: %2 = ptrtoint ptr addrspace(3) %1 to i32 -; CHECK-NEXT: %3 = getelementptr inbounds [4 x i32], ptr addrspace(3) @llvm.amdgcn.kernel.k6.lds, i32 0, i32 2 -; CHECK-NEXT: store i32 %2, ptr addrspace(3) %3, align 8 -; CHECK-NEXT: ret void +; CHECK-NEXT: %1 = getelementptr inbounds [4 x i32], ptr addrspace(3) @llvm.amdgcn.kernel.k6.lds, i32 0, i32 2 +; CHECK-NEXT: %2 = ptrtoint ptr addrspace(3) %1 to i32 +; CHECK-NEXT: %3 = getelementptr inbounds [4 x i32], ptr addrspace(3) @llvm.amdgcn.kernel.k6.lds, i32 0, i32 2 +; CHECK-NEXT: store i32 %2, ptr addrspace(3) %3, align 8 +; CHECK-NEXT: ret void ; + store i32 ptrtoint (ptr addrspace(3) getelementptr inbounds ([4 x i32], ptr addrspace(3) @lds.5, i32 0, i32 2) to i32), ptr addrspace(3) getelementptr inbounds ([4 x i32], ptr addrspace(3) @lds.5, i32 0, i32 2) ret void } +;. +; CHECK: attributes #0 = { "amdgpu-lds-size"="2" } +; CHECK: attributes #1 = { "amdgpu-lds-size"="4" } +; CHECK: attributes #2 = { "amdgpu-lds-size"="32" } +; CHECK: attributes #3 = { "amdgpu-lds-size"="2020" } +; CHECK: attributes #4 = { "amdgpu-lds-size"="16" } +;. +; CHECK: !0 = !{i32 0, i32 1} +;. 
diff --git a/llvm/test/CodeGen/AMDGPU/lower-module-lds-constantexpr.ll b/llvm/test/CodeGen/AMDGPU/lower-module-lds-constantexpr.ll index a2761193c2d65..deb2d00e8bd81 100644 --- a/llvm/test/CodeGen/AMDGPU/lower-module-lds-constantexpr.ll +++ b/llvm/test/CodeGen/AMDGPU/lower-module-lds-constantexpr.ll @@ -1,3 +1,4 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt -S -mtriple=amdgcn-- -amdgpu-lower-module-lds --amdgpu-lower-module-lds-strategy=module < %s | FileCheck %s ; RUN: opt -S -mtriple=amdgcn-- -passes=amdgpu-lower-module-lds --amdgpu-lower-module-lds-strategy=module < %s | FileCheck %s @@ -9,73 +10,78 @@ @kern = addrspace(3) global float poison, align 4 ; @a_func is only used from a non-kernel function so is rewritten -; CHECK-NOT: @a_func ; @b_both is used from a non-kernel function so is rewritten -; CHECK-NOT: @b_both ; sorted both < func, so @b_both at null and @a_func at 4 @b_both = addrspace(3) global float poison, align 4 -; CHECK: @llvm.amdgcn.module.lds = internal addrspace(3) global %llvm.amdgcn.module.lds.t poison, align 4 -; CHECK: @llvm.amdgcn.kernel.timestwo.lds = internal addrspace(3) global %llvm.amdgcn.kernel.timestwo.lds.t poison, align 4 -; CHECK-LABEL: @get_func() -; CHECK: %0 = addrspacecast ptr addrspace(3) @llvm.amdgcn.module.lds to ptr -; CHECK: %1 = ptrtoint ptr %0 to i64 -; CHECK: %2 = addrspacecast ptr addrspace(3) @llvm.amdgcn.module.lds to ptr -; CHECK: %3 = ptrtoint ptr %2 to i64 -; CHECK: %4 = add i64 %1, %3 -; CHECK: %5 = inttoptr i64 %4 to ptr -; CHECK: %6 = load i32, ptr %5, align 4 -; CHECK: ret i32 %6 define i32 @get_func() local_unnamed_addr #0 { +; CHECK-LABEL: define i32 @get_func() local_unnamed_addr { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = addrspacecast ptr addrspace(3) @llvm.amdgcn.module.lds to ptr +; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[TMP0]] to i64 +; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[TMP1]], [[TMP1]] +; CHECK-NEXT: 
[[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr +; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4 +; CHECK-NEXT: ret i32 [[TMP4]] +; entry: %0 = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr addrspacecast (ptr addrspace(3) @a_func to ptr) to i64), i64 ptrtoint (ptr addrspacecast (ptr addrspace(3) @a_func to ptr) to i64)) to ptr), align 4 ret i32 %0 } -; CHECK-LABEL: @set_func(i32 %x) -; CHECK: %0 = addrspacecast ptr addrspace(3) getelementptr inbounds (%llvm.amdgcn.module.lds.t, ptr addrspace(3) @llvm.amdgcn.module.lds, i32 0, i32 1) to ptr -; CHECK: %1 = ptrtoint ptr %0 to i64 -; CHECK: %2 = addrspacecast ptr addrspace(3) getelementptr inbounds (%llvm.amdgcn.module.lds.t, ptr addrspace(3) @llvm.amdgcn.module.lds, i32 0, i32 1) to ptr -; CHECK: %3 = ptrtoint ptr %2 to i64 -; CHECK: %4 = add i64 %1, %3 -; CHECK: %5 = inttoptr i64 %4 to ptr -; CHECK: store i32 %x, ptr %5, align 4 -; CHECK: ret void define void @set_func(i32 %x) { +; CHECK-LABEL: define void @set_func( +; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = addrspacecast ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_MODULE_LDS_T:%.*]], ptr addrspace(3) @llvm.amdgcn.module.lds, i32 0, i32 1) to ptr +; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[TMP0]] to i64 +; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[TMP1]], [[TMP1]] +; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr +; CHECK-NEXT: store i32 [[X]], ptr [[TMP3]], align 4 +; CHECK-NEXT: ret void +; entry: store i32 %x, ptr inttoptr (i64 add (i64 ptrtoint (ptr addrspacecast (ptr addrspace(3) @b_both to ptr) to i64), i64 ptrtoint (ptr addrspacecast (ptr addrspace(3) @b_both to ptr) to i64)) to ptr), align 4 ret void } -; CHECK-LABEL: @timestwo() #0 -; CHECK-NOT: call void @llvm.donothing() -; CHECK: %1 = addrspacecast ptr addrspace(3) @llvm.amdgcn.kernel.timestwo.lds to ptr -; CHECK: %2 = ptrtoint ptr %1 to i64 -; CHECK: %3 = addrspacecast ptr addrspace(3) getelementptr inbounds 
(%llvm.amdgcn.kernel.timestwo.lds.t, ptr addrspace(3) @llvm.amdgcn.kernel.timestwo.lds, i32 0, i32 1) to ptr -; CHECK: %4 = ptrtoint ptr %3 to i64 -; CHECK: %5 = add i64 %2, %4 -; CHECK: %6 = inttoptr i64 %5 to ptr -; CHECK: %ld = load i32, ptr %6, align 4 -; CHECK: %mul = mul i32 %ld, 2 -; CHECK: %7 = addrspacecast ptr addrspace(3) getelementptr inbounds (%llvm.amdgcn.kernel.timestwo.lds.t, ptr addrspace(3) @llvm.amdgcn.kernel.timestwo.lds, i32 0, i32 1) to ptr -; CHECK: %8 = ptrtoint ptr %7 to i64 -; CHECK: %9 = addrspacecast ptr addrspace(3) @llvm.amdgcn.kernel.timestwo.lds to ptr -; CHECK: %10 = ptrtoint ptr %9 to i64 -; CHECK: %11 = add i64 %8, %10 -; CHECK: %12 = inttoptr i64 %11 to ptr -; CHECK: store i32 %mul, ptr %12, align 4 -; CHECK: ret void define amdgpu_kernel void @timestwo() { +; CHECK-LABEL: define amdgpu_kernel void @timestwo( +; CHECK-SAME: ) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: [[TMP1:%.*]] = addrspacecast ptr addrspace(3) @llvm.amdgcn.kernel.timestwo.lds to ptr +; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[TMP1]] to i64 +; CHECK-NEXT: [[TMP3:%.*]] = addrspacecast ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_KERNEL_TIMESTWO_LDS_T:%.*]], ptr addrspace(3) @llvm.amdgcn.kernel.timestwo.lds, i32 0, i32 1) to ptr +; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[TMP3]] to i64 +; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[TMP2]], [[TMP4]] +; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr +; CHECK-NEXT: [[LD:%.*]] = load i32, ptr [[TMP6]], align 4 +; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[LD]], 2 +; CHECK-NEXT: [[TMP7:%.*]] = addrspacecast ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_KERNEL_TIMESTWO_LDS_T]], ptr addrspace(3) @llvm.amdgcn.kernel.timestwo.lds, i32 0, i32 1) to ptr +; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[TMP7]] to i64 +; CHECK-NEXT: [[TMP9:%.*]] = addrspacecast ptr addrspace(3) @llvm.amdgcn.kernel.timestwo.lds to ptr +; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[TMP9]] to i64 +; CHECK-NEXT: [[TMP11:%.*]] = add i64 
[[TMP8]], [[TMP10]] +; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr +; CHECK-NEXT: store i32 [[MUL]], ptr [[TMP12]], align 4 +; CHECK-NEXT: ret void +; %ld = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr addrspacecast (ptr addrspace(3) @b_both to ptr) to i64), i64 ptrtoint (ptr addrspacecast (ptr addrspace(3) @kern to ptr) to i64)) to ptr), align 4 %mul = mul i32 %ld, 2 store i32 %mul, ptr inttoptr (i64 add (i64 ptrtoint (ptr addrspacecast (ptr addrspace(3) @kern to ptr) to i64), i64 ptrtoint (ptr addrspacecast (ptr addrspace(3) @b_both to ptr) to i64)) to ptr), align 4 ret void } -; CHECK-LABEL: @through_functions() #0 define amdgpu_kernel void @through_functions() { +; CHECK-LABEL: define amdgpu_kernel void @through_functions( +; CHECK-SAME: ) #[[ATTR0]] { +; CHECK-NEXT: call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.module.lds) ] +; CHECK-NEXT: [[LD:%.*]] = call i32 @get_func() +; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[LD]], 4 +; CHECK-NEXT: call void @set_func(i32 [[MUL]]) +; CHECK-NEXT: ret void +; %ld = call i32 @get_func() %mul = mul i32 %ld, 4 call void @set_func(i32 %mul) diff --git a/llvm/test/CodeGen/AMDGPU/same-lds-variable-multiple-use-in-one-phi-node.ll b/llvm/test/CodeGen/AMDGPU/same-lds-variable-multiple-use-in-one-phi-node.ll new file mode 100644 index 0000000000000..35a9bee03411f --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/same-lds-variable-multiple-use-in-one-phi-node.ll @@ -0,0 +1,51 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 +; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -amdgpu-lower-module-lds %s -o - | FileCheck %s +; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -passes=amdgpu-lower-module-lds %s -o - | FileCheck %s + +@lds = internal unnamed_addr addrspace(3) global [6144 x half] poison, align 2 + +define amdgpu_kernel void @test(ptr addrspace(1) %out) { +; CHECK-LABEL: define amdgpu_kernel void @test( +; CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) 
#[[ATTR0:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: switch i32 0, label %[[BB_3:.*]] [ +; CHECK-NEXT: i32 18, label %[[BB_2:.*]] +; CHECK-NEXT: i32 1, label %[[BB_2]] +; CHECK-NEXT: i32 0, label %[[BB_3]] +; CHECK-NEXT: ] +; CHECK: [[BB_1:.*]]: +; CHECK-NEXT: [[TMP0:%.*]] = addrspacecast ptr addrspace(3) @llvm.amdgcn.kernel.test.lds to ptr +; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[TMP0]] to i64 +; CHECK-NEXT: switch i32 0, label %[[BB_3]] [ +; CHECK-NEXT: i32 18, label %[[BB_2]] +; CHECK-NEXT: i32 1, label %[[BB_2]] +; CHECK-NEXT: i32 0, label %[[BB_3]] +; CHECK-NEXT: ] +; CHECK: [[BB_2]]: +; CHECK-NEXT: [[PHI:%.*]] = phi i64 [ [[TMP1]], %[[BB_1]] ], [ [[TMP1]], %[[BB_1]] ], [ 10, %[[ENTRY]] ], [ 10, %[[ENTRY]] ] +; CHECK-NEXT: store i64 [[PHI]], ptr addrspace(1) [[OUT]], align 8 +; CHECK-NEXT: br label %[[BB_3]] +; CHECK: [[BB_3]]: +; CHECK-NEXT: ret void +; +entry: + switch i32 0, label %bb.3 [ + i32 18, label %bb.2 + i32 1, label %bb.2 + i32 0, label %bb.3 + ] +bb.1: + switch i32 0, label %bb.3 [ + i32 18, label %bb.2 + i32 1, label %bb.2 + i32 0, label %bb.3 + ] + +bb.2: + %phi = phi i64 [ ptrtoint (ptr addrspacecast (ptr addrspace(3) @lds to ptr) to i64), %bb.1 ], [ ptrtoint (ptr addrspacecast (ptr addrspace(3) @lds to ptr) to i64), %bb.1 ], [10, %entry], [10, %entry] + store i64 %phi, ptr addrspace(1) %out, align 8 + br label %bb.3 + +bb.3: + ret void +} diff --git a/llvm/test/CodeGen/MIR/AArch64/deactivation-symbols.mir b/llvm/test/CodeGen/MIR/AArch64/deactivation-symbols.mir new file mode 100644 index 0000000000000..6542508ede116 --- /dev/null +++ b/llvm/test/CodeGen/MIR/AArch64/deactivation-symbols.mir @@ -0,0 +1,12 @@ +# RUN: llc < %s -O0 -mtriple=aarch64-none-linux-gnu -mattr=+pauth -run-pass irtranslator -x mir | \ +# RUN: llc -x mir -run-pass legalizer | FileCheck %s + +--- | + @ds = external global i8 + + define i64 @pauth_sign_zero(i64 %p) { + ; CHECK: G_INTRINSIC intrinsic(@llvm.ptrauth.sign), %0(s64), 0, %2(s64), deactivation-symbol 
@ds + %signed = call i64 @llvm.ptrauth.sign(i64 %p, i32 0, i64 0) [ "deactivation-symbol"(ptr @ds) ] + ret i64 %signed + } +... diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/double-arith.ll b/llvm/test/CodeGen/RISCV/GlobalISel/double-arith.ll index 4246aa545dd0e..ae6418ee97ba5 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/double-arith.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/double-arith.ll @@ -121,8 +121,6 @@ define double @fdiv_d(double %a, double %b) nounwind { ret double %1 } -declare double @llvm.sqrt.f64(double) - define double @fsqrt_d(double %a) nounwind { ; CHECKIFD-LABEL: fsqrt_d: ; CHECKIFD: # %bb.0: @@ -150,8 +148,6 @@ define double @fsqrt_d(double %a) nounwind { ret double %1 } -declare double @llvm.copysign.f64(double, double) - define double @fsgnj_d(double %a, double %b) nounwind { ; CHECKIFD-LABEL: fsgnj_d: ; CHECKIFD: # %bb.0: @@ -261,8 +257,6 @@ define double @fsgnjn_d(double %a, double %b) nounwind { ret double %2 } -declare double @llvm.fabs.f64(double) - ; This function performs extra work to ensure that ; DAGCombiner::visitBITCAST doesn't replace the fabs with an and. 
define double @fabs_d(double %a, double %b) nounwind { @@ -305,8 +299,6 @@ define double @fabs_d(double %a, double %b) nounwind { ret double %3 } -declare double @llvm.minnum.f64(double, double) - define double @fmin_d(double %a, double %b) nounwind { ; CHECKIFD-LABEL: fmin_d: ; CHECKIFD: # %bb.0: @@ -334,8 +326,6 @@ define double @fmin_d(double %a, double %b) nounwind { ret double %1 } -declare double @llvm.maxnum.f64(double, double) - define double @fmax_d(double %a, double %b) nounwind { ; CHECKIFD-LABEL: fmax_d: ; CHECKIFD: # %bb.0: @@ -363,8 +353,6 @@ define double @fmax_d(double %a, double %b) nounwind { ret double %1 } -declare double @llvm.minimumnum.f64(double, double) - define double @fminimumnum_d(double %a, double %b) nounwind { ; CHECKIFD-LABEL: fminimumnum_d: ; CHECKIFD: # %bb.0: @@ -392,8 +380,6 @@ define double @fminimumnum_d(double %a, double %b) nounwind { ret double %1 } -declare double @llvm.maximumnum.f64(double, double) - define double @fmaximumnum_d(double %a, double %b) nounwind { ; CHECKIFD-LABEL: fmaximumnum_d: ; CHECKIFD: # %bb.0: @@ -421,8 +407,6 @@ define double @fmaximumnum_d(double %a, double %b) nounwind { ret double %1 } -declare double @llvm.fma.f64(double, double, double) - define double @fmadd_d(double %a, double %b, double %c) nounwind { ; CHECKIFD-LABEL: fmadd_d: ; CHECKIFD: # %bb.0: @@ -771,7 +755,6 @@ define double @fnmadd_d_3(double %a, double %b, double %c) nounwind { ret double %neg } - define double @fnmadd_nsz(double %a, double %b, double %c) nounwind { ; CHECKIFD-LABEL: fnmadd_nsz: ; CHECKIFD: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/double-intrinsics.ll b/llvm/test/CodeGen/RISCV/GlobalISel/double-intrinsics.ll index 4b0acda839ad6..906e4bc41d960 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/double-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/double-intrinsics.ll @@ -10,8 +10,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -global-isel \ ; RUN: | FileCheck -check-prefix=RV64I %s 
-declare double @llvm.sqrt.f64(double) - define double @sqrt_f64(double %a) nounwind { ; CHECKIFD-LABEL: sqrt_f64: ; CHECKIFD: # %bb.0: @@ -81,8 +79,6 @@ define double @powi_f64(double %a, i32 %b) nounwind { ret double %1 } -declare double @llvm.sin.f64(double) - define double @sin_f64(double %a) nounwind { ; RV32IFD-LABEL: sin_f64: ; RV32IFD: # %bb.0: @@ -123,8 +119,6 @@ define double @sin_f64(double %a) nounwind { ret double %1 } -declare double @llvm.cos.f64(double) - define double @cos_f64(double %a) nounwind { ; RV32IFD-LABEL: cos_f64: ; RV32IFD: # %bb.0: @@ -257,8 +251,6 @@ define double @sincos_f64(double %a) nounwind { ret double %3 } -declare double @llvm.pow.f64(double, double) - define double @pow_f64(double %a, double %b) nounwind { ; RV32IFD-LABEL: pow_f64: ; RV32IFD: # %bb.0: @@ -299,8 +291,6 @@ define double @pow_f64(double %a, double %b) nounwind { ret double %1 } -declare double @llvm.exp.f64(double) - define double @exp_f64(double %a) nounwind { ; RV32IFD-LABEL: exp_f64: ; RV32IFD: # %bb.0: @@ -341,8 +331,6 @@ define double @exp_f64(double %a) nounwind { ret double %1 } -declare double @llvm.exp2.f64(double) - define double @exp2_f64(double %a) nounwind { ; RV32IFD-LABEL: exp2_f64: ; RV32IFD: # %bb.0: @@ -423,8 +411,6 @@ define double @exp10_f64(double %a) nounwind { ret double %1 } -declare double @llvm.log.f64(double) - define double @log_f64(double %a) nounwind { ; RV32IFD-LABEL: log_f64: ; RV32IFD: # %bb.0: @@ -465,8 +451,6 @@ define double @log_f64(double %a) nounwind { ret double %1 } -declare double @llvm.log10.f64(double) - define double @log10_f64(double %a) nounwind { ; RV32IFD-LABEL: log10_f64: ; RV32IFD: # %bb.0: @@ -507,8 +491,6 @@ define double @log10_f64(double %a) nounwind { ret double %1 } -declare double @llvm.log2.f64(double) - define double @log2_f64(double %a) nounwind { ; RV32IFD-LABEL: log2_f64: ; RV32IFD: # %bb.0: @@ -549,8 +531,6 @@ define double @log2_f64(double %a) nounwind { ret double %1 } -declare double 
@llvm.fma.f64(double, double, double) - define double @fma_f64(double %a, double %b, double %c) nounwind { ; CHECKIFD-LABEL: fma_f64: ; CHECKIFD: # %bb.0: @@ -578,8 +558,6 @@ define double @fma_f64(double %a, double %b, double %c) nounwind { ret double %1 } -declare double @llvm.fmuladd.f64(double, double, double) - define double @fmuladd_f64(double %a, double %b, double %c) nounwind { ; CHECKIFD-LABEL: fmuladd_f64: ; CHECKIFD: # %bb.0: @@ -621,8 +599,6 @@ define double @fmuladd_f64(double %a, double %b, double %c) nounwind { ret double %1 } -declare double @llvm.fabs.f64(double) - define double @fabs_f64(double %a) nounwind { ; CHECKIFD-LABEL: fabs_f64: ; CHECKIFD: # %bb.0: @@ -644,8 +620,6 @@ define double @fabs_f64(double %a) nounwind { ret double %1 } -declare double @llvm.minnum.f64(double, double) - define double @minnum_f64(double %a, double %b) nounwind { ; CHECKIFD-LABEL: minnum_f64: ; CHECKIFD: # %bb.0: @@ -673,8 +647,6 @@ define double @minnum_f64(double %a, double %b) nounwind { ret double %1 } -declare double @llvm.maxnum.f64(double, double) - define double @maxnum_f64(double %a, double %b) nounwind { ; CHECKIFD-LABEL: maxnum_f64: ; CHECKIFD: # %bb.0: @@ -702,8 +674,6 @@ define double @maxnum_f64(double %a, double %b) nounwind { ret double %1 } -declare double @llvm.copysign.f64(double, double) - define double @copysign_f64(double %a, double %b) nounwind { ; CHECKIFD-LABEL: copysign_f64: ; CHECKIFD: # %bb.0: @@ -731,8 +701,6 @@ define double @copysign_f64(double %a, double %b) nounwind { ret double %1 } -declare double @llvm.floor.f64(double) - define double @floor_f64(double %a) nounwind { ; RV32IFD-LABEL: floor_f64: ; RV32IFD: # %bb.0: @@ -773,8 +741,6 @@ define double @floor_f64(double %a) nounwind { ret double %1 } -declare double @llvm.ceil.f64(double) - define double @ceil_f64(double %a) nounwind { ; RV32IFD-LABEL: ceil_f64: ; RV32IFD: # %bb.0: @@ -815,8 +781,6 @@ define double @ceil_f64(double %a) nounwind { ret double %1 } -declare double 
@llvm.trunc.f64(double) - define double @trunc_f64(double %a) nounwind { ; RV32IFD-LABEL: trunc_f64: ; RV32IFD: # %bb.0: @@ -857,8 +821,6 @@ define double @trunc_f64(double %a) nounwind { ret double %1 } -declare double @llvm.rint.f64(double) - define double @rint_f64(double %a) nounwind { ; RV32IFD-LABEL: rint_f64: ; RV32IFD: # %bb.0: @@ -899,8 +861,6 @@ define double @rint_f64(double %a) nounwind { ret double %1 } -declare double @llvm.nearbyint.f64(double) - define double @nearbyint_f64(double %a) nounwind { ; RV32IFD-LABEL: nearbyint_f64: ; RV32IFD: # %bb.0: @@ -941,8 +901,6 @@ define double @nearbyint_f64(double %a) nounwind { ret double %1 } -declare double @llvm.round.f64(double) - define double @round_f64(double %a) nounwind { ; RV32IFD-LABEL: round_f64: ; RV32IFD: # %bb.0: @@ -983,8 +941,6 @@ define double @round_f64(double %a) nounwind { ret double %1 } -declare double @llvm.roundeven.f64(double) - define double @roundeven_f64(double %a) nounwind { ; RV32IFD-LABEL: roundeven_f64: ; RV32IFD: # %bb.0: @@ -1025,7 +981,6 @@ define double @roundeven_f64(double %a) nounwind { ret double %1 } -declare i1 @llvm.is.fpclass.f64(double, i32) define i1 @isnan_d_fpclass(double %x) { ; CHECKIFD-LABEL: isnan_d_fpclass: ; CHECKIFD: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/float-arith.ll b/llvm/test/CodeGen/RISCV/GlobalISel/float-arith.ll index 3222849641baf..06eeaa8d4e503 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/float-arith.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/float-arith.ll @@ -121,8 +121,6 @@ define float @fdiv_s(float %a, float %b) nounwind { ret float %1 } -declare float @llvm.sqrt.f32(float) - define float @fsqrt_s(float %a) nounwind { ; CHECKIF-LABEL: fsqrt_s: ; CHECKIF: # %bb.0: @@ -150,8 +148,6 @@ define float @fsqrt_s(float %a) nounwind { ret float %1 } -declare float @llvm.copysign.f32(float, float) - define float @fsgnj_s(float %a, float %b) nounwind { ; CHECKIF-LABEL: fsgnj_s: ; CHECKIF: # %bb.0: @@ -270,8 +266,6 @@ define float 
@fsgnjn_s(float %a, float %b) nounwind { ret float %3 } -declare float @llvm.fabs.f32(float) - define float @fabs_s(float %a, float %b) nounwind { ; CHECKIF-LABEL: fabs_s: ; CHECKIF: # %bb.0: @@ -311,8 +305,6 @@ define float @fabs_s(float %a, float %b) nounwind { ret float %3 } -declare float @llvm.minimumnum.f32(float, float) - define float @fminimumnum_s(float %a, float %b) nounwind { ; CHECKIF-LABEL: fminimumnum_s: ; CHECKIF: # %bb.0: @@ -340,8 +332,6 @@ define float @fminimumnum_s(float %a, float %b) nounwind { ret float %1 } -declare float @llvm.maximumnum.f32(float, float) - define float @fmaximumnum_s(float %a, float %b) nounwind { ; CHECKIF-LABEL: fmaximumnum_s: ; CHECKIF: # %bb.0: @@ -369,8 +359,6 @@ define float @fmaximumnum_s(float %a, float %b) nounwind { ret float %1 } -declare float @llvm.minnum.f32(float, float) - define float @fmin_s(float %a, float %b) nounwind { ; CHECKIF-LABEL: fmin_s: ; CHECKIF: # %bb.0: @@ -398,8 +386,6 @@ define float @fmin_s(float %a, float %b) nounwind { ret float %1 } -declare float @llvm.maxnum.f32(float, float) - define float @fmax_s(float %a, float %b) nounwind { ; CHECKIF-LABEL: fmax_s: ; CHECKIF: # %bb.0: @@ -427,8 +413,6 @@ define float @fmax_s(float %a, float %b) nounwind { ret float %1 } -declare float @llvm.fma.f32(float, float, float) - define float @fmadd_s(float %a, float %b, float %c) nounwind { ; CHECKIF-LABEL: fmadd_s: ; CHECKIF: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/iabs.ll b/llvm/test/CodeGen/RISCV/GlobalISel/iabs.ll index 31a78d4f72ceb..8ced3155c58ec 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/iabs.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/iabs.ll @@ -8,11 +8,6 @@ ; RUN: llc -mtriple=riscv64 -global-isel -mattr=+zbb -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefix=RV64ZBB -declare i8 @llvm.abs.i8(i8, i1 immarg) -declare i16 @llvm.abs.i16(i16, i1 immarg) -declare i32 @llvm.abs.i32(i32, i1 immarg) -declare i64 @llvm.abs.i64(i64, i1 immarg) - define i8 @abs8(i8 %x) { 
; RV32I-LABEL: abs8: ; RV32I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vacopy.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vacopy.ll index 48d72108335e4..cd1e95e88ab8a 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vacopy.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vacopy.ll @@ -4,7 +4,6 @@ ; RUN: llc -mtriple=riscv64 -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=RV64I %s -declare void @llvm.va_copy(ptr, ptr) define void @test_va_copy(ptr %dest_list, ptr %src_list) { ; RV32I-LABEL: name: test_va_copy ; RV32I: bb.1 (%ir-block.0): diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vararg.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vararg.ll index 74961d12c1c85..ad1544db84391 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vararg.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vararg.ll @@ -25,9 +25,6 @@ ; The nounwind attribute is omitted for some of the tests, to check that CFI ; directives are correctly generated. -declare void @llvm.va_start(ptr) -declare void @llvm.va_end(ptr) - declare void @notdead(ptr) ; Although frontends are recommended to not generate va_arg due to the lack of @@ -453,7 +450,6 @@ define i32 @va1_va_arg_alloca(ptr %fmt, ...) nounwind { ret i32 %1 } - define i32 @va1_va_arg(ptr %fmt, ...) nounwind { ; RV32-LABEL: name: va1_va_arg ; RV32: bb.1 (%ir-block.0): @@ -1249,8 +1245,6 @@ define void @va3_caller() nounwind { ret void } -declare void @llvm.va_copy(ptr, ptr) - define i32 @va4_va_copy(i32 %argno, ...) 
nounwind { ; ILP32-LABEL: name: va4_va_copy ; ILP32: bb.1 (%ir-block.0): diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rotl-rotr.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rotl-rotr.ll index 46d1661983c6a..f70e27906474a 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/rotl-rotr.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rotl-rotr.ll @@ -1749,7 +1749,6 @@ define signext i32 @rotl_32_mask_shared(i32 signext %a, i32 signext %b, i32 sign %3 = add i32 %1, %2 ret i32 %3 } -declare i32 @llvm.fshl.i32(i32, i32, i32) define signext i64 @rotl_64_mask_shared(i64 signext %a, i64 signext %b, i64 signext %amt) nounwind { ; RV32I-LABEL: rotl_64_mask_shared: @@ -1984,7 +1983,6 @@ define signext i64 @rotl_64_mask_shared(i64 signext %a, i64 signext %b, i64 sign %3 = add i64 %1, %2 ret i64 %3 } -declare i64 @llvm.fshl.i64(i64, i64, i64) define signext i32 @rotr_32_mask_shared(i32 signext %a, i32 signext %b, i32 signext %amt) nounwind { ; RV32I-LABEL: rotr_32_mask_shared: @@ -2050,7 +2048,6 @@ define signext i32 @rotr_32_mask_shared(i32 signext %a, i32 signext %b, i32 sign %3 = add i32 %1, %2 ret i32 %3 } -declare i32 @llvm.fshr.i32(i32, i32, i32) define signext i64 @rotr_64_mask_shared(i64 signext %a, i64 signext %b, i64 signext %amt) nounwind { ; RV32I-LABEL: rotr_64_mask_shared: @@ -2291,7 +2288,6 @@ define signext i64 @rotr_64_mask_shared(i64 signext %a, i64 signext %b, i64 sign %3 = add i64 %1, %2 ret i64 %3 } -declare i64 @llvm.fshr.i64(i64, i64, i64) define signext i32 @rotl_32_mask_multiple(i32 signext %a, i32 signext %b, i32 signext %amt) nounwind { ; RV32I-LABEL: rotl_32_mask_multiple: diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb-zbkb.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb-zbkb.ll index da95481a5e588..83cf228402295 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb-zbkb.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb-zbkb.ll @@ -111,8 +111,6 @@ define i64 @xnor_i64(i64 %a, i64 %b) nounwind { ret i64 %xor } -declare i32 @llvm.fshl.i32(i32, i32, 
i32) - define i32 @rol_i32(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: rol_i32: ; RV32I: # %bb.0: @@ -133,8 +131,6 @@ define i32 @rol_i32(i32 %a, i32 %b) nounwind { ; This test is presented here in case future expansions of the Bitmanip ; extensions introduce instructions suitable for this pattern. -declare i64 @llvm.fshl.i64(i64, i64, i64) - define i64 @rol_i64(i64 %a, i64 %b) nounwind { ; CHECK-LABEL: rol_i64: ; CHECK: # %bb.0: @@ -187,8 +183,6 @@ define i64 @rol_i64(i64 %a, i64 %b) nounwind { ret i64 %or } -declare i32 @llvm.fshr.i32(i32, i32, i32) - define i32 @ror_i32(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: ror_i32: ; RV32I: # %bb.0: @@ -209,8 +203,6 @@ define i32 @ror_i32(i32 %a, i32 %b) nounwind { ; This test is presented here in case future expansions of the Bitmanip ; extensions introduce instructions suitable for this pattern. -declare i64 @llvm.fshr.i64(i64, i64, i64) - define i64 @ror_i64(i64 %a, i64 %b) nounwind { ; CHECK-LABEL: ror_i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb.ll index 0b376dd779887..a59a46bdd0e7f 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv32 -global-isel -mattr=+zbb -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefixes=CHECK,RV32ZBB -declare i32 @llvm.ctlz.i32(i32, i1) - define i32 @ctlz_i32(i32 %a) nounwind { ; RV32I-LABEL: ctlz_i32: ; RV32I: # %bb.0: @@ -57,8 +55,6 @@ define i32 @ctlz_i32(i32 %a) nounwind { ret i32 %1 } -declare i64 @llvm.ctlz.i64(i64, i1) - define i64 @ctlz_i64(i64 %a) nounwind { ; RV32I-LABEL: ctlz_i64: ; RV32I: # %bb.0: @@ -153,8 +149,6 @@ define i64 @ctlz_i64(i64 %a) nounwind { ret i64 %1 } -declare i32 @llvm.cttz.i32(i32, i1) - define i32 @cttz_i32(i32 %a) nounwind { ; RV32I-LABEL: cttz_i32: ; RV32I: # %bb.0: @@ -197,8 +191,6 @@ define i32 @cttz_i32(i32 %a) nounwind { ret i32 %1 } -declare i64 
@llvm.cttz.i64(i64, i1) - define i64 @cttz_i64(i64 %a) nounwind { ; RV32I-LABEL: cttz_i64: ; RV32I: # %bb.0: @@ -276,8 +268,6 @@ define i64 @cttz_i64(i64 %a) nounwind { ret i64 %1 } -declare i32 @llvm.ctpop.i32(i32) - define i32 @ctpop_i32(i32 %a) nounwind { ; RV32I-LABEL: ctpop_i32: ; RV32I: # %bb.0: @@ -312,8 +302,6 @@ define i32 @ctpop_i32(i32 %a) nounwind { ret i32 %1 } -declare i64 @llvm.ctpop.i64(i64) - define i64 @ctpop_i64(i64 %a) nounwind { ; RV32I-LABEL: ctpop_i64: ; RV32I: # %bb.0: @@ -737,8 +725,6 @@ define i64 @maxu_i64(i64 %a, i64 %b) nounwind { ret i64 %cond } -declare i32 @llvm.abs.i32(i32, i1 immarg) - define i32 @abs_i32(i32 %x) { ; RV32I-LABEL: abs_i32: ; RV32I: # %bb.0: @@ -756,8 +742,6 @@ define i32 @abs_i32(i32 %x) { ret i32 %abs } -declare i64 @llvm.abs.i64(i64, i1 immarg) - define i64 @abs_i64(i64 %x) { ; CHECK-LABEL: abs_i64: ; CHECK: # %bb.0: @@ -805,8 +789,6 @@ define i64 @zexth_i64(i64 %a) nounwind { ret i64 %and } -declare i32 @llvm.bswap.i32(i32) - define i32 @bswap_i32(i32 %a) nounwind { ; RV32I-LABEL: bswap_i32: ; RV32I: # %bb.0: @@ -831,8 +813,6 @@ define i32 @bswap_i32(i32 %a) nounwind { ret i32 %1 } -declare i64 @llvm.bswap.i64(i64) - define i64 @bswap_i64(i64 %a) { ; RV32I-LABEL: bswap_i64: ; RV32I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb-zbkb.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb-zbkb.ll index b7f84ba696c26..8a21889334fb8 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb-zbkb.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb-zbkb.ll @@ -102,8 +102,6 @@ define i64 @xnor_i64(i64 %a, i64 %b) nounwind { ret i64 %xor } -declare i32 @llvm.fshl.i32(i32, i32, i32) - define signext i32 @rol_i32(i32 signext %a, i32 signext %b) nounwind { ; RV64I-LABEL: rol_i32: ; RV64I: # %bb.0: @@ -161,8 +159,6 @@ define signext i32 @rol_i32_neg_constant_rhs(i32 signext %a) nounwind { ret i32 %1 } -declare i64 @llvm.fshl.i64(i64, i64, i64) - define i64 @rol_i64(i64 %a, i64 %b) nounwind { ; RV64I-LABEL: rol_i64: 
; RV64I: # %bb.0: @@ -180,8 +176,6 @@ define i64 @rol_i64(i64 %a, i64 %b) nounwind { ret i64 %or } -declare i32 @llvm.fshr.i32(i32, i32, i32) - define signext i32 @ror_i32(i32 signext %a, i32 signext %b) nounwind { ; RV64I-LABEL: ror_i32: ; RV64I: # %bb.0: @@ -239,8 +233,6 @@ define signext i32 @ror_i32_neg_constant_rhs(i32 signext %a) nounwind { ret i32 %1 } -declare i64 @llvm.fshr.i64(i64, i64, i64) - define i64 @ror_i64(i64 %a, i64 %b) nounwind { ; RV64I-LABEL: ror_i64: ; RV64I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb.ll index 2dd3bb3119dd3..daac8440e5763 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -global-isel -mattr=+zbb -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV64ZBB -declare i32 @llvm.ctlz.i32(i32, i1) - ; FIXME: We don't need the shift pair before the beqz for RV64I. define signext i32 @ctlz_i32(i32 signext %a) nounwind { ; RV64I-LABEL: ctlz_i32: @@ -318,8 +316,6 @@ define i32 @ctlz_lshr_i32(i32 signext %a) { ret i32 %2 } -declare i64 @llvm.ctlz.i64(i64, i1) - define i64 @ctlz_i64(i64 %a) nounwind { ; RV64I-LABEL: ctlz_i64: ; RV64I: # %bb.0: @@ -393,8 +389,6 @@ define i64 @ctlz_i64(i64 %a) nounwind { ret i64 %1 } -declare i32 @llvm.cttz.i32(i32, i1) - define signext i32 @cttz_i32(i32 signext %a) nounwind { ; RV64I-LABEL: cttz_i32: ; RV64I: # %bb.0: @@ -595,8 +589,6 @@ define signext i32 @ffs_i32(i32 signext %a) nounwind { ret i32 %4 } -declare i64 @llvm.cttz.i64(i64, i1) - define i64 @cttz_i64(i64 %a) nounwind { ; RV64I-LABEL: cttz_i64: ; RV64I: # %bb.0: @@ -659,8 +651,6 @@ define i64 @cttz_i64(i64 %a) nounwind { ret i64 %1 } -declare i32 @llvm.ctpop.i32(i32) - define signext i32 @ctpop_i32(i32 signext %a) nounwind { ; RV64I-LABEL: ctpop_i32: ; RV64I: # %bb.0: @@ -756,8 +746,6 @@ define signext i32 @ctpop_i32_load(ptr %p) nounwind { ret i32 %1 } 
-declare i64 @llvm.ctpop.i64(i64) - define i64 @ctpop_i64(i64 %a) nounwind { ; RV64I-LABEL: ctpop_i64: ; RV64I: # %bb.0: @@ -1028,8 +1016,6 @@ define i64 @maxu_i64(i64 %a, i64 %b) nounwind { ret i64 %cond } -declare i32 @llvm.abs.i32(i32, i1 immarg) - define i32 @abs_i32(i32 %x) { ; RV64I-LABEL: abs_i32: ; RV64I: # %bb.0: @@ -1067,8 +1053,6 @@ define signext i32 @abs_i32_sext(i32 signext %x) { ret i32 %abs } -declare i64 @llvm.abs.i64(i64, i1 immarg) - define i64 @abs_i64(i64 %x) { ; RV64I-LABEL: abs_i64: ; RV64I: # %bb.0: @@ -1116,8 +1100,6 @@ define i64 @zexth_i64(i64 %a) nounwind { ret i64 %and } -declare i32 @llvm.bswap.i32(i32) - define signext i32 @bswap_i32(i32 signext %a) nounwind { ; RV64I-LABEL: bswap_i32: ; RV64I: # %bb.0: @@ -1173,8 +1155,6 @@ define void @bswap_i32_nosext(i32 signext %a, ptr %x) nounwind { ret void } -declare i64 @llvm.bswap.i64(i64) - define i64 @bswap_i64(i64 %a) { ; RV64I-LABEL: bswap_i64: ; RV64I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vadd.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vadd.ll index 21f14d941993b..c3cc472c4706f 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vadd.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vadd.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v -global-isel \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vadd.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv2i8.nxv2i8( - , - , - , - iXLen); - define 
@intrinsic_vadd_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv16i8_nxv16i8_nxv16i8( 
%0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -280,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv64i8.nxv64i8( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -302,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -327,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -349,13 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -373,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv2i16_nxv2i16_nxv2i16( 
%0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -395,13 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -419,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -441,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -465,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -487,13 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -511,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -533,13 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, iXLen); - define 
@intrinsic_vadd_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -557,12 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv32i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -579,13 +417,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -604,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -626,13 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -650,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -672,13 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -696,12 +501,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv4i32.nxv4i32( - , - 
, - , - iXLen); - define @intrinsic_vadd_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -718,13 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -742,12 +534,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -764,13 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -788,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv16i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -810,13 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -835,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -857,13 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv1i64.nxv1i64( 
- , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -881,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv2i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -903,13 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -927,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv4i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -949,13 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -973,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv8i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -995,13 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1020,12 +734,6 @@ entry: ret %a } -declare 
@llvm.riscv.vadd.nxv1i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vadd_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1042,13 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1066,12 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vadd_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1088,13 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1112,12 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vadd_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1134,13 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1158,12 +833,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vadd_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1180,13 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, iXLen); - define 
@intrinsic_vadd_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1204,12 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vadd_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1226,13 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1250,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vadd_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1272,13 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1296,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv64i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vadd_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1318,13 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1342,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vadd_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) 
nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1364,13 +981,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1388,12 +998,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vadd_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1410,13 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1434,12 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vadd_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1456,13 +1047,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1480,12 +1064,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vadd_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1502,13 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind 
{ ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1526,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vadd_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1548,13 +1113,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1572,12 +1130,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv32i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vadd_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1594,13 +1146,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1618,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vadd_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1640,13 +1179,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1664,12 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vadd_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vadd_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1686,13 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1710,12 +1229,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vadd_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1732,13 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1756,12 +1262,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vadd_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1778,13 +1278,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1802,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv16i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vadd_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1824,13 +1311,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { 
; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vfadd.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vfadd.ll index 9e092e4337526..1dd3a831903b5 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vfadd.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vfadd.ll @@ -10,12 +10,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfhmin,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d -global-isel | FileCheck %s -declare @llvm.riscv.vfadd.nxv1f16.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -34,13 +28,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -60,12 +47,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv2f16.nxv2f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -84,13 +65,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -110,12 +84,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv4f16.nxv4f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -134,13 +102,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfadd.mask.nxv4f16.nxv4f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -160,12 +121,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv8f16.nxv8f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -184,13 +139,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -210,12 +158,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv16f16.nxv16f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -234,13 +176,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -260,12 +195,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv32f16.nxv32f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -284,13 +213,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -311,12 +233,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv1f32.nxv1f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -335,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -361,12 +270,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv2f32.nxv2f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -385,13 +288,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -411,12 +307,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv4f32.nxv4f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -435,13 +325,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -461,12 +344,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv8f32.nxv8f32( - , - , - , - iXLen, iXLen); - define 
@intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -485,13 +362,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -511,12 +381,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv16f32.nxv16f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -535,13 +399,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -562,12 +419,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv1f64.nxv1f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -586,13 +437,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -612,12 +456,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv2f64.nxv2f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -636,13 +474,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfadd.mask.nxv2f64.nxv2f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -662,12 +493,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv4f64.nxv4f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -686,13 +511,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -712,12 +530,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv8f64.nxv8f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -736,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -763,12 +568,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv1f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -787,13 +586,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv1f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -813,12 +605,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv2f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -837,13 +623,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv2f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -863,12 +642,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv4f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -887,13 +660,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv4f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -913,12 +679,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv8f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -937,13 +697,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv8f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -963,12 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv16f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16( %0, 
half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -987,13 +734,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv16f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -1013,12 +753,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv32f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -1037,13 +771,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv32f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -1063,12 +790,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv1f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -1087,13 +808,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv1f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -1113,12 +827,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv2f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -1137,13 +845,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv2f32.f32( - , - , - float, - 
, - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -1163,12 +864,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv4f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -1187,13 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv4f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -1213,12 +901,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv8f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1237,13 +919,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv8f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1263,12 +938,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv16f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -1287,13 +956,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv16f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ 
-1313,12 +975,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv1f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1337,13 +993,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv1f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1363,12 +1012,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv2f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1387,13 +1030,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv2f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1413,12 +1049,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv4f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -1437,13 +1067,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv4f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -1463,12 +1086,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv8f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry @@ -1487,13 +1104,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv8f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vle.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vle.ll index 3a74bcd06222b..943fe3e201c4b 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vle.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vle.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ ; RUN: -global-isel -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vle.nxv1i64( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv1i64_nxv1i64(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv1i64( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv1i64_nxv1i64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -47,11 +35,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv2i64( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv2i64_nxv2i64(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -67,13 +50,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv2i64( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv2i64_nxv2i64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -90,11 +66,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv4i64( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv4i64_nxv4i64(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: 
intrinsic_vle_v_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -110,13 +81,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv4i64( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv4i64_nxv4i64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -133,11 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv8i64( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv8i64_nxv8i64(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -153,13 +112,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv8i64( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv8i64_nxv8i64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -176,11 +128,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv1f64( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv1f64_nxv1f64(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -196,13 +143,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv1f64( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv1f64_nxv1f64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -219,11 +159,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv2f64( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv2f64_nxv2f64(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -239,13 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv2f64( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv2f64_nxv2f64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -262,11 +190,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv4f64( - , - ptr, - iXLen); - define 
@intrinsic_vle_v_nxv4f64_nxv4f64(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -282,13 +205,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv4f64( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv4f64_nxv4f64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -305,11 +221,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv8f64( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv8f64_nxv8f64(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -325,13 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv8f64( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv8f64_nxv8f64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -348,11 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv1i32( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv1i32_nxv1i32(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -368,13 +267,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv1i32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv1i32_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -391,11 +283,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv2i32( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv2i32_nxv2i32(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -411,13 +298,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv2i32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv2i32_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -434,11 +314,6 @@ entry: ret %a } -declare 
@llvm.riscv.vle.nxv4i32( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv4i32_nxv4i32(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -454,13 +329,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv4i32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv4i32_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -477,11 +345,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv8i32( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv8i32_nxv8i32(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -497,13 +360,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv8i32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv8i32_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -520,11 +376,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv16i32( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv16i32_nxv16i32(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -540,13 +391,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv16i32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv16i32_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -563,11 +407,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv1f32( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv1f32_nxv1f32(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -583,13 +422,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv1f32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv1f32_nxv1f32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f32_nxv1f32: ; CHECK: # 
%bb.0: # %entry @@ -606,11 +438,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv2f32( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv2f32_nxv2f32(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -626,13 +453,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv2f32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv2f32_nxv2f32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -649,11 +469,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv4f32( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv4f32_nxv4f32(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -669,13 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv4f32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv4f32_nxv4f32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -692,11 +500,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv8f32( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv8f32_nxv8f32(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -712,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv8f32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv8f32_nxv8f32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -735,11 +531,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv16f32( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv16f32_nxv16f32(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -755,13 +546,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv16f32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv16f32_nxv16f32( %0, ptr %1, %2, iXLen %3) nounwind { ; 
CHECK-LABEL: intrinsic_vle_mask_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -778,11 +562,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv1i16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv1i16_nxv1i16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -798,13 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv1i16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv1i16_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -821,11 +593,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv2i16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv2i16_nxv2i16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -841,13 +608,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv2i16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv2i16_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -864,11 +624,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv4i16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv4i16_nxv4i16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -884,13 +639,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv4i16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv4i16_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -907,11 +655,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv8i16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv8i16_nxv8i16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -927,13 +670,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv8i16( - , - ptr, - , - iXLen, - iXLen); - define 
@intrinsic_vle_mask_v_nxv8i16_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -950,11 +686,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv16i16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv16i16_nxv16i16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -970,13 +701,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv16i16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv16i16_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -993,11 +717,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv32i16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv32i16_nxv32i16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1013,13 +732,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv32i16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv32i16_nxv32i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1036,11 +748,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv1f16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv1f16_nxv1f16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1056,13 +763,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv1f16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv1f16_nxv1f16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1079,11 +779,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv2f16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv2f16_nxv2f16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -1099,13 +794,6 @@ entry: ret %a } 
-declare @llvm.riscv.vle.mask.nxv2f16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv2f16_nxv2f16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -1122,11 +810,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv4f16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv4f16_nxv4f16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -1142,13 +825,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv4f16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv4f16_nxv4f16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -1165,11 +841,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv8f16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv8f16_nxv8f16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -1185,13 +856,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv8f16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv8f16_nxv8f16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -1208,11 +872,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv16f16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv16f16_nxv16f16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -1228,13 +887,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv16f16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv16f16_nxv16f16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -1251,11 +903,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv32f16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv32f16_nxv32f16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: 
intrinsic_vle_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -1271,13 +918,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv32f16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv32f16_nxv32f16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -1294,11 +934,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv1i8( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv1i8_nxv1i8(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1314,13 +949,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv1i8( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv1i8_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1337,11 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv2i8( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv2i8_nxv2i8(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1357,13 +980,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv2i8( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv2i8_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1380,11 +996,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv4i8( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv4i8_nxv4i8(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1400,13 +1011,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv4i8( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv4i8_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1423,11 +1027,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv8i8( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv8i8_nxv8i8(ptr %0, 
iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1443,13 +1042,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv8i8( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv8i8_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1466,11 +1058,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv16i8( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv16i8_nxv16i8(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1486,13 +1073,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv16i8( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv16i8_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1509,11 +1089,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv32i8( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv32i8_nxv32i8(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1529,13 +1104,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv32i8( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv32i8_nxv32i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1552,11 +1120,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv64i8( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv64i8_nxv64i8(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -1572,13 +1135,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv64i8( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv64i8_nxv64i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vlm.ll 
b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vlm.ll index 12279639893bc..ba1e365084165 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vlm.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vlm.ll @@ -4,8 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -global-isel -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vlm.nxv1i1(ptr, iXLen); - define @intrinsic_vlm_v_nxv1i1(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vlm_v_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -17,8 +15,6 @@ entry: ret %a } -declare @llvm.riscv.vlm.nxv2i1(ptr, iXLen); - define @intrinsic_vlm_v_nxv2i1(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vlm_v_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -30,8 +26,6 @@ entry: ret %a } -declare @llvm.riscv.vlm.nxv4i1(ptr, iXLen); - define @intrinsic_vlm_v_nxv4i1(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vlm_v_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -43,8 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vlm.nxv8i1(ptr, iXLen); - define @intrinsic_vlm_v_nxv8i1(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vlm_v_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -56,8 +48,6 @@ entry: ret %a } -declare @llvm.riscv.vlm.nxv16i1(ptr, iXLen); - define @intrinsic_vlm_v_nxv16i1(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vlm_v_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -69,8 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vlm.nxv32i1(ptr, iXLen); - define @intrinsic_vlm_v_nxv32i1(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vlm_v_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -82,8 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vlm.nxv64i1(ptr, iXLen); - define @intrinsic_vlm_v_nxv64i1(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vlm_v_nxv64i1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vloxei-rv64.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vloxei-rv64.ll index 5cb55f15c7c8c..48b162078ea86 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vloxei-rv64.ll +++ 
b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vloxei-rv64.ll @@ -4,12 +4,6 @@ ; The intrinsics are not supported with RV32. -declare @llvm.riscv.vloxei.nxv1i8.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -27,14 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -52,12 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i8.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -75,14 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -100,12 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i8.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -123,14 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -148,12 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i8.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -171,14 +123,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -196,12 +140,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i16.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -219,14 +157,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -244,12 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i16.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -267,14 +191,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -292,12 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i16.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -315,14 +225,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i64( 
%0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -340,12 +242,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i16.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -363,14 +259,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -388,12 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i32.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -411,14 +293,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -436,12 +310,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i32.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -459,14 +327,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -484,12 +344,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i32.nxv4i64( - , - ptr, - , - i64); - define 
@intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -507,14 +361,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -532,12 +378,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i32.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -555,14 +395,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -580,12 +412,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i64.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -602,14 +428,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -627,12 +445,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i64.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -649,14 +461,6 @@ entry: ret %a } -declare 
@llvm.riscv.vloxei.mask.nxv2i64.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -674,12 +478,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i64.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -696,14 +494,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -721,12 +511,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i64.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -743,14 +527,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -768,12 +544,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1f16.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -791,14 +561,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1f16.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i64: ; 
CHECK: # %bb.0: # %entry @@ -816,12 +578,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2f16.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -839,14 +595,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2f16.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -864,12 +612,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4f16.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -887,14 +629,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4f16.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -912,12 +646,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8f16.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -935,14 +663,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8f16.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -960,12 +680,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1f32.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -983,14 +697,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1008,12 +714,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2f32.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1031,14 +731,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1056,12 +748,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4f32.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1079,14 +765,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1104,12 +782,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8f32.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1127,14 +799,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64( - , - ptr, - , - , - i64, - i64); - define 
@intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1152,12 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1f64.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1174,14 +832,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1199,12 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2f64.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1221,14 +865,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1246,12 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4f64.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1268,14 +898,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1293,12 +915,6 @@ entry: ret %a } -declare 
@llvm.riscv.vloxei.nxv8f64.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1315,14 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vloxei.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vloxei.ll index fafd45b7579e8..6b676890bcb65 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vloxei.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vloxei.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ ; RUN: -global-isel -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vloxei.nxv1i8.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -27,14 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -52,12 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i8.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -75,14 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i32( %0, ptr %1, %2, %3, 
iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -100,12 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i8.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -123,14 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -148,12 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i8.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -171,14 +123,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -196,12 +140,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16i8.nxv16i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -219,14 +157,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -244,12 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i16.nxv1i32( - , - ptr, - , - iXLen); - define 
@intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -267,14 +191,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -292,12 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i16.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -315,14 +225,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -340,12 +242,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i16.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -363,14 +259,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -388,12 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i16.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -411,14 +293,6 @@ entry: ret %a } -declare 
@llvm.riscv.vloxei.mask.nxv8i16.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -436,12 +310,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16i16.nxv16i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -459,14 +327,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -484,12 +344,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i32.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -506,14 +360,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -531,12 +377,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i32.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -553,14 +393,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -578,12 +410,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i32.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -600,14 +426,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -625,12 +443,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i32.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -647,14 +459,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -672,12 +476,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16i32.nxv16i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -694,14 +492,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -719,12 +509,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i64.nxv1i32( - , - ptr, - , - iXLen); - define 
@intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -742,14 +526,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -767,12 +543,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i64.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -791,14 +561,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -816,12 +578,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i64.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -840,14 +596,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -865,12 +613,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i64.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -889,14 +631,6 @@ entry: ret %a } -declare 
@llvm.riscv.vloxei.mask.nxv8i64.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -914,12 +648,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1f16.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -937,14 +665,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1f16.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -962,12 +682,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2f16.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -985,14 +699,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2f16.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1010,12 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4f16.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1033,14 +733,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4f16.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1058,12 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8f16.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1081,14 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8f16.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1106,12 +784,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16f16.nxv16i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1129,14 +801,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16f16.nxv16i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1154,12 +818,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1f32.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1176,14 +834,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1201,12 +851,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2f32.nxv2i32( - , - ptr, - , - iXLen); - define 
@intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1223,14 +867,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1248,12 +884,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4f32.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1270,14 +900,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1295,12 +917,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8f32.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1317,14 +933,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1342,12 +950,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16f32.nxv16i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1364,14 +966,6 @@ entry: ret %a } 
-declare @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1389,12 +983,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1f64.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1412,14 +1000,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1437,12 +1017,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2f64.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1461,14 +1035,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1486,12 +1052,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4f64.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1510,14 +1070,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1535,12 +1087,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8f64.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1559,14 +1105,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1584,12 +1122,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i8.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1607,14 +1139,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1632,12 +1156,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i8.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1655,14 +1173,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1680,12 +1190,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i8.nxv4i16( - , - ptr, - , - iXLen); - define 
@intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1703,14 +1207,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1728,12 +1224,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i8.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1751,14 +1241,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1776,12 +1258,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16i8.nxv16i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1799,14 +1275,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1824,12 +1292,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv32i8.nxv32i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1847,14 +1309,6 @@ entry: ret %a } -declare 
@llvm.riscv.vloxei.mask.nxv32i8.nxv32i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1872,12 +1326,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i16.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1894,14 +1342,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1919,12 +1359,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i16.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1941,14 +1375,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1966,12 +1392,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i16.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1988,14 +1408,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2013,12 +1425,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i16.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2035,14 +1441,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2060,12 +1458,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16i16.nxv16i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2082,14 +1474,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2107,12 +1491,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv32i16.nxv32i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2129,14 +1507,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2154,12 +1524,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i32.nxv1i16( - , - ptr, 
- , - iXLen); - define @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2177,14 +1541,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2202,12 +1558,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i32.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2225,14 +1575,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2250,12 +1592,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i32.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2274,14 +1610,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2299,12 +1627,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i32.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2323,14 
+1645,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2348,12 +1662,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16i32.nxv16i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2372,14 +1680,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2397,12 +1697,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i64.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2420,14 +1714,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2445,12 +1731,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i64.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2469,14 +1749,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i16( %0, ptr %1, 
%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2494,12 +1766,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i64.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2518,14 +1784,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2543,12 +1801,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i64.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2567,14 +1819,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2592,12 +1836,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1f16.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2614,14 +1852,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1f16.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2639,12 +1869,6 @@ entry: ret %a } -declare 
@llvm.riscv.vloxei.nxv2f16.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2661,14 +1885,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2f16.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2686,12 +1902,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4f16.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2708,14 +1918,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4f16.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2733,12 +1935,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8f16.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2755,14 +1951,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8f16.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2780,12 +1968,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16f16.nxv16i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2802,14 +1984,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16f16.nxv16i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2827,12 +2001,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv32f16.nxv32i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2849,14 +2017,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv32f16.nxv32i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2874,12 +2034,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1f32.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2897,14 +2051,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2922,12 +2068,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2f32.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2945,14 +2085,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16( - , - ptr, - , 
- , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2970,12 +2102,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4f32.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2994,14 +2120,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3019,12 +2137,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8f32.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3043,14 +2155,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3068,12 +2172,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16f32.nxv16i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -3092,14 +2190,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i16: 
; CHECK: # %bb.0: # %entry @@ -3117,12 +2207,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1f64.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3140,14 +2224,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3165,12 +2241,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2f64.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3189,14 +2259,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3214,12 +2276,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4f64.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3238,14 +2294,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3263,12 +2311,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8f64.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i16(ptr %0, %1, 
iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3287,14 +2329,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3312,12 +2346,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i8.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3334,14 +2362,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3359,12 +2379,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i8.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3381,14 +2395,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3406,12 +2412,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i8.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3428,14 +2428,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define 
@intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3453,12 +2445,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i8.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3475,14 +2461,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3500,12 +2478,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16i8.nxv16i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3522,14 +2494,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3547,12 +2511,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv32i8.nxv32i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3569,14 +2527,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3594,12 +2544,6 @@ entry: ret %a } 
-declare @llvm.riscv.vloxei.nxv64i8.nxv64i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv64i8_nxv64i8_nxv64i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -3616,14 +2560,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv64i8_nxv64i8_nxv64i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -3641,12 +2577,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i16.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3664,14 +2594,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3689,12 +2611,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i16.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3712,14 +2628,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3737,12 +2645,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i16.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i8: ; 
CHECK: # %bb.0: # %entry @@ -3760,14 +2662,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3785,12 +2679,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i16.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3809,14 +2697,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3834,12 +2714,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16i16.nxv16i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3858,14 +2732,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3883,12 +2749,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv32i16.nxv32i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3907,14 +2767,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8( - , - ptr, - , - , - iXLen, - iXLen); - define 
@intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3932,12 +2784,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i32.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3955,14 +2801,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3980,12 +2818,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i32.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4003,14 +2835,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4028,12 +2852,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i32.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4052,14 +2870,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4077,12 +2887,6 @@ entry: ret 
%a } -declare @llvm.riscv.vloxei.nxv8i32.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4101,14 +2905,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4126,12 +2922,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16i32.nxv16i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4150,14 +2940,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4175,12 +2957,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i64.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4198,14 +2974,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4223,12 +2991,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i64.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4247,14 +3009,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4272,12 +3026,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i64.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4296,14 +3044,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4321,12 +3061,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i64.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4345,14 +3079,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4370,12 +3096,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1f16.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4393,14 +3113,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1f16.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define 
@intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4418,12 +3130,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2f16.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4441,14 +3147,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2f16.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4466,12 +3164,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4f16.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4489,14 +3181,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4f16.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4514,12 +3198,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8f16.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4538,14 +3216,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8f16.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4563,12 +3233,6 @@ entry: ret %a } 
-declare @llvm.riscv.vloxei.nxv16f16.nxv16i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4587,14 +3251,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16f16.nxv16i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4612,12 +3268,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv32f16.nxv32i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -4636,14 +3286,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv32f16.nxv32i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -4661,12 +3303,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1f32.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4684,14 +3320,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4709,12 +3337,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2f32.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4732,14 +3354,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4757,12 +3371,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4f32.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4781,14 +3389,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4806,12 +3406,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8f32.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4830,14 +3424,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4855,12 +3441,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16f32.nxv16i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4879,14 +3459,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8( - , - ptr, - , - , - iXLen, - iXLen); - define 
@intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4904,12 +3476,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1f64.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4927,14 +3493,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4952,12 +3510,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2f64.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4976,14 +3528,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5001,12 +3545,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4f64.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5025,14 +3563,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5050,12 +3580,6 @@ entry: ret 
%a } -declare @llvm.riscv.vloxei.nxv8f64.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -5074,14 +3598,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vlse.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vlse.ll index 14abfa1b44ca7..871fc31f6f33b 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vlse.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vlse.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ ; RUN: -global-isel -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vlse.nxv1i64( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv1i64_nxv1i64(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv1i64( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv1i64_nxv1i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv2i64( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv2i64_nxv2i64(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv2i64( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv2i64_nxv2i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vlse_mask_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv4i64( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv4i64_nxv4i64(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv4i64( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv4i64_nxv4i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv8i64( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv8i64_nxv8i64(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv8i64( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv8i64_nxv8i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv1f64( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv1f64_nxv1f64(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv1f64( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv1f64_nxv1f64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv2f64( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv2f64_nxv2f64(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ 
-261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv2f64( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv2f64_nxv2f64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv4f64( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv4f64_nxv4f64(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv4f64( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv4f64_nxv4f64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -333,12 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv8f64( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv8f64_nxv8f64(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -355,14 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv8f64( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv8f64_nxv8f64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -380,12 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv1i32( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv1i32_nxv1i32(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -402,14 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv1i32( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv1i32_nxv1i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -427,12 +301,6 @@ entry: ret %a 
} -declare @llvm.riscv.vlse.nxv2i32( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv2i32_nxv2i32(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -449,14 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv2i32( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv2i32_nxv2i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -474,12 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv4i32( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv4i32_nxv4i32(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -496,14 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv4i32( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv4i32_nxv4i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -521,12 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv8i32( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv8i32_nxv8i32(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -543,14 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv8i32( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv8i32_nxv8i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -568,12 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv16i32( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv16i32_nxv16i32(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -590,14 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv16i32( - , - ptr, - iXLen, 
- , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv16i32_nxv16i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -615,12 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv1f32( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv1f32_nxv1f32(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -637,14 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv1f32( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv1f32_nxv1f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -662,12 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv2f32( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv2f32_nxv2f32(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -684,14 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv2f32( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv2f32_nxv2f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -709,12 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv4f32( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv4f32_nxv4f32(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -731,14 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv4f32( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv4f32_nxv4f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -756,12 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv8f32( - , - ptr, - iXLen, - iXLen); - define 
@intrinsic_vlse_v_nxv8f32_nxv8f32(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -778,14 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv8f32( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv8f32_nxv8f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -803,12 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv16f32( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv16f32_nxv16f32(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -825,14 +581,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv16f32( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv16f32_nxv16f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -850,12 +598,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv1i16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv1i16_nxv1i16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -872,14 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv1i16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv1i16_nxv1i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -897,12 +631,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv2i16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv2i16_nxv2i16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -919,14 +647,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv2i16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv2i16_nxv2i16( 
%0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -944,12 +664,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv4i16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv4i16_nxv4i16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -966,14 +680,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv4i16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv4i16_nxv4i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -991,12 +697,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv8i16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv8i16_nxv8i16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1013,14 +713,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv8i16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv8i16_nxv8i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1038,12 +730,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv16i16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv16i16_nxv16i16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1060,14 +746,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv16i16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv16i16_nxv16i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1085,12 +763,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv32i16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv32i16_nxv32i16(ptr %0, iXLen %1, iXLen %2) nounwind { 
; CHECK-LABEL: intrinsic_vlse_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1107,14 +779,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv32i16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv32i16_nxv32i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1132,12 +796,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv1f16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv1f16_nxv1f16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1154,14 +812,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv1f16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv1f16_nxv1f16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1179,12 +829,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv2f16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv2f16_nxv2f16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -1201,14 +845,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv2f16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv2f16_nxv2f16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -1226,12 +862,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv4f16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv4f16_nxv4f16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -1248,14 +878,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv4f16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv4f16_nxv4f16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vlse_mask_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -1273,12 +895,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv8f16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv8f16_nxv8f16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -1295,14 +911,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv8f16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv8f16_nxv8f16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -1320,12 +928,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv16f16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv16f16_nxv16f16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -1342,14 +944,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv16f16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv16f16_nxv16f16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -1367,12 +961,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv32f16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv32f16_nxv32f16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -1389,14 +977,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv32f16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv32f16_nxv32f16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -1414,12 +994,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv1i8( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv1i8_nxv1i8(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv1i8_nxv1i8: ; 
CHECK: # %bb.0: # %entry @@ -1436,14 +1010,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv1i8( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv1i8_nxv1i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1461,12 +1027,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv2i8( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv2i8_nxv2i8(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1483,14 +1043,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv2i8( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv2i8_nxv2i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1508,12 +1060,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv4i8( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv4i8_nxv4i8(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1530,14 +1076,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv4i8( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv4i8_nxv4i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1555,12 +1093,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv8i8( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv8i8_nxv8i8(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1577,14 +1109,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv8i8( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv8i8_nxv8i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1602,12 +1126,6 @@ 
entry: ret %a } -declare @llvm.riscv.vlse.nxv16i8( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv16i8_nxv16i8(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1624,14 +1142,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv16i8( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv16i8_nxv16i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1649,12 +1159,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv32i8( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv32i8_nxv32i8(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1671,14 +1175,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv32i8( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv32i8_nxv32i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1696,12 +1192,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv64i8( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv64i8_nxv64i8(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -1718,14 +1208,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv64i8( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv64i8_nxv64i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vluxei-rv64.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vluxei-rv64.ll index 916af2556c6a8..0c4afaf0a7397 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vluxei-rv64.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vluxei-rv64.ll @@ -4,12 +4,6 @@ ; The intrinsics are not supported 
with RV32. -declare @llvm.riscv.vluxei.nxv1i8.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -27,14 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -52,12 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i8.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -75,14 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -100,12 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i8.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -123,14 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -148,12 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i8.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -171,14 +123,6 @@ entry: ret %a } 
-declare @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -196,12 +140,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i16.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -219,14 +157,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -244,12 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i16.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -267,14 +191,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -292,12 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i16.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -315,14 +225,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i64: ; 
CHECK: # %bb.0: # %entry @@ -340,12 +242,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i16.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -363,14 +259,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -388,12 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i32.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -411,14 +293,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -436,12 +310,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i32.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -459,14 +327,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -484,12 +344,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i32.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -507,14 +361,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -532,12 +378,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i32.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -555,14 +395,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -580,12 +412,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i64.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -602,14 +428,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -627,12 +445,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i64.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -649,14 +461,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64( - , - ptr, - , - , - i64, - i64); - define 
@intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -674,12 +478,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i64.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -696,14 +494,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -721,12 +511,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i64.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -743,14 +527,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -768,12 +544,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1f16.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -791,14 +561,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1f16.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -816,12 +578,6 @@ entry: ret %a } -declare 
@llvm.riscv.vluxei.nxv2f16.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -839,14 +595,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2f16.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -864,12 +612,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4f16.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -887,14 +629,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4f16.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -912,12 +646,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8f16.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -935,14 +663,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8f16.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -960,12 +680,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1f32.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -983,14 
+697,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1008,12 +714,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2f32.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1031,14 +731,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1056,12 +748,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4f32.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1079,14 +765,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1104,12 +782,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8f32.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1127,14 +799,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1152,12 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1f64.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1174,14 +832,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1199,12 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2f64.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1221,14 +865,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1246,12 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4f64.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1268,14 +898,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1293,12 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8f64.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i64(ptr 
%0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1315,14 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vluxei.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vluxei.ll index 8dd32a1d640dc..ce6ba6c3209d8 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vluxei.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vluxei.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ ; RUN: -global-isel -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vluxei.nxv1i8.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -27,14 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -52,12 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i8.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -75,14 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry @@ 
-100,12 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i8.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -123,14 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -148,12 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i8.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -171,14 +123,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -196,12 +140,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16i8.nxv16i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -219,14 +157,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -244,12 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i16.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -267,14 +191,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -292,12 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i16.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -315,14 +225,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -340,12 +242,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i16.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -363,14 +259,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -388,12 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i16.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -411,14 +293,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define 
@intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -436,12 +310,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16i16.nxv16i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -459,14 +327,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -484,12 +344,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i32.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -506,14 +360,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -531,12 +377,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i32.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -553,14 +393,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -578,12 
+410,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i32.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -600,14 +426,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -625,12 +443,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i32.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -647,14 +459,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -672,12 +476,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16i32.nxv16i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -694,14 +492,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -719,12 +509,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i64.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -742,14 +526,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -767,12 +543,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i64.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -791,14 +561,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -816,12 +578,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i64.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -840,14 +596,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -865,12 +613,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i64.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -889,14 +631,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define 
@intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -914,12 +648,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1f16.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -937,14 +665,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1f16.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -962,12 +682,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2f16.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -985,14 +699,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2f16.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1010,12 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4f16.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1033,14 +733,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4f16.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1058,12 +750,6 @@ entry: 
ret %a } -declare @llvm.riscv.vluxei.nxv8f16.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1081,14 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8f16.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1106,12 +784,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16f16.nxv16i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1129,14 +801,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16f16.nxv16i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1154,12 +818,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1f32.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1176,14 +834,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1201,12 +851,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2f32.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1223,14 +867,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1248,12 +884,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4f32.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1270,14 +900,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1295,12 +917,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8f32.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1317,14 +933,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1342,12 +950,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16f32.nxv16i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1364,14 +966,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32( - , - ptr, - , - , - iXLen, - iXLen); - 
define @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1389,12 +983,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1f64.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1412,14 +1000,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1437,12 +1017,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2f64.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1461,14 +1035,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1486,12 +1052,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4f64.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1510,14 +1070,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ 
-1535,12 +1087,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8f64.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1559,14 +1105,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1584,12 +1122,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i8.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1607,14 +1139,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1632,12 +1156,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i8.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1655,14 +1173,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1680,12 +1190,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i8.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1703,14 +1207,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1728,12 +1224,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i8.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1751,14 +1241,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1776,12 +1258,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16i8.nxv16i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1799,14 +1275,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1824,12 +1292,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv32i8.nxv32i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1847,14 +1309,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16( - , - ptr, - , - , - iXLen, - iXLen); - define 
@intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1872,12 +1326,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i16.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1894,14 +1342,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1919,12 +1359,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i16.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1941,14 +1375,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1966,12 +1392,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i16.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1988,14 +1408,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2013,12 
+1425,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i16.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2035,14 +1441,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2060,12 +1458,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16i16.nxv16i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2082,14 +1474,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2107,12 +1491,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv32i16.nxv32i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2129,14 +1507,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2154,12 +1524,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i32.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i16(ptr %0, %1, iXLen 
%2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2177,14 +1541,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2202,12 +1558,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i32.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2225,14 +1575,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2250,12 +1592,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i32.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2274,14 +1610,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2299,12 +1627,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i32.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2323,14 +1645,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16( - , - ptr, - 
, - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2348,12 +1662,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16i32.nxv16i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2372,14 +1680,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2397,12 +1697,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i64.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2420,14 +1714,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2445,12 +1731,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i64.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2469,14 +1749,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2494,12 +1766,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i64.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2518,14 +1784,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2543,12 +1801,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i64.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2567,14 +1819,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2592,12 +1836,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1f16.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2614,14 +1852,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1f16.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2639,12 +1869,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2f16.nxv2i16( - , - ptr, - , - iXLen); - define 
@intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2661,14 +1885,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2f16.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2686,12 +1902,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4f16.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2708,14 +1918,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4f16.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2733,12 +1935,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8f16.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2755,14 +1951,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8f16.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2780,12 +1968,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16f16.nxv16i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2802,14 +1984,6 @@ entry: 
ret %a } -declare @llvm.riscv.vluxei.mask.nxv16f16.nxv16i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2827,12 +2001,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv32f16.nxv32i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2849,14 +2017,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv32f16.nxv32i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2874,12 +2034,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1f32.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2897,14 +2051,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2922,12 +2068,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2f32.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2945,14 +2085,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i16( %0, ptr %1, %2, %3, 
iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2970,12 +2102,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4f32.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2994,14 +2120,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3019,12 +2137,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8f32.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3043,14 +2155,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3068,12 +2172,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16f32.nxv16i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -3092,14 +2190,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -3117,12 +2207,6 @@ entry: ret %a } -declare 
@llvm.riscv.vluxei.nxv1f64.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3140,14 +2224,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3165,12 +2241,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2f64.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3189,14 +2259,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3214,12 +2276,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4f64.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3238,14 +2294,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3263,12 +2311,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8f64.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3287,14 +2329,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3312,12 +2346,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i8.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3334,14 +2362,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3359,12 +2379,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i8.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3381,14 +2395,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3406,12 +2412,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i8.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3428,14 +2428,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define 
@intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3453,12 +2445,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i8.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3475,14 +2461,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3500,12 +2478,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16i8.nxv16i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3522,14 +2494,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3547,12 +2511,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv32i8.nxv32i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3569,14 +2527,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3594,12 +2544,6 @@ entry: ret %a } 
-declare @llvm.riscv.vluxei.nxv64i8.nxv64i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv64i8_nxv64i8_nxv64i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -3616,14 +2560,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv64i8_nxv64i8_nxv64i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -3641,12 +2577,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i16.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3664,14 +2594,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3689,12 +2611,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i16.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3712,14 +2628,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3737,12 +2645,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i16.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i8: ; 
CHECK: # %bb.0: # %entry @@ -3760,14 +2662,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3785,12 +2679,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i16.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3809,14 +2697,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3834,12 +2714,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16i16.nxv16i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3858,14 +2732,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3883,12 +2749,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv32i16.nxv32i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3907,14 +2767,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8( - , - ptr, - , - , - iXLen, - iXLen); - define 
@intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3932,12 +2784,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i32.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3955,14 +2801,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3980,12 +2818,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i32.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4003,14 +2835,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4028,12 +2852,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i32.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4052,14 +2870,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4077,12 +2887,6 @@ entry: ret 
%a } -declare @llvm.riscv.vluxei.nxv8i32.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4101,14 +2905,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4126,12 +2922,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16i32.nxv16i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4150,14 +2940,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4175,12 +2957,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i64.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4198,14 +2974,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4223,12 +2991,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i64.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4247,14 +3009,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4272,12 +3026,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i64.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4296,14 +3044,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4321,12 +3061,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i64.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4345,14 +3079,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4370,12 +3096,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1f16.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4393,14 +3113,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1f16.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define 
@intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4418,12 +3130,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2f16.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4441,14 +3147,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2f16.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4466,12 +3164,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4f16.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4489,14 +3181,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4f16.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4514,12 +3198,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8f16.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4538,14 +3216,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8f16.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4563,12 +3233,6 @@ entry: ret %a } 
-declare @llvm.riscv.vluxei.nxv16f16.nxv16i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4587,14 +3251,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16f16.nxv16i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4612,12 +3268,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv32f16.nxv32i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -4636,14 +3286,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv32f16.nxv32i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -4661,12 +3303,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1f32.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4684,14 +3320,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4709,12 +3337,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2f32.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4732,14 +3354,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4757,12 +3371,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4f32.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4781,14 +3389,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4806,12 +3406,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8f32.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4830,14 +3424,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4855,12 +3441,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16f32.nxv16i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4879,14 +3459,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8( - , - ptr, - , - , - iXLen, - iXLen); - define 
@intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4904,12 +3476,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1f64.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4927,14 +3493,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4952,12 +3510,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2f64.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4976,14 +3528,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5001,12 +3545,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4f64.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5025,14 +3563,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5050,12 +3580,6 @@ entry: ret 
%a } -declare @llvm.riscv.vluxei.nxv8f64.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -5074,14 +3598,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vse.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vse.ll index 785d9fc6a7970..adf73e35ccdc5 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vse.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vse.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ ; RUN: -global-isel -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare void @llvm.riscv.vse.nxv1i64( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv1i64_nxv1i64( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -24,12 +19,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv1i64( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv1i64_nxv1i64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -62,11 +51,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv2i64( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv2i64_nxv2i64( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -82,12 +66,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv2i64( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv2i64_nxv2i64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: 
# %entry @@ -104,11 +82,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv4i64( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv4i64_nxv4i64( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -124,12 +97,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv4i64( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv4i64_nxv4i64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -146,11 +113,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv8i64( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv8i64_nxv8i64( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -166,12 +128,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv8i64( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv8i64_nxv8i64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -188,11 +144,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv1f64( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv1f64_nxv1f64( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -208,12 +159,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv1f64( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv1f64_nxv1f64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -230,11 +175,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv2f64( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv2f64_nxv2f64( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -250,12 +190,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv2f64( - , - ptr, - , - iXLen); - define void 
@intrinsic_vse_mask_v_nxv2f64_nxv2f64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -272,11 +206,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv4f64( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv4f64_nxv4f64( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -292,12 +221,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv4f64( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv4f64_nxv4f64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -314,11 +237,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv8f64( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv8f64_nxv8f64( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -334,12 +252,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv8f64( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv8f64_nxv8f64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -356,11 +268,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv1i32( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv1i32_nxv1i32( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -376,12 +283,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv1i32_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -398,11 +299,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv2i32( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv2i32_nxv2i32( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv2i32_nxv2i32: ; CHECK: # 
%bb.0: # %entry @@ -418,12 +314,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv2i32_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -440,11 +330,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv4i32( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv4i32_nxv4i32( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -460,12 +345,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv4i32_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -482,11 +361,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv8i32( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv8i32_nxv8i32( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -502,12 +376,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv8i32_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -524,11 +392,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv16i32( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv16i32_nxv16i32( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -544,12 +407,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv16i32( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv16i32_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -566,11 +423,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv1f32( - , - ptr, - 
iXLen); - define void @intrinsic_vse_v_nxv1f32_nxv1f32( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -586,12 +438,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv1f32( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv1f32_nxv1f32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -608,11 +454,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv2f32( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv2f32_nxv2f32( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -628,12 +469,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv2f32( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv2f32_nxv2f32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -650,11 +485,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv4f32( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv4f32_nxv4f32( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -670,12 +500,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv4f32( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv4f32_nxv4f32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -692,11 +516,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv8f32( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv8f32_nxv8f32( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -712,12 +531,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv8f32( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv8f32_nxv8f32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vse_mask_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -734,11 +547,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv16f32( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv16f32_nxv16f32( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -754,12 +562,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv16f32( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv16f32_nxv16f32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -776,11 +578,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv1i16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv1i16_nxv1i16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -796,12 +593,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv1i16_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -818,11 +609,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv2i16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv2i16_nxv2i16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -838,12 +624,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv2i16_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -860,11 +640,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv4i16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv4i16_nxv4i16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -880,12 +655,6 @@ entry: ret void } -declare void 
@llvm.riscv.vse.mask.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv4i16_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -902,11 +671,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv8i16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv8i16_nxv8i16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -922,12 +686,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv8i16_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -944,11 +702,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv16i16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv16i16_nxv16i16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -964,12 +717,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv16i16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv16i16_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -986,11 +733,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv32i16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv32i16_nxv32i16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1006,12 +748,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv32i16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv32i16_nxv32i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1028,11 +764,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv1f16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv1f16_nxv1f16( %0, 
ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1048,12 +779,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv1f16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv1f16_nxv1f16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1070,11 +795,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv2f16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv2f16_nxv2f16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -1090,12 +810,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv2f16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv2f16_nxv2f16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -1112,11 +826,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv4f16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv4f16_nxv4f16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -1132,12 +841,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv4f16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv4f16_nxv4f16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -1154,11 +857,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv8f16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv8f16_nxv8f16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -1174,12 +872,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv8f16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv8f16_nxv8f16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -1196,11 
+888,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv16f16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv16f16_nxv16f16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -1216,12 +903,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv16f16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv16f16_nxv16f16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -1238,11 +919,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv32f16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv32f16_nxv32f16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -1258,12 +934,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv32f16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv32f16_nxv32f16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -1280,11 +950,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv1i8( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv1i8_nxv1i8( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1300,12 +965,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv1i8_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1322,11 +981,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv2i8( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv2i8_nxv2i8( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1342,12 +996,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv2i8( - , - ptr, - , - iXLen); - define void 
@intrinsic_vse_mask_v_nxv2i8_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1364,11 +1012,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv4i8( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv4i8_nxv4i8( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1384,12 +1027,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv4i8_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1406,11 +1043,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv8i8( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv8i8_nxv8i8( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1426,12 +1058,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv8i8_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1448,11 +1074,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv16i8( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv16i8_nxv16i8( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1468,12 +1089,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv16i8( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv16i8_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1490,11 +1105,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv32i8( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv32i8_nxv32i8( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry 
@@ -1510,12 +1120,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv32i8( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv32i8_nxv32i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1532,11 +1136,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv64i8( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv64i8_nxv64i8( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -1552,12 +1151,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv64i8( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv64i8_nxv64i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsm.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsm.ll index 5237536c07740..0a7e74398ae4b 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsm.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsm.ll @@ -4,8 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -global-isel -verify-machineinstrs | FileCheck %s -declare void @llvm.riscv.vsm.nxv1i1(, ptr, iXLen); - define void @intrinsic_vsm_v_nxv1i1( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm_v_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -17,8 +15,6 @@ entry: ret void } -declare void @llvm.riscv.vsm.nxv2i1(, ptr, iXLen); - define void @intrinsic_vsm_v_nxv2i1( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm_v_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -30,8 +26,6 @@ entry: ret void } -declare void @llvm.riscv.vsm.nxv4i1(, ptr, iXLen); - define void @intrinsic_vsm_v_nxv4i1( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm_v_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -43,8 +37,6 @@ entry: ret void } -declare void @llvm.riscv.vsm.nxv8i1(, ptr, iXLen); - define void @intrinsic_vsm_v_nxv8i1( %0, ptr %1, iXLen %2) 
nounwind { ; CHECK-LABEL: intrinsic_vsm_v_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -56,8 +48,6 @@ entry: ret void } -declare void @llvm.riscv.vsm.nxv16i1(, ptr, iXLen); - define void @intrinsic_vsm_v_nxv16i1( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm_v_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -69,8 +59,6 @@ entry: ret void } -declare void @llvm.riscv.vsm.nxv32i1(, ptr, iXLen); - define void @intrinsic_vsm_v_nxv32i1( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm_v_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -82,8 +70,6 @@ entry: ret void } -declare void @llvm.riscv.vsm.nxv64i1(, ptr, iXLen); - define void @intrinsic_vsm_v_nxv64i1( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm_v_nxv64i1: ; CHECK: # %bb.0: # %entry @@ -95,11 +81,6 @@ entry: ret void } -declare @llvm.riscv.vmseq.nxv1i16( - , - , - iXLen); - ; Make sure we can use the vsetvli from the producing instruction. define void @test_vsetvli_i16( %0, %1, ptr %2, iXLen %3) nounwind { ; CHECK-LABEL: test_vsetvli_i16: @@ -117,11 +98,6 @@ entry: ret void } -declare @llvm.riscv.vmseq.nxv1i32( - , - , - iXLen); - define void @test_vsetvli_i32( %0, %1, ptr %2, iXLen %3) nounwind { ; CHECK-LABEL: test_vsetvli_i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsoxei-rv64.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsoxei-rv64.ll index 4963d91a14988..ba4851e18b6fb 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsoxei-rv64.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsoxei-rv64.ll @@ -4,12 +4,6 @@ ; The intrinsics are not supported with RV32. 
-declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -280,12 +202,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -302,13 +218,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i64( - , - ptr, - , - , - 
i64); - define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -326,12 +235,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -348,13 +251,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -372,12 +268,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -394,13 +284,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -418,12 +301,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -440,13 +317,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i64: ; 
CHECK: # %bb.0: # %entry @@ -464,12 +334,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -486,13 +350,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -510,12 +367,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -532,13 +383,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -556,12 +400,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -578,13 +416,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -602,12 +433,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i64( - , - ptr, - , - i64); - define void 
@intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -624,13 +449,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -648,12 +466,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -670,13 +482,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -694,12 +499,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -716,13 +515,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -740,12 +532,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -762,13 
+548,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -786,12 +565,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -808,13 +581,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -832,12 +598,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -854,13 +614,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -878,12 +631,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -900,13 +647,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i64( %0, 
ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -924,12 +664,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -946,13 +680,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -970,12 +697,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -992,13 +713,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1016,12 +730,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1038,13 +746,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1062,12 +763,6 @@ entry: ret void } -declare 
void @llvm.riscv.vsoxei.nxv8f32.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1084,13 +779,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1108,12 +796,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1130,13 +812,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1154,12 +829,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1176,13 +845,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1200,12 +862,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1222,13 +878,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1246,12 +895,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1268,13 +911,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsoxei.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsoxei.ll index 7ea2e1734e5a2..334265feaf19b 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsoxei.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsoxei.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ ; RUN: -global-isel -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # 
%entry @@ -50,12 +37,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16i8.nxv16i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i32( %0, ptr 
%1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -280,12 +202,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -302,13 +218,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -326,12 +235,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -348,13 +251,6 @@ entry: ret 
void } -declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -372,12 +268,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -394,13 +284,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -418,12 +301,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16i16.nxv16i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -440,13 +317,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -464,12 +334,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -486,13 +350,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i32( - , - ptr, - , - , - iXLen); - define void 
@intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -510,12 +367,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -532,13 +383,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -556,12 +400,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -578,13 +416,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -602,12 +433,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -624,13 +449,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i32: 
; CHECK: # %bb.0: # %entry @@ -648,12 +466,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16i32.nxv16i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -670,13 +482,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -694,12 +499,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -716,13 +515,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -740,12 +532,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -762,13 +548,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -786,12 +565,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i32( - , - ptr, - , - 
iXLen); - define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -808,13 +581,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -832,12 +598,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -854,13 +614,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -878,12 +631,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -900,13 +647,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -924,12 +664,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -946,13 +680,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -970,12 +697,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -992,13 +713,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1016,12 +730,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1038,13 +746,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1062,12 +763,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16f16.nxv16i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1084,13 +779,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxei.mask.nxv16f16.nxv16i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1108,12 +796,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1130,13 +812,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1154,12 +829,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1176,13 +845,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1200,12 +862,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1222,13 +878,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i32( %0, 
ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1246,12 +895,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1268,13 +911,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1292,12 +928,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16f32.nxv16i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1314,13 +944,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1338,12 +961,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1360,13 +977,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ 
-1384,12 +994,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1406,13 +1010,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1430,12 +1027,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1452,13 +1043,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1476,12 +1060,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1498,13 +1076,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1522,12 +1093,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i16( - , - ptr, - , - iXLen); - define void 
@intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1544,13 +1109,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1568,12 +1126,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1590,13 +1142,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1614,12 +1159,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1636,13 +1175,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1660,12 +1192,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry @@ 
-1682,13 +1208,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1706,12 +1225,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16i8.nxv16i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1728,13 +1241,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1752,12 +1258,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv32i8.nxv32i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1774,13 +1274,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1798,12 +1291,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1820,13 +1307,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i16( - , - ptr, - , - , - iXLen); - define void 
@intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1844,12 +1324,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1866,13 +1340,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1890,12 +1357,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1912,13 +1373,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1936,12 +1390,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1958,13 +1406,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1982,12 +1423,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16i16.nxv16i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2004,13 +1439,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2028,12 +1456,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv32i16.nxv32i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2050,13 +1472,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2074,12 +1489,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2096,13 +1505,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2120,12 +1522,6 @@ entry: ret 
void } -declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2142,13 +1538,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2166,12 +1555,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2188,13 +1571,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2212,12 +1588,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2234,13 +1604,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2258,12 +1621,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16i32.nxv16i16( - , - ptr, - , - iXLen); - define void 
@intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2280,13 +1637,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2304,12 +1654,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2326,13 +1670,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2350,12 +1687,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2372,13 +1703,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2396,12 +1720,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2418,13 +1736,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2442,12 +1753,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2464,13 +1769,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2488,12 +1786,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2510,13 +1802,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2534,12 +1819,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2556,13 +1835,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxei.mask.nxv2f16.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2580,12 +1852,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2602,13 +1868,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2626,12 +1885,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2648,13 +1901,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2672,12 +1918,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16f16.nxv16i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2694,13 +1934,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i16( - , - ptr, - , - , - iXLen); - define void 
@intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2718,12 +1951,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv32f16.nxv32i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2740,13 +1967,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2764,12 +1984,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2786,13 +2000,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2810,12 +2017,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2832,13 +2033,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2856,12 +2050,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2878,13 +2066,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2902,12 +2083,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2924,13 +2099,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2948,12 +2116,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16f32.nxv16i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2970,13 +2132,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2994,12 +2149,6 @@ entry: ret void } -declare 
void @llvm.riscv.vsoxei.nxv1f64.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3016,13 +2165,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3040,12 +2182,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3062,13 +2198,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3086,12 +2215,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3108,13 +2231,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3132,12 +2248,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i16( %0, ptr %1, %2, 
iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3154,13 +2264,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3178,12 +2281,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3200,13 +2297,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3224,12 +2314,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3246,13 +2330,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3270,12 +2347,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3292,13 +2363,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxei.mask.nxv4i8.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3316,12 +2380,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3338,13 +2396,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3362,12 +2413,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16i8.nxv16i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3384,13 +2429,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3408,12 +2446,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv32i8.nxv32i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3430,13 +2462,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3454,12 +2479,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv64i8.nxv64i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv64i8_nxv64i8_nxv64i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -3476,13 +2495,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv64i8.nxv64i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv64i8_nxv64i8_nxv64i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -3500,12 +2512,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3522,13 +2528,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3546,12 +2545,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3568,13 +2561,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3592,12 +2578,6 @@ entry: ret void } -declare 
void @llvm.riscv.vsoxei.nxv4i16.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3614,13 +2594,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3638,12 +2611,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3660,13 +2627,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3684,12 +2644,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16i16.nxv16i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3706,13 +2660,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3730,12 +2677,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv32i16.nxv32i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i8( %0, ptr %1, %2, 
iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3752,13 +2693,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3776,12 +2710,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3798,13 +2726,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3822,12 +2743,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3844,13 +2759,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3868,12 +2776,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3890,13 +2792,6 @@ entry: ret void } 
-declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3914,12 +2809,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3936,13 +2825,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3960,12 +2842,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16i32.nxv16i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3982,13 +2858,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4006,12 +2875,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4028,13 +2891,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i8( - , - ptr, - , - , - iXLen); - define void 
@intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4052,12 +2908,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4074,13 +2924,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4098,12 +2941,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4120,13 +2957,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4144,12 +2974,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4166,13 +2990,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i8: ; 
CHECK: # %bb.0: # %entry @@ -4190,12 +3007,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4212,13 +3023,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4236,12 +3040,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4258,13 +3056,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4282,12 +3073,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4304,13 +3089,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4328,12 +3106,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i8( - , - ptr, - , - iXLen); - define void 
@intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4350,13 +3122,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4374,12 +3139,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16f16.nxv16i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4396,13 +3155,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4420,12 +3172,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv32f16.nxv32i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -4442,13 +3188,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -4466,12 +3205,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4488,13 +3221,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4512,12 +3238,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4534,13 +3254,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4558,12 +3271,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4580,13 +3287,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4604,12 +3304,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4626,13 +3320,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxei.mask.nxv8f32.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4650,12 +3337,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16f32.nxv16i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4672,13 +3353,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4696,12 +3370,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4718,13 +3386,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4742,12 +3403,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4764,13 +3419,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i8( %0, ptr %1, 
%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4788,12 +3436,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4810,13 +3452,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4834,12 +3469,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4856,13 +3485,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsse.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsse.ll index b7609ff5fd1cd..94285ae6c2615 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsse.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsse.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ ; RUN: -global-isel -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare void @llvm.riscv.vsse.nxv1i64( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv1i64_nxv1i64( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv1i64_nxv1i64: ; CHECK: # 
%bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv1i64( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv1i64_nxv1i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -67,12 +54,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv2i64( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv2i64_nxv2i64( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -89,13 +70,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv2i64( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv2i64_nxv2i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -113,12 +87,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv4i64( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv4i64_nxv4i64( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -135,13 +103,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv4i64( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv4i64_nxv4i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -159,12 +120,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv8i64( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv8i64_nxv8i64( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -181,13 +136,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv8i64( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv8i64_nxv8i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsse_mask_v_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -205,12 +153,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv1f64( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv1f64_nxv1f64( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -227,13 +169,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv1f64( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv1f64_nxv1f64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -251,12 +186,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv2f64( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv2f64_nxv2f64( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -273,13 +202,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv2f64( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv2f64_nxv2f64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -297,12 +219,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv4f64( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv4f64_nxv4f64( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -319,13 +235,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv4f64( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv4f64_nxv4f64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -343,12 +252,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv8f64( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv8f64_nxv8f64( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; 
CHECK-LABEL: intrinsic_vsse_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -365,13 +268,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv8f64( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv8f64_nxv8f64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -389,12 +285,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv1i32( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv1i32_nxv1i32( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -411,13 +301,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv1i32( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv1i32_nxv1i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -435,12 +318,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv2i32( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv2i32_nxv2i32( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -457,13 +334,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv2i32( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv2i32_nxv2i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -481,12 +351,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv4i32( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv4i32_nxv4i32( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -503,13 +367,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv4i32( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv4i32_nxv4i32( %0, ptr %1, iXLen %2, 
%3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -527,12 +384,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv8i32( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv8i32_nxv8i32( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -549,13 +400,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv8i32( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv8i32_nxv8i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -573,12 +417,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv16i32( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv16i32_nxv16i32( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -595,13 +433,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv16i32( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv16i32_nxv16i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -619,12 +450,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv1f32( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv1f32_nxv1f32( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -641,13 +466,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv1f32( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv1f32_nxv1f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -665,12 +483,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv2f32( - , - ptr, - iXLen, - iXLen); - define void 
@intrinsic_vsse_v_nxv2f32_nxv2f32( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -687,13 +499,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv2f32( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv2f32_nxv2f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -711,12 +516,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv4f32( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv4f32_nxv4f32( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -733,13 +532,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv4f32( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv4f32_nxv4f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -757,12 +549,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv8f32( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv8f32_nxv8f32( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -779,13 +565,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv8f32( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv8f32_nxv8f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -803,12 +582,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv16f32( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv16f32_nxv16f32( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -825,13 +598,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv16f32( - , - ptr, - iXLen, - , 
- iXLen); - define void @intrinsic_vsse_mask_v_nxv16f32_nxv16f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -849,12 +615,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv1i16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv1i16_nxv1i16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -871,13 +631,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv1i16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv1i16_nxv1i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -895,12 +648,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv2i16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv2i16_nxv2i16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -917,13 +664,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv2i16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv2i16_nxv2i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -941,12 +681,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv4i16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv4i16_nxv4i16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -963,13 +697,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv4i16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv4i16_nxv4i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -987,12 +714,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsse.nxv8i16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv8i16_nxv8i16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1009,13 +730,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv8i16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv8i16_nxv8i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1033,12 +747,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv16i16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv16i16_nxv16i16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1055,13 +763,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv16i16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv16i16_nxv16i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1079,12 +780,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv32i16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv32i16_nxv32i16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1101,13 +796,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv32i16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv32i16_nxv32i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1125,12 +813,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv1f16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv1f16_nxv1f16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1147,13 +829,6 
@@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv1f16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv1f16_nxv1f16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1171,12 +846,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv2f16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv2f16_nxv2f16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -1193,13 +862,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv2f16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv2f16_nxv2f16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -1217,12 +879,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv4f16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv4f16_nxv4f16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -1239,13 +895,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv4f16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv4f16_nxv4f16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -1263,12 +912,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv8f16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv8f16_nxv8f16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -1285,13 +928,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv8f16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv8f16_nxv8f16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f16_nxv8f16: ; 
CHECK: # %bb.0: # %entry @@ -1309,12 +945,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv16f16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv16f16_nxv16f16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -1331,13 +961,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv16f16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv16f16_nxv16f16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -1355,12 +978,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv32f16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv32f16_nxv32f16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -1377,13 +994,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv32f16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv32f16_nxv32f16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -1401,12 +1011,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv1i8( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv1i8_nxv1i8( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1423,13 +1027,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv1i8( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv1i8_nxv1i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1447,12 +1044,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv2i8( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv2i8_nxv2i8( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vsse_v_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1469,13 +1060,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv2i8( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv2i8_nxv2i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1493,12 +1077,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv4i8( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv4i8_nxv4i8( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1515,13 +1093,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv4i8( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv4i8_nxv4i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1539,12 +1110,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv8i8( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv8i8_nxv8i8( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1561,13 +1126,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv8i8( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv8i8_nxv8i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1585,12 +1143,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv16i8( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv16i8_nxv16i8( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1607,13 +1159,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv16i8( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv16i8_nxv16i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1631,12 +1176,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv32i8( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv32i8_nxv32i8( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1653,13 +1192,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv32i8( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv32i8_nxv32i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1677,12 +1209,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv64i8( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv64i8_nxv64i8( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -1699,13 +1225,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv64i8( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv64i8_nxv64i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsuxei-rv64.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsuxei-rv64.ll index 9bd272a368d20..80aeb52857036 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsuxei-rv64.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsuxei-rv64.ll @@ -4,12 +4,6 @@ ; The intrinsics are not supported with RV32. 
-declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -67,12 +54,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -89,13 +70,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -113,12 +87,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -135,13 +103,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -159,12 +120,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -181,13 +136,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -205,12 +153,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -227,13 +169,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -251,12 +186,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -273,13 +202,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -297,12 +219,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -319,13 +235,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i64( - , - ptr, - , - , - 
i64); - define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -343,12 +252,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -365,13 +268,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -389,12 +285,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -411,13 +301,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -435,12 +318,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -457,13 +334,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i64: ; 
CHECK: # %bb.0: # %entry @@ -481,12 +351,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -503,13 +367,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -527,12 +384,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -549,13 +400,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -573,12 +417,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -595,13 +433,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -619,12 +450,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i64( - , - ptr, - , - i64); - define void 
@intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -641,13 +466,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -665,12 +483,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -687,13 +499,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -711,12 +516,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -733,13 +532,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -757,12 +549,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -779,13 
+565,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -803,12 +582,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -825,13 +598,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -849,12 +615,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -871,13 +631,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -895,12 +648,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -917,13 +664,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i64( %0, 
ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -941,12 +681,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -963,13 +697,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -987,12 +714,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1009,13 +730,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1033,12 +747,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1055,13 +763,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1079,12 +780,6 @@ entry: ret void } -declare 
void @llvm.riscv.vsuxei.nxv8f32.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1101,13 +796,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1125,12 +813,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1147,13 +829,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1171,12 +846,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1193,13 +862,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1217,12 +879,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1239,13 +895,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1263,12 +912,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1285,13 +928,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsuxei.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsuxei.ll index 7cd15454d40b9..660b78e4685e4 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsuxei.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsuxei.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ ; RUN: -global-isel -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # 
%entry @@ -50,12 +37,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16i8.nxv16i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i32( %0, ptr 
%1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -280,12 +202,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -302,13 +218,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -326,12 +235,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -348,13 +251,6 @@ entry: ret 
void } -declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -372,12 +268,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -394,13 +284,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -418,12 +301,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16i16.nxv16i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -440,13 +317,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -464,12 +334,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -486,13 +350,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i32( - , - ptr, - , - , - iXLen); - define void 
@intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -510,12 +367,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -532,13 +383,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -556,12 +400,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -578,13 +416,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -602,12 +433,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -624,13 +449,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i32: 
; CHECK: # %bb.0: # %entry @@ -648,12 +466,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16i32.nxv16i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -670,13 +482,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -694,12 +499,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -716,13 +515,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -740,12 +532,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -762,13 +548,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -786,12 +565,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i32( - , - ptr, - , - 
iXLen); - define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -808,13 +581,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -832,12 +598,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -854,13 +614,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -878,12 +631,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -900,13 +647,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -924,12 +664,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -946,13 +680,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -970,12 +697,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -992,13 +713,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1016,12 +730,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1038,13 +746,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1062,12 +763,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16f16.nxv16i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1084,13 +779,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxei.mask.nxv16f16.nxv16i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1108,12 +796,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1130,13 +812,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1154,12 +829,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1176,13 +845,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1200,12 +862,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1222,13 +878,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i32( %0, 
ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1246,12 +895,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1268,13 +911,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1292,12 +928,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16f32.nxv16i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1314,13 +944,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1338,12 +961,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1360,13 +977,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ 
-1384,12 +994,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1406,13 +1010,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1430,12 +1027,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1452,13 +1043,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1476,12 +1060,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1498,13 +1076,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1522,12 +1093,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i16( - , - ptr, - , - iXLen); - define void 
@intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1544,13 +1109,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1568,12 +1126,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1590,13 +1142,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1614,12 +1159,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1636,13 +1175,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1660,12 +1192,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry @@ 
-1682,13 +1208,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1706,12 +1225,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16i8.nxv16i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1728,13 +1241,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1752,12 +1258,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv32i8.nxv32i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1774,13 +1274,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1798,12 +1291,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1820,13 +1307,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i16( - , - ptr, - , - , - iXLen); - define void 
@intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1844,12 +1324,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1866,13 +1340,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1890,12 +1357,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1912,13 +1373,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1936,12 +1390,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1958,13 +1406,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1982,12 +1423,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16i16.nxv16i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2004,13 +1439,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2028,12 +1456,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv32i16.nxv32i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2050,13 +1472,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2074,12 +1489,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2096,13 +1505,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2120,12 +1522,6 @@ entry: ret 
void } -declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2142,13 +1538,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2166,12 +1555,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2188,13 +1571,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2212,12 +1588,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2234,13 +1604,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2258,12 +1621,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16i32.nxv16i16( - , - ptr, - , - iXLen); - define void 
@intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2280,13 +1637,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2304,12 +1654,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2326,13 +1670,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2350,12 +1687,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2372,13 +1703,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2396,12 +1720,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2418,13 +1736,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2442,12 +1753,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2464,13 +1769,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2488,12 +1786,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2510,13 +1802,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2534,12 +1819,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2556,13 +1835,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxei.mask.nxv2f16.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2580,12 +1852,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2602,13 +1868,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2626,12 +1885,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2648,13 +1901,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2672,12 +1918,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16f16.nxv16i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2694,13 +1934,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i16( - , - ptr, - , - , - iXLen); - define void 
@intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2718,12 +1951,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv32f16.nxv32i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2740,13 +1967,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2764,12 +1984,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2786,13 +2000,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2810,12 +2017,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2832,13 +2033,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2856,12 +2050,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2878,13 +2066,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2902,12 +2083,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2924,13 +2099,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2948,12 +2116,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16f32.nxv16i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2970,13 +2132,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2994,12 +2149,6 @@ entry: ret void } -declare 
void @llvm.riscv.vsuxei.nxv1f64.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3016,13 +2165,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3040,12 +2182,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3062,13 +2198,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3086,12 +2215,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3108,13 +2231,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3132,12 +2248,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i16( %0, ptr %1, %2, 
iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3154,13 +2264,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3178,12 +2281,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3200,13 +2297,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3224,12 +2314,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3246,13 +2330,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3270,12 +2347,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3292,13 +2363,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxei.mask.nxv4i8.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3316,12 +2380,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3338,13 +2396,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3362,12 +2413,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16i8.nxv16i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3384,13 +2429,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3408,12 +2446,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv32i8.nxv32i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3430,13 +2462,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3454,12 +2479,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv64i8.nxv64i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv64i8_nxv64i8_nxv64i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -3476,13 +2495,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv64i8.nxv64i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv64i8_nxv64i8_nxv64i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -3500,12 +2512,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3522,13 +2528,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3546,12 +2545,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3568,13 +2561,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3592,12 +2578,6 @@ entry: ret void } -declare 
void @llvm.riscv.vsuxei.nxv4i16.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3614,13 +2594,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3638,12 +2611,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3660,13 +2627,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3684,12 +2644,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16i16.nxv16i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3706,13 +2660,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3730,12 +2677,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv32i16.nxv32i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i8( %0, ptr %1, %2, 
iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3752,13 +2693,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3776,12 +2710,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3798,13 +2726,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3822,12 +2743,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3844,13 +2759,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3868,12 +2776,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3890,13 +2792,6 @@ entry: ret void } 
-declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3914,12 +2809,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3936,13 +2825,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3960,12 +2842,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16i32.nxv16i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3982,13 +2858,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4006,12 +2875,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4028,13 +2891,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i8( - , - ptr, - , - , - iXLen); - define void 
@intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4052,12 +2908,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4074,13 +2924,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4098,12 +2941,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4120,13 +2957,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4144,12 +2974,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4166,13 +2990,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i8: ; 
CHECK: # %bb.0: # %entry @@ -4190,12 +3007,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4212,13 +3023,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4236,12 +3040,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4258,13 +3056,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4282,12 +3073,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4304,13 +3089,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4328,12 +3106,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i8( - , - ptr, - , - iXLen); - define void 
@intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4350,13 +3122,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4374,12 +3139,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16f16.nxv16i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4396,13 +3155,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4420,12 +3172,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv32f16.nxv32i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -4442,13 +3188,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -4466,12 +3205,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4488,13 +3221,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4512,12 +3238,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4534,13 +3254,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4558,12 +3271,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4580,13 +3287,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4604,12 +3304,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4626,13 +3320,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxei.mask.nxv8f32.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4650,12 +3337,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16f32.nxv16i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4672,13 +3353,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4696,12 +3370,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4718,13 +3386,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4742,12 +3403,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4764,13 +3419,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i8( %0, ptr %1, 
%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4788,12 +3436,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4810,13 +3452,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4834,12 +3469,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4856,13 +3485,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/shifts.ll b/llvm/test/CodeGen/RISCV/GlobalISel/shifts.ll index d634cc9f6395c..6e9b263a1a6b4 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/shifts.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/shifts.ll @@ -7,9 +7,6 @@ ; Basic shift support is tested as part of ALU.ll. This file ensures that ; shifts which may not be supported natively are lowered properly. 
-declare i64 @llvm.fshr.i64(i64, i64, i64) -declare i128 @llvm.fshr.i128(i128, i128, i128) - define i64 @lshr64(i64 %a, i64 %b) nounwind { ; RV32I-LABEL: lshr64: ; RV32I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll b/llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll index bb96ba7e5b1fb..6345011e3d9ce 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll @@ -31,9 +31,6 @@ ; The nounwind attribute is omitted for some of the tests, to check that CFI ; directives are correctly generated. -declare void @llvm.va_start(ptr) -declare void @llvm.va_end(ptr) - declare void @notdead(ptr) ; Although frontends are recommended to not generate va_arg due to the lack of @@ -1214,8 +1211,6 @@ define void @va3_caller() nounwind { ret void } -declare void @llvm.va_copy(ptr, ptr) - define iXLen @va4_va_copy(i32 %argno, ...) nounwind { ; RV32-LABEL: va4_va_copy: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/abds-neg.ll b/llvm/test/CodeGen/RISCV/abds-neg.ll index 41f73f51fe7b6..10d40ca4c774e 100644 --- a/llvm/test/CodeGen/RISCV/abds-neg.ll +++ b/llvm/test/CodeGen/RISCV/abds-neg.ll @@ -2670,18 +2670,3 @@ define i128 @abd_subnsw_i128_undef(i128 %a, i128 %b) nounwind { ret i128 %nabs } -declare i8 @llvm.abs.i8(i8, i1) -declare i16 @llvm.abs.i16(i16, i1) -declare i32 @llvm.abs.i32(i32, i1) -declare i64 @llvm.abs.i64(i64, i1) -declare i128 @llvm.abs.i128(i128, i1) - -declare i8 @llvm.smax.i8(i8, i8) -declare i16 @llvm.smax.i16(i16, i16) -declare i32 @llvm.smax.i32(i32, i32) -declare i64 @llvm.smax.i64(i64, i64) - -declare i8 @llvm.smin.i8(i8, i8) -declare i16 @llvm.smin.i16(i16, i16) -declare i32 @llvm.smin.i32(i32, i32) -declare i64 @llvm.smin.i64(i64, i64) diff --git a/llvm/test/CodeGen/RISCV/abds.ll b/llvm/test/CodeGen/RISCV/abds.ll index f11a9c854c465..b89885bc32dba 100644 --- a/llvm/test/CodeGen/RISCV/abds.ll +++ b/llvm/test/CodeGen/RISCV/abds.ll @@ -2701,18 +2701,3 @@ define i128 @abd_select_i128(i128 %a, 
i128 %b) nounwind { ret i128 %sub } -declare i8 @llvm.abs.i8(i8, i1) -declare i16 @llvm.abs.i16(i16, i1) -declare i32 @llvm.abs.i32(i32, i1) -declare i64 @llvm.abs.i64(i64, i1) -declare i128 @llvm.abs.i128(i128, i1) - -declare i8 @llvm.smax.i8(i8, i8) -declare i16 @llvm.smax.i16(i16, i16) -declare i32 @llvm.smax.i32(i32, i32) -declare i64 @llvm.smax.i64(i64, i64) - -declare i8 @llvm.smin.i8(i8, i8) -declare i16 @llvm.smin.i16(i16, i16) -declare i32 @llvm.smin.i32(i32, i32) -declare i64 @llvm.smin.i64(i64, i64) diff --git a/llvm/test/CodeGen/RISCV/abdu-neg.ll b/llvm/test/CodeGen/RISCV/abdu-neg.ll index 713b52f53e3d9..e362c1819f4be 100644 --- a/llvm/test/CodeGen/RISCV/abdu-neg.ll +++ b/llvm/test/CodeGen/RISCV/abdu-neg.ll @@ -1941,18 +1941,3 @@ define i128 @abd_cmp_i128(i128 %a, i128 %b) nounwind { ret i128 %sel } -declare i8 @llvm.abs.i8(i8, i1) -declare i16 @llvm.abs.i16(i16, i1) -declare i32 @llvm.abs.i32(i32, i1) -declare i64 @llvm.abs.i64(i64, i1) -declare i128 @llvm.abs.i128(i128, i1) - -declare i8 @llvm.umax.i8(i8, i8) -declare i16 @llvm.umax.i16(i16, i16) -declare i32 @llvm.umax.i32(i32, i32) -declare i64 @llvm.umax.i64(i64, i64) - -declare i8 @llvm.umin.i8(i8, i8) -declare i16 @llvm.umin.i16(i16, i16) -declare i32 @llvm.umin.i32(i32, i32) -declare i64 @llvm.umin.i64(i64, i64) diff --git a/llvm/test/CodeGen/RISCV/abdu.ll b/llvm/test/CodeGen/RISCV/abdu.ll index 6ef172a6cd618..37c46e3370521 100644 --- a/llvm/test/CodeGen/RISCV/abdu.ll +++ b/llvm/test/CodeGen/RISCV/abdu.ll @@ -2114,21 +2114,6 @@ define i128 @abd_select_i128(i128 %a, i128 %b) nounwind { ret i128 %sub } -declare i8 @llvm.abs.i8(i8, i1) -declare i16 @llvm.abs.i16(i16, i1) -declare i32 @llvm.abs.i32(i32, i1) -declare i64 @llvm.abs.i64(i64, i1) -declare i128 @llvm.abs.i128(i128, i1) - -declare i8 @llvm.umax.i8(i8, i8) -declare i16 @llvm.umax.i16(i16, i16) -declare i32 @llvm.umax.i32(i32, i32) -declare i64 @llvm.umax.i64(i64, i64) - -declare i8 @llvm.umin.i8(i8, i8) -declare i16 @llvm.umin.i16(i16, 
i16) -declare i32 @llvm.umin.i32(i32, i32) -declare i64 @llvm.umin.i64(i64, i64) ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: ; CHECK: {{.*}} ; NOZBB: {{.*}} diff --git a/llvm/test/CodeGen/RISCV/addcarry.ll b/llvm/test/CodeGen/RISCV/addcarry.ll index ff0d1e75c746c..153c97faddec8 100644 --- a/llvm/test/CodeGen/RISCV/addcarry.ll +++ b/llvm/test/CodeGen/RISCV/addcarry.ll @@ -4,9 +4,6 @@ ; Test ADDCARRY node expansion on a target that does not currently support ADDCARRY. ; Signed fixed point multiplication eventually expands down to an ADDCARRY. -declare i64 @llvm.smul.fix.i64 (i64, i64, i32) -declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32) - define i64 @addcarry(i64 %x, i64 %y) nounwind { ; RISCV32-LABEL: addcarry: ; RISCV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/alloca.ll b/llvm/test/CodeGen/RISCV/alloca.ll index 975fc93c830af..9ea5471e4c633 100644 --- a/llvm/test/CodeGen/RISCV/alloca.ll +++ b/llvm/test/CodeGen/RISCV/alloca.ll @@ -29,9 +29,6 @@ define void @simple_alloca(i32 %n) nounwind { ret void } -declare ptr @llvm.stacksave() -declare void @llvm.stackrestore(ptr) - define void @scoped_alloca(i32 %n) nounwind { ; RV32I-LABEL: scoped_alloca: ; RV32I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/allow-check.ll b/llvm/test/CodeGen/RISCV/allow-check.ll index 0ddb5266db8f5..1e0716c9af008 100644 --- a/llvm/test/CodeGen/RISCV/allow-check.ll +++ b/llvm/test/CodeGen/RISCV/allow-check.ll @@ -17,8 +17,6 @@ entry: ret i1 %allow } -declare i1 @llvm.allow.runtime.check(metadata) nounwind - define i1 @test_ubsan() local_unnamed_addr { ; CHECK-LABEL: test_ubsan: ; CHECK: # %bb.0: # %entry @@ -29,4 +27,3 @@ entry: ret i1 %allow } -declare i1 @llvm.allow.ubsan.check(i8) nounwind diff --git a/llvm/test/CodeGen/RISCV/arith-with-overflow.ll b/llvm/test/CodeGen/RISCV/arith-with-overflow.ll index 551d8864033f3..557b4b7c2afa2 100644 --- a/llvm/test/CodeGen/RISCV/arith-with-overflow.ll +++ 
b/llvm/test/CodeGen/RISCV/arith-with-overflow.ll @@ -2,11 +2,6 @@ ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=RV32I %s -declare {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b) -declare {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 %b) -declare {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b) -declare {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 %b) - define i1 @sadd(i32 %a, i32 %b, ptr %c) nounwind { ; RV32I-LABEL: sadd: ; RV32I: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/atomic-signext.ll b/llvm/test/CodeGen/RISCV/atomic-signext.ll index 7fe5fa7365eb5..74ff20db12b62 100644 --- a/llvm/test/CodeGen/RISCV/atomic-signext.ll +++ b/llvm/test/CodeGen/RISCV/atomic-signext.ll @@ -157,7 +157,6 @@ define signext i32 @atomic_load_i32_unordered(ptr %a) nounwind { ret i32 %1 } - define signext i8 @atomicrmw_xchg_i8_monotonic(ptr %a, i8 %b) nounwind { ; RV32I-LABEL: atomicrmw_xchg_i8_monotonic: ; RV32I: # %bb.0: @@ -7508,7 +7507,6 @@ merge: %4 = phi i32 [ %1, %then ], [ %2, %else ] ret i32 %4 } -declare i32 @llvm.smax.i32(i32, i32) define signext i32 @atomicrmw_min_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind { ; RV32I-LABEL: atomicrmw_min_i32_monotonic_crossbb: @@ -7720,7 +7718,6 @@ merge: %4 = phi i32 [ %1, %then ], [ %2, %else ] ret i32 %4 } -declare i32 @llvm.smin.i32(i32, i32) define signext i32 @atomicrmw_umax_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind { ; RV32I-LABEL: atomicrmw_umax_i32_monotonic_crossbb: @@ -7904,7 +7901,6 @@ merge: %4 = phi i32 [ %1, %then ], [ %2, %else ] ret i32 %4 } -declare i32 @llvm.umax.i32(i32, i32) define signext i32 @atomicrmw_umin_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind { ; RV32I-LABEL: atomicrmw_umin_i32_monotonic_crossbb: @@ -8122,7 +8118,6 @@ merge: %4 = phi i32 [ %1, %then ], [ %2, %else ] ret i32 %4 } -declare i32 @llvm.umin.i32(i32, i32) define signext i32 @cmpxchg_i32_monotonic_crossbb(ptr %ptr, i32 signext %cmp, i32 signext %val, i1 zeroext %c) 
nounwind { ; RV32I-LABEL: cmpxchg_i32_monotonic_crossbb: diff --git a/llvm/test/CodeGen/RISCV/bfloat-arith.ll b/llvm/test/CodeGen/RISCV/bfloat-arith.ll index 871b43e61df50..c3bd658a57229 100644 --- a/llvm/test/CodeGen/RISCV/bfloat-arith.ll +++ b/llvm/test/CodeGen/RISCV/bfloat-arith.ll @@ -55,8 +55,6 @@ define bfloat @fdiv_bf16(bfloat %a, bfloat %b) nounwind { ret bfloat %1 } -declare bfloat @llvm.sqrt.bf16(bfloat) - define bfloat @fsqrt_bf16(bfloat %a) nounwind { ; CHECK-LABEL: fsqrt_bf16: ; CHECK: # %bb.0: @@ -68,8 +66,6 @@ define bfloat @fsqrt_bf16(bfloat %a) nounwind { ret bfloat %1 } -declare bfloat @llvm.copysign.bf16(bfloat, bfloat) - define bfloat @fsgnj_bf16(bfloat %a, bfloat %b) nounwind { ; RV32IZFBFMIN-LABEL: fsgnj_bf16: ; RV32IZFBFMIN: # %bb.0: @@ -159,8 +155,6 @@ define bfloat @fsgnjn_bf16(bfloat %a, bfloat %b) nounwind { ret bfloat %3 } -declare bfloat @llvm.fabs.bf16(bfloat) - define bfloat @fabs_bf16(bfloat %a, bfloat %b) nounwind { ; RV32IZFBFMIN-LABEL: fabs_bf16: ; RV32IZFBFMIN: # %bb.0: @@ -199,8 +193,6 @@ define bfloat @fabs_bf16(bfloat %a, bfloat %b) nounwind { ret bfloat %3 } -declare bfloat @llvm.minnum.bf16(bfloat, bfloat) - define bfloat @fmin_bf16(bfloat %a, bfloat %b) nounwind { ; CHECK-LABEL: fmin_bf16: ; CHECK: # %bb.0: @@ -213,8 +205,6 @@ define bfloat @fmin_bf16(bfloat %a, bfloat %b) nounwind { ret bfloat %1 } -declare bfloat @llvm.maxnum.bf16(bfloat, bfloat) - define bfloat @fmax_bf16(bfloat %a, bfloat %b) nounwind { ; CHECK-LABEL: fmax_bf16: ; CHECK: # %bb.0: @@ -227,8 +217,6 @@ define bfloat @fmax_bf16(bfloat %a, bfloat %b) nounwind { ret bfloat %1 } -declare bfloat @llvm.fma.bf16(bfloat, bfloat, bfloat) - define bfloat @fmadd_bf16(bfloat %a, bfloat %b, bfloat %c) nounwind { ; CHECK-LABEL: fmadd_bf16: ; CHECK: # %bb.0: @@ -345,7 +333,6 @@ define bfloat @fnmadd_s_3(bfloat %a, bfloat %b, bfloat %c) nounwind { ret bfloat %neg } - define bfloat @fnmadd_nsz(bfloat %a, bfloat %b, bfloat %c) nounwind { ; CHECK-LABEL: fnmadd_nsz: ; CHECK: 
# %bb.0: diff --git a/llvm/test/CodeGen/RISCV/bfloat-convert.ll b/llvm/test/CodeGen/RISCV/bfloat-convert.ll index 73ff888e44b3b..3de0753369c01 100644 --- a/llvm/test/CodeGen/RISCV/bfloat-convert.ll +++ b/llvm/test/CodeGen/RISCV/bfloat-convert.ll @@ -119,7 +119,6 @@ start: %0 = tail call i16 @llvm.fptosi.sat.i16.bf16(bfloat %a) ret i16 %0 } -declare i16 @llvm.fptosi.sat.i16.bf16(bfloat) define i16 @fcvt_ui_bf16(bfloat %a) nounwind { ; CHECK32ZFBFMIN-LABEL: fcvt_ui_bf16: @@ -209,7 +208,6 @@ start: %0 = tail call i16 @llvm.fptoui.sat.i16.bf16(bfloat %a) ret i16 %0 } -declare i16 @llvm.fptoui.sat.i16.bf16(bfloat) define i32 @fcvt_w_bf16(bfloat %a) nounwind { ; CHECK32ZFBFMIN-LABEL: fcvt_w_bf16: @@ -291,7 +289,6 @@ start: %0 = tail call i32 @llvm.fptosi.sat.i32.bf16(bfloat %a) ret i32 %0 } -declare i32 @llvm.fptosi.sat.i32.bf16(bfloat) define i32 @fcvt_wu_bf16(bfloat %a) nounwind { ; CHECK32ZFBFMIN-LABEL: fcvt_wu_bf16: @@ -419,7 +416,6 @@ start: %0 = tail call i32 @llvm.fptoui.sat.i32.bf16(bfloat %a) ret i32 %0 } -declare i32 @llvm.fptoui.sat.i32.bf16(bfloat) define i64 @fcvt_l_bf16(bfloat %a) nounwind { ; CHECK32ZFBFMIN-LABEL: fcvt_l_bf16: @@ -609,7 +605,6 @@ start: %0 = tail call i64 @llvm.fptosi.sat.i64.bf16(bfloat %a) ret i64 %0 } -declare i64 @llvm.fptosi.sat.i64.bf16(bfloat) define i64 @fcvt_lu_bf16(bfloat %a) nounwind { ; CHECK32ZFBFMIN-LABEL: fcvt_lu_bf16: @@ -759,7 +754,6 @@ start: %0 = tail call i64 @llvm.fptoui.sat.i64.bf16(bfloat %a) ret i64 %0 } -declare i64 @llvm.fptoui.sat.i64.bf16(bfloat) define bfloat @fcvt_bf16_si(i16 %a) nounwind { ; CHECK32ZFBFMIN-LABEL: fcvt_bf16_si: @@ -1685,7 +1679,6 @@ start: %0 = tail call i8 @llvm.fptosi.sat.i8.bf16(bfloat %a) ret i8 %0 } -declare i8 @llvm.fptosi.sat.i8.bf16(bfloat) define zeroext i8 @fcvt_wu_s_i8(bfloat %a) nounwind { ; CHECK32ZFBFMIN-LABEL: fcvt_wu_s_i8: @@ -1771,7 +1764,6 @@ start: %0 = tail call i8 @llvm.fptoui.sat.i8.bf16(bfloat %a) ret i8 %0 } -declare i8 @llvm.fptoui.sat.i8.bf16(bfloat) define zeroext 
i32 @fcvt_wu_bf16_sat_zext(bfloat %a) nounwind { ; CHECK32ZFBFMIN-LABEL: fcvt_wu_bf16_sat_zext: diff --git a/llvm/test/CodeGen/RISCV/bitreverse-shift.ll b/llvm/test/CodeGen/RISCV/bitreverse-shift.ll index 92610f22c4b72..83e7d1e250c5e 100644 --- a/llvm/test/CodeGen/RISCV/bitreverse-shift.ll +++ b/llvm/test/CodeGen/RISCV/bitreverse-shift.ll @@ -8,11 +8,6 @@ ; fold (bitreverse(srl (bitreverse c), x)) -> (shl c, x) ; fold (bitreverse(shl (bitreverse c), x)) -> (srl c, x) -declare i8 @llvm.bitreverse.i8(i8) -declare i16 @llvm.bitreverse.i16(i16) -declare i32 @llvm.bitreverse.i32(i32) -declare i64 @llvm.bitreverse.i64(i64) - define i8 @test_bitreverse_srli_bitreverse_i8(i8 %a) nounwind { ; CHECK-LABEL: test_bitreverse_srli_bitreverse_i8: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/bswap-bitreverse.ll b/llvm/test/CodeGen/RISCV/bswap-bitreverse.ll index 1605e686e9177..9450eea5a6666 100644 --- a/llvm/test/CodeGen/RISCV/bswap-bitreverse.ll +++ b/llvm/test/CodeGen/RISCV/bswap-bitreverse.ll @@ -12,14 +12,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zbkb -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefixes=RV64ZB,RV64ZBKB -declare i16 @llvm.bswap.i16(i16) -declare i32 @llvm.bswap.i32(i32) -declare i64 @llvm.bswap.i64(i64) -declare i8 @llvm.bitreverse.i8(i8) -declare i16 @llvm.bitreverse.i16(i16) -declare i32 @llvm.bitreverse.i32(i32) -declare i64 @llvm.bitreverse.i64(i64) - define i16 @test_bswap_i16(i16 %a) nounwind { ; RV32I-LABEL: test_bswap_i16: ; RV32I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/bswap-shift.ll b/llvm/test/CodeGen/RISCV/bswap-shift.ll index 23f32ae327fd9..63fb69b0285e7 100644 --- a/llvm/test/CodeGen/RISCV/bswap-shift.ll +++ b/llvm/test/CodeGen/RISCV/bswap-shift.ll @@ -8,10 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zbkb -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefixes=RV64ZB -declare i16 @llvm.bswap.i16(i16) -declare i32 @llvm.bswap.i32(i32) -declare i64 @llvm.bswap.i64(i64) - define i16 
@test_bswap_srli_7_bswap_i16(i16 %a) nounwind { ; RV32ZB-LABEL: test_bswap_srli_7_bswap_i16: ; RV32ZB: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/clear-cache.ll b/llvm/test/CodeGen/RISCV/clear-cache.ll index d598a98a330e9..6f26e82f07bd3 100644 --- a/llvm/test/CodeGen/RISCV/clear-cache.ll +++ b/llvm/test/CodeGen/RISCV/clear-cache.ll @@ -6,8 +6,6 @@ ; RUN: llc -mtriple=riscv32-unknown-linux-musl < %s | FileCheck --check-prefix=RV32-LINUX %s ; RUN: llc -mtriple=riscv64-unknown-linux-musl < %s | FileCheck --check-prefix=RV64-LINUX %s -declare void @llvm.clear_cache(ptr, ptr) - define void @foo(ptr %a, ptr %b) nounwind { ; RV32-LABEL: foo: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/copy-frameindex.mir b/llvm/test/CodeGen/RISCV/copy-frameindex.mir index 31ffc3f0f83c6..9a307d44cf21a 100644 --- a/llvm/test/CodeGen/RISCV/copy-frameindex.mir +++ b/llvm/test/CodeGen/RISCV/copy-frameindex.mir @@ -15,8 +15,6 @@ ret void } - declare void @llvm.dbg.value(metadata, metadata, metadata) - !llvm.dbg.cu = !{!0} !0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !4) diff --git a/llvm/test/CodeGen/RISCV/copysign-casts.ll b/llvm/test/CodeGen/RISCV/copysign-casts.ll index 53de36f1699a9..e2b8840518b0d 100644 --- a/llvm/test/CodeGen/RISCV/copysign-casts.ll +++ b/llvm/test/CodeGen/RISCV/copysign-casts.ll @@ -37,10 +37,6 @@ ; Test fcopysign scenarios where the sign argument is casted to the type of the ; magnitude argument. Those casts can be folded away by the DAGCombiner. 
-declare double @llvm.copysign.f64(double, double) -declare float @llvm.copysign.f32(float, float) -declare half @llvm.copysign.f16(half, half) - define double @fold_promote_d_s(double %a, float %b) nounwind { ; RV32I-LABEL: fold_promote_d_s: ; RV32I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll b/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll index e6b22b2b9deea..976c57e422761 100644 --- a/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll +++ b/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll @@ -16,19 +16,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+xtheadbb -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV64XTHEADBB -declare i8 @llvm.cttz.i8(i8, i1) -declare i16 @llvm.cttz.i16(i16, i1) -declare i32 @llvm.cttz.i32(i32, i1) -declare i64 @llvm.cttz.i64(i64, i1) -declare i8 @llvm.ctlz.i8(i8, i1) -declare i16 @llvm.ctlz.i16(i16, i1) -declare i32 @llvm.ctlz.i32(i32, i1) -declare i64 @llvm.ctlz.i64(i64, i1) -declare i8 @llvm.ctpop.i8(i8) -declare i16 @llvm.ctpop.i16(i16) -declare i32 @llvm.ctpop.i32(i32) -declare i64 @llvm.ctpop.i64(i64) - define i8 @test_cttz_i8(i8 %a) nounwind { ; RV32_NOZBB-LABEL: test_cttz_i8: ; RV32_NOZBB: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll b/llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll index cb213172c6c88..e92ff1a1b1b40 100644 --- a/llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll +++ b/llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll @@ -106,9 +106,6 @@ define signext i32 @ctz_dereferencing_pointer(ptr %b) nounwind { ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret - - - entry: %0 = load i64, ptr %b, align 8 %1 = tail call i64 @llvm.cttz.i64(i64 %0, i1 true) @@ -196,9 +193,6 @@ define i64 @ctz_dereferencing_pointer_zext(ptr %b) nounwind { ; RV64I-NEXT: andi a0, a0, 31 ; RV64I-NEXT: ret - - - entry: %0 = load i32, ptr %b, align 8 %1 = tail call i32 @llvm.cttz.i32(i32 %0, i1 true) @@ -281,9 +275,6 @@ define signext i32 @ctz1(i32 signext %x) nounwind { ; RV64I-NEXT: andi a0, a0, 31 ; RV64I-NEXT: 
ret - - - entry: %0 = tail call i32 @llvm.cttz.i32(i32 %x, i1 true) %1 = icmp eq i32 %x, 0 @@ -364,9 +355,6 @@ define signext i32 @ctz1_flipped(i32 signext %x) nounwind { ; RV64I-NEXT: andi a0, a0, 31 ; RV64I-NEXT: ret - - - entry: %0 = tail call i32 @llvm.cttz.i32(i32 %x, i1 true) %1 = icmp ne i32 %x, 0 @@ -444,9 +432,6 @@ define signext i32 @ctz2(i32 signext %x) nounwind { ; RV64I-NEXT: li a0, 32 ; RV64I-NEXT: ret - - - entry: %0 = tail call i32 @llvm.cttz.i32(i32 %x, i1 false) ret i32 %0 @@ -522,9 +507,6 @@ define signext i32 @ctz3(i32 signext %x) nounwind { ; RV64I-NEXT: li a0, 32 ; RV64I-NEXT: ret - - - entry: %0 = tail call i32 @llvm.cttz.i32(i32 %x, i1 false) ret i32 %0 @@ -626,9 +608,6 @@ define signext i32 @ctz4(i64 %b) nounwind { ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret - - - entry: %0 = tail call i64 @llvm.cttz.i64(i64 %b, i1 true) %1 = icmp eq i64 %b, 0 @@ -773,9 +752,6 @@ define signext i32 @ctlz(i64 %b) nounwind { ; RV64I-NEXT: srli a0, a0, 58 ; RV64I-NEXT: ret - - - entry: %0 = tail call i64 @llvm.ctlz.i64(i64 %b, i1 true) %1 = icmp eq i64 %b, 0 @@ -857,9 +833,6 @@ define signext i32 @ctz5(i32 signext %x) nounwind { ; RV64I-NEXT: andi a0, a0, 31 ; RV64I-NEXT: ret - - - entry: %0 = tail call i32 @llvm.cttz.i32(i32 %x, i1 true) %1 = icmp eq i32 %x, 0 @@ -940,9 +913,6 @@ define signext i32 @ctz6(i32 signext %x) nounwind { ; RV64I-NEXT: andi a0, a0, 31 ; RV64I-NEXT: ret - - - entry: %0 = tail call i32 @llvm.cttz.i32(i32 %x, i1 true) %1 = icmp eq i32 %x, 0 @@ -1030,9 +1000,6 @@ define signext i32 @globalVar() nounwind { ; RV64I-NEXT: andi a0, a0, 31 ; RV64I-NEXT: ret - - - entry: %0 = load i32, ptr @global_x, align 4 %1 = tail call i32 @llvm.cttz.i32(i32 %0, i1 true) @@ -1803,6 +1770,3 @@ define i32 @test_ctlz_select_i32(i32 %0) { ret i32 %4 } -declare i64 @llvm.cttz.i64(i64, i1 immarg) -declare i32 @llvm.cttz.i32(i32, i1 immarg) -declare i64 @llvm.ctlz.i64(i64, i1 immarg) diff --git a/llvm/test/CodeGen/RISCV/double-arith-strict.ll 
b/llvm/test/CodeGen/RISCV/double-arith-strict.ll index 4e48e54b3ca81..0071f3c168964 100644 --- a/llvm/test/CodeGen/RISCV/double-arith-strict.ll +++ b/llvm/test/CodeGen/RISCV/double-arith-strict.ll @@ -52,7 +52,6 @@ define double @fadd_d(double %a, double %b) nounwind strictfp { %1 = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret double %1 } -declare double @llvm.experimental.constrained.fadd.f64(double, double, metadata, metadata) define double @fsub_d(double %a, double %b) nounwind strictfp { ; CHECKIFD-LABEL: fsub_d: @@ -90,7 +89,6 @@ define double @fsub_d(double %a, double %b) nounwind strictfp { %1 = call double @llvm.experimental.constrained.fsub.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret double %1 } -declare double @llvm.experimental.constrained.fsub.f64(double, double, metadata, metadata) define double @fmul_d(double %a, double %b) nounwind strictfp { ; CHECKIFD-LABEL: fmul_d: @@ -128,7 +126,6 @@ define double @fmul_d(double %a, double %b) nounwind strictfp { %1 = call double @llvm.experimental.constrained.fmul.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret double %1 } -declare double @llvm.experimental.constrained.fmul.f64(double, double, metadata, metadata) define double @fdiv_d(double %a, double %b) nounwind strictfp { ; CHECKIFD-LABEL: fdiv_d: @@ -166,7 +163,6 @@ define double @fdiv_d(double %a, double %b) nounwind strictfp { %1 = call double @llvm.experimental.constrained.fdiv.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret double %1 } -declare double @llvm.experimental.constrained.fdiv.f64(double, double, metadata, metadata) define double @fsqrt_d(double %a) nounwind strictfp { ; CHECKIFD-LABEL: fsqrt_d: @@ -204,7 +200,6 @@ define double @fsqrt_d(double %a) nounwind strictfp { %1 = call double 
@llvm.experimental.constrained.sqrt.f64(double %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret double %1 } -declare double @llvm.experimental.constrained.sqrt.f64(double, metadata, metadata) define double @fmin_d(double %a, double %b) nounwind strictfp { ; RV32IFD-LABEL: fmin_d: @@ -263,7 +258,6 @@ define double @fmin_d(double %a, double %b) nounwind strictfp { %1 = call double @llvm.experimental.constrained.minnum.f64(double %a, double %b, metadata !"fpexcept.strict") strictfp ret double %1 } -declare double @llvm.experimental.constrained.minnum.f64(double, double, metadata) strictfp define double @fmax_d(double %a, double %b) nounwind strictfp { ; RV32IFD-LABEL: fmax_d: @@ -322,7 +316,6 @@ define double @fmax_d(double %a, double %b) nounwind strictfp { %1 = call double @llvm.experimental.constrained.maxnum.f64(double %a, double %b, metadata !"fpexcept.strict") strictfp ret double %1 } -declare double @llvm.experimental.constrained.maxnum.f64(double, double, metadata) strictfp define double @fmadd_d(double %a, double %b, double %c) nounwind strictfp { ; CHECKIFD-LABEL: fmadd_d: @@ -360,7 +353,6 @@ define double @fmadd_d(double %a, double %b, double %c) nounwind strictfp { %1 = call double @llvm.experimental.constrained.fma.f64(double %a, double %b, double %c, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret double %1 } -declare double @llvm.experimental.constrained.fma.f64(double, double, double, metadata, metadata) strictfp define double @fmsub_d(double %a, double %b, double %c) nounwind strictfp { ; RV32IFD-LABEL: fmsub_d: diff --git a/llvm/test/CodeGen/RISCV/double-arith.ll b/llvm/test/CodeGen/RISCV/double-arith.ll index f960bc19c57c3..ec66d17f96980 100644 --- a/llvm/test/CodeGen/RISCV/double-arith.ll +++ b/llvm/test/CodeGen/RISCV/double-arith.ll @@ -165,8 +165,6 @@ define double @fdiv_d(double %a, double %b) nounwind { ret double %1 } -declare double @llvm.sqrt.f64(double) - define double @fsqrt_d(double %a) 
nounwind { ; CHECKIFD-LABEL: fsqrt_d: ; CHECKIFD: # %bb.0: @@ -204,8 +202,6 @@ define double @fsqrt_d(double %a) nounwind { ret double %1 } -declare double @llvm.copysign.f64(double, double) - define double @fsgnj_d(double %a, double %b) nounwind { ; CHECKIFD-LABEL: fsgnj_d: ; CHECKIFD: # %bb.0: @@ -344,8 +340,6 @@ define double @fsgnjn_d(double %a, double %b) nounwind { ret double %2 } -declare double @llvm.fabs.f64(double) - ; This function performs extra work to ensure that ; DAGCombiner::visitBITCAST doesn't replace the fabs with an and. define double @fabs_d(double %a, double %b) nounwind { @@ -402,8 +396,6 @@ define double @fabs_d(double %a, double %b) nounwind { ret double %3 } -declare double @llvm.minnum.f64(double, double) - define double @fmin_d(double %a, double %b) nounwind { ; CHECKIFD-LABEL: fmin_d: ; CHECKIFD: # %bb.0: @@ -441,8 +433,6 @@ define double @fmin_d(double %a, double %b) nounwind { ret double %1 } -declare double @llvm.maxnum.f64(double, double) - define double @fmax_d(double %a, double %b) nounwind { ; CHECKIFD-LABEL: fmax_d: ; CHECKIFD: # %bb.0: @@ -480,8 +470,6 @@ define double @fmax_d(double %a, double %b) nounwind { ret double %1 } -declare double @llvm.fma.f64(double, double, double) - define double @fmadd_d(double %a, double %b, double %c) nounwind { ; CHECKIFD-LABEL: fmadd_d: ; CHECKIFD: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll b/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll index 14193bf4cb169..d4bd69b06a298 100644 --- a/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll +++ b/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll @@ -61,8 +61,6 @@ define double @fneg(double %a) nounwind { ret double %1 } -declare double @llvm.fabs.f64(double) - define double @fabs(double %a) nounwind { ; RV32I-LABEL: fabs: ; RV32I: # %bb.0: @@ -101,8 +99,6 @@ define double @fabs(double %a) nounwind { ret double %1 } -declare double @llvm.copysign.f64(double, double) - ; 
DAGTypeLegalizer::SoftenFloatRes_FCOPYSIGN will convert to bitwise ; operations if floating point isn't supported. A combine could be written to ; do the same even when f64 is legal. diff --git a/llvm/test/CodeGen/RISCV/double-convert-strict.ll b/llvm/test/CodeGen/RISCV/double-convert-strict.ll index 9a5e357b05a17..eb31c5a110cd2 100644 --- a/llvm/test/CodeGen/RISCV/double-convert-strict.ll +++ b/llvm/test/CodeGen/RISCV/double-convert-strict.ll @@ -56,7 +56,6 @@ define float @fcvt_s_d(double %a) nounwind strictfp { %1 = call float @llvm.experimental.constrained.fptrunc.f32.f64(double %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret float %1 } -declare float @llvm.experimental.constrained.fptrunc.f32.f64(double, metadata, metadata) define double @fcvt_d_s(float %a) nounwind strictfp { ; CHECKIFD-LABEL: fcvt_d_s: @@ -94,7 +93,6 @@ define double @fcvt_d_s(float %a) nounwind strictfp { %1 = call double @llvm.experimental.constrained.fpext.f64.f32(float %a, metadata !"fpexcept.strict") ret double %1 } -declare double @llvm.experimental.constrained.fpext.f64.f32(float, metadata) define i32 @fcvt_w_d(double %a) nounwind strictfp { ; CHECKIFD-LABEL: fcvt_w_d: @@ -132,7 +130,6 @@ define i32 @fcvt_w_d(double %a) nounwind strictfp { %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %a, metadata !"fpexcept.strict") ret i32 %1 } -declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata) ; For RV64D, fcvt.lu.d is semantically equivalent to fcvt.wu.d in this case ; because fptosi will produce poison if the result doesn't fit into an i32. @@ -172,7 +169,6 @@ define i32 @fcvt_wu_d(double %a) nounwind strictfp { %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %a, metadata !"fpexcept.strict") ret i32 %1 } -declare i32 @llvm.experimental.constrained.fptoui.i32.f64(double, metadata) ; Test where the fptoui has multiple uses, one of which causes a sext to be ; inserted on RV64. 
@@ -262,7 +258,6 @@ define double @fcvt_d_w(i32 %a) nounwind strictfp { %1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret double %1 } -declare double @llvm.experimental.constrained.sitofp.f64.i32(i32, metadata, metadata) define double @fcvt_d_w_load(ptr %p) nounwind strictfp { ; CHECKIFD-LABEL: fcvt_d_w_load: @@ -344,7 +339,6 @@ define double @fcvt_d_wu(i32 %a) nounwind strictfp { %1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret double %1 } -declare double @llvm.experimental.constrained.uitofp.f64.i32(i32, metadata, metadata) define double @fcvt_d_wu_load(ptr %p) nounwind strictfp { ; CHECKIFD-LABEL: fcvt_d_wu_load: @@ -438,7 +432,6 @@ define i64 @fcvt_l_d(double %a) nounwind strictfp { %1 = call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %a, metadata !"fpexcept.strict") ret i64 %1 } -declare i64 @llvm.experimental.constrained.fptosi.i64.f64(double, metadata) define i64 @fcvt_lu_d(double %a) nounwind strictfp { ; RV32IFD-LABEL: fcvt_lu_d: @@ -489,7 +482,6 @@ define i64 @fcvt_lu_d(double %a) nounwind strictfp { %1 = call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %a, metadata !"fpexcept.strict") ret i64 %1 } -declare i64 @llvm.experimental.constrained.fptoui.i64.f64(double, metadata) define double @fcvt_d_l(i64 %a) nounwind strictfp { ; RV32IFD-LABEL: fcvt_d_l: @@ -540,7 +532,6 @@ define double @fcvt_d_l(i64 %a) nounwind strictfp { %1 = call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret double %1 } -declare double @llvm.experimental.constrained.sitofp.f64.i64(i64, metadata, metadata) define double @fcvt_d_lu(i64 %a) nounwind strictfp { ; RV32IFD-LABEL: fcvt_d_lu: @@ -591,7 +582,6 @@ define double @fcvt_d_lu(i64 %a) nounwind strictfp { %1 = call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %a, 
metadata !"round.dynamic", metadata !"fpexcept.strict") ret double %1 } -declare double @llvm.experimental.constrained.uitofp.f64.i64(i64, metadata, metadata) define double @fcvt_d_w_i8(i8 signext %a) nounwind strictfp { ; CHECKIFD-LABEL: fcvt_d_w_i8: @@ -629,7 +619,6 @@ define double @fcvt_d_w_i8(i8 signext %a) nounwind strictfp { %1 = call double @llvm.experimental.constrained.sitofp.f64.i8(i8 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret double %1 } -declare double @llvm.experimental.constrained.sitofp.f64.i8(i8, metadata, metadata) define double @fcvt_d_wu_i8(i8 zeroext %a) nounwind strictfp { ; CHECKIFD-LABEL: fcvt_d_wu_i8: @@ -667,7 +656,6 @@ define double @fcvt_d_wu_i8(i8 zeroext %a) nounwind strictfp { %1 = call double @llvm.experimental.constrained.uitofp.f64.i8(i8 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret double %1 } -declare double @llvm.experimental.constrained.uitofp.f64.i8(i8, metadata, metadata) define double @fcvt_d_w_i16(i16 signext %a) nounwind strictfp { ; CHECKIFD-LABEL: fcvt_d_w_i16: @@ -705,7 +693,6 @@ define double @fcvt_d_w_i16(i16 signext %a) nounwind strictfp { %1 = call double @llvm.experimental.constrained.sitofp.f64.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret double %1 } -declare double @llvm.experimental.constrained.sitofp.f64.i16(i16, metadata, metadata) define double @fcvt_d_wu_i16(i16 zeroext %a) nounwind strictfp { ; CHECKIFD-LABEL: fcvt_d_wu_i16: @@ -743,7 +730,6 @@ define double @fcvt_d_wu_i16(i16 zeroext %a) nounwind strictfp { %1 = call double @llvm.experimental.constrained.uitofp.f64.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret double %1 } -declare double @llvm.experimental.constrained.uitofp.f64.i16(i16, metadata, metadata) ; Make sure we select W version of addi on RV64. 
define signext i32 @fcvt_d_w_demanded_bits(i32 signext %0, ptr %1) nounwind strictfp { diff --git a/llvm/test/CodeGen/RISCV/double-convert.ll b/llvm/test/CodeGen/RISCV/double-convert.ll index c3e729800616d..eb03f8ee1d532 100644 --- a/llvm/test/CodeGen/RISCV/double-convert.ll +++ b/llvm/test/CodeGen/RISCV/double-convert.ll @@ -251,7 +251,6 @@ start: %0 = tail call i32 @llvm.fptosi.sat.i32.f64(double %a) ret i32 %0 } -declare i32 @llvm.fptosi.sat.i32.f64(double) ; For RV64D, fcvt.lu.d is semantically equivalent to fcvt.wu.d in this case ; because fptosi will produce poison if the result doesn't fit into an i32. @@ -460,7 +459,6 @@ start: %0 = tail call i32 @llvm.fptoui.sat.i32.f64(double %a) ret i32 %0 } -declare i32 @llvm.fptoui.sat.i32.f64(double) define double @fcvt_d_w(i32 %a) nounwind { ; CHECKIFD-LABEL: fcvt_d_w: @@ -885,7 +883,6 @@ start: %0 = tail call i64 @llvm.fptosi.sat.i64.f64(double %a) ret i64 %0 } -declare i64 @llvm.fptosi.sat.i64.f64(double) define i64 @fcvt_lu_d(double %a) nounwind { ; RV32IFD-LABEL: fcvt_lu_d: @@ -1077,7 +1074,6 @@ start: %0 = tail call i64 @llvm.fptoui.sat.i64.f64(double %a) ret i64 %0 } -declare i64 @llvm.fptoui.sat.i64.f64(double) define i64 @fmv_x_d(double %a, double %b) nounwind { ; RV32IFD-LABEL: fmv_x_d: @@ -1783,7 +1779,6 @@ start: %0 = tail call i16 @llvm.fptosi.sat.i16.f64(double %a) ret i16 %0 } -declare i16 @llvm.fptosi.sat.i16.f64(double) define zeroext i16 @fcvt_wu_s_i16(double %a) nounwind { ; RV32IFD-LABEL: fcvt_wu_s_i16: @@ -1954,7 +1949,6 @@ start: %0 = tail call i16 @llvm.fptoui.sat.i16.f64(double %a) ret i16 %0 } -declare i16 @llvm.fptoui.sat.i16.f64(double) define signext i8 @fcvt_w_s_i8(double %a) nounwind { ; RV32IFD-LABEL: fcvt_w_s_i8: @@ -2158,7 +2152,6 @@ start: %0 = tail call i8 @llvm.fptosi.sat.i8.f64(double %a) ret i8 %0 } -declare i8 @llvm.fptosi.sat.i8.f64(double) define zeroext i8 @fcvt_wu_s_i8(double %a) nounwind { ; @@ -2327,7 +2320,6 @@ start: %0 = tail call i8 @llvm.fptoui.sat.i8.f64(double %a) 
ret i8 %0 } -declare i8 @llvm.fptoui.sat.i8.f64(double) define zeroext i32 @fcvt_wu_d_sat_zext(double %a) nounwind { ; RV32IFD-LABEL: fcvt_wu_d_sat_zext: diff --git a/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll b/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll index b1c63af3e7e07..610f34dba7397 100644 --- a/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll +++ b/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll @@ -55,7 +55,6 @@ define i32 @fcmp_oeq(double %a, double %b) nounwind strictfp { %2 = zext i1 %1 to i32 ret i32 %2 } -declare i1 @llvm.experimental.constrained.fcmp.f64(double, double, metadata, metadata) define i32 @fcmp_ogt(double %a, double %b) nounwind strictfp { ; CHECKIFD-LABEL: fcmp_ogt: @@ -871,7 +870,6 @@ define i32 @fcmps_oeq(double %a, double %b) nounwind strictfp { %2 = zext i1 %1 to i32 ret i32 %2 } -declare i1 @llvm.experimental.constrained.fcmps.f64(double, double, metadata, metadata) define i32 @fcmps_ogt(double %a, double %b) nounwind strictfp { ; CHECKIFD-LABEL: fcmps_ogt: diff --git a/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll b/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll index fddb86de58f51..117a00dce4b10 100644 --- a/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll +++ b/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll @@ -18,8 +18,6 @@ ; RUN: -verify-machineinstrs -disable-strictnode-mutation \ ; RUN: | FileCheck -check-prefix=RV64I %s -declare double @llvm.experimental.constrained.sqrt.f64(double, metadata, metadata) - define double @sqrt_f64(double %a) nounwind strictfp { ; CHECKIFD-LABEL: sqrt_f64: ; CHECKIFD: # %bb.0: @@ -57,8 +55,6 @@ define double @sqrt_f64(double %a) nounwind strictfp { ret double %1 } -declare double @llvm.experimental.constrained.powi.f64(double, i32, metadata, metadata) - define double @powi_f64(double %a, i32 %b) nounwind strictfp { ; RV32IFD-LABEL: powi_f64: ; RV32IFD: # %bb.0: @@ -120,8 +116,6 @@ define double @powi_f64(double %a, i32 %b) nounwind strictfp { ret double %1 } -declare double 
@llvm.experimental.constrained.sin.f64(double, metadata, metadata) - define double @sin_f64(double %a) nounwind strictfp { ; RV32IFD-LABEL: sin_f64: ; RV32IFD: # %bb.0: @@ -180,8 +174,6 @@ define double @sin_f64(double %a) nounwind strictfp { ret double %1 } -declare double @llvm.experimental.constrained.cos.f64(double, metadata, metadata) - define double @cos_f64(double %a) nounwind strictfp { ; RV32IFD-LABEL: cos_f64: ; RV32IFD: # %bb.0: @@ -375,8 +367,6 @@ define double @sincos_f64(double %a) nounwind strictfp { ret double %3 } -declare double @llvm.experimental.constrained.tan.f64(double, metadata, metadata) - define double @tan_f64(double %a) nounwind strictfp { ; RV32IFD-LABEL: tan_f64: ; RV32IFD: # %bb.0: @@ -609,8 +599,6 @@ define double @atan_f64(double %a) nounwind strictfp { ret double %1 } -declare double @llvm.experimental.constrained.atan2.f64(double, double, metadata, metadata) - define double @atan2_f64(double %a, double %b) nounwind strictfp { ; RV32IFD-LABEL: atan2_f64: ; RV32IFD: # %bb.0: @@ -843,8 +831,6 @@ define double @tanh_f64(double %a) nounwind strictfp { ret double %1 } -declare double @llvm.experimental.constrained.pow.f64(double, double, metadata, metadata) - define double @pow_f64(double %a, double %b) nounwind strictfp { ; RV32IFD-LABEL: pow_f64: ; RV32IFD: # %bb.0: @@ -903,8 +889,6 @@ define double @pow_f64(double %a, double %b) nounwind strictfp { ret double %1 } -declare double @llvm.experimental.constrained.exp.f64(double, metadata, metadata) - define double @exp_f64(double %a) nounwind strictfp { ; RV32IFD-LABEL: exp_f64: ; RV32IFD: # %bb.0: @@ -963,8 +947,6 @@ define double @exp_f64(double %a) nounwind strictfp { ret double %1 } -declare double @llvm.experimental.constrained.exp2.f64(double, metadata, metadata) - define double @exp2_f64(double %a) nounwind strictfp { ; RV32IFD-LABEL: exp2_f64: ; RV32IFD: # %bb.0: @@ -1023,8 +1005,6 @@ define double @exp2_f64(double %a) nounwind strictfp { ret double %1 } -declare double 
@llvm.experimental.constrained.log.f64(double, metadata, metadata) - define double @log_f64(double %a) nounwind strictfp { ; RV32IFD-LABEL: log_f64: ; RV32IFD: # %bb.0: @@ -1083,8 +1063,6 @@ define double @log_f64(double %a) nounwind strictfp { ret double %1 } -declare double @llvm.experimental.constrained.log10.f64(double, metadata, metadata) - define double @log10_f64(double %a) nounwind strictfp { ; RV32IFD-LABEL: log10_f64: ; RV32IFD: # %bb.0: @@ -1143,8 +1121,6 @@ define double @log10_f64(double %a) nounwind strictfp { ret double %1 } -declare double @llvm.experimental.constrained.log2.f64(double, metadata, metadata) - define double @log2_f64(double %a) nounwind strictfp { ; RV32IFD-LABEL: log2_f64: ; RV32IFD: # %bb.0: @@ -1203,8 +1179,6 @@ define double @log2_f64(double %a) nounwind strictfp { ret double %1 } -declare double @llvm.experimental.constrained.fma.f64(double, double, double, metadata, metadata) - define double @fma_f64(double %a, double %b, double %c) nounwind strictfp { ; CHECKIFD-LABEL: fma_f64: ; CHECKIFD: # %bb.0: @@ -1242,8 +1216,6 @@ define double @fma_f64(double %a, double %b, double %c) nounwind strictfp { ret double %1 } -declare double @llvm.experimental.constrained.fmuladd.f64(double, double, double, metadata, metadata) - define double @fmuladd_f64(double %a, double %b, double %c) nounwind strictfp { ; CHECKIFD-LABEL: fmuladd_f64: ; CHECKIFD: # %bb.0: @@ -1295,8 +1267,6 @@ define double @fmuladd_f64(double %a, double %b, double %c) nounwind strictfp { ret double %1 } -declare double @llvm.experimental.constrained.minnum.f64(double, double, metadata) - define double @minnum_f64(double %a, double %b) nounwind strictfp { ; RV32IFD-LABEL: minnum_f64: ; RV32IFD: # %bb.0: @@ -1355,8 +1325,6 @@ define double @minnum_f64(double %a, double %b) nounwind strictfp { ret double %1 } -declare double @llvm.experimental.constrained.maxnum.f64(double, double, metadata) - define double @maxnum_f64(double %a, double %b) nounwind strictfp { ; 
RV32IFD-LABEL: maxnum_f64: ; RV32IFD: # %bb.0: @@ -1432,8 +1400,6 @@ define double @maxnum_f64(double %a, double %b) nounwind strictfp { ; ret double %1 ; } -declare double @llvm.experimental.constrained.floor.f64(double, metadata) - define double @floor_f64(double %a) nounwind strictfp { ; RV32IFD-LABEL: floor_f64: ; RV32IFD: # %bb.0: @@ -1492,8 +1458,6 @@ define double @floor_f64(double %a) nounwind strictfp { ret double %1 } -declare double @llvm.experimental.constrained.ceil.f64(double, metadata) - define double @ceil_f64(double %a) nounwind strictfp { ; RV32IFD-LABEL: ceil_f64: ; RV32IFD: # %bb.0: @@ -1552,8 +1516,6 @@ define double @ceil_f64(double %a) nounwind strictfp { ret double %1 } -declare double @llvm.experimental.constrained.trunc.f64(double, metadata) - define double @trunc_f64(double %a) nounwind strictfp { ; RV32IFD-LABEL: trunc_f64: ; RV32IFD: # %bb.0: @@ -1612,8 +1574,6 @@ define double @trunc_f64(double %a) nounwind strictfp { ret double %1 } -declare double @llvm.experimental.constrained.rint.f64(double, metadata, metadata) - define double @rint_f64(double %a) nounwind strictfp { ; RV32IFD-LABEL: rint_f64: ; RV32IFD: # %bb.0: @@ -1672,8 +1632,6 @@ define double @rint_f64(double %a) nounwind strictfp { ret double %1 } -declare double @llvm.experimental.constrained.nearbyint.f64(double, metadata, metadata) - define double @nearbyint_f64(double %a) nounwind strictfp { ; RV32IFD-LABEL: nearbyint_f64: ; RV32IFD: # %bb.0: @@ -1732,8 +1690,6 @@ define double @nearbyint_f64(double %a) nounwind strictfp { ret double %1 } -declare double @llvm.experimental.constrained.round.f64(double, metadata) - define double @round_f64(double %a) nounwind strictfp { ; RV32IFD-LABEL: round_f64: ; RV32IFD: # %bb.0: @@ -1792,8 +1748,6 @@ define double @round_f64(double %a) nounwind strictfp { ret double %1 } -declare double @llvm.experimental.constrained.roundeven.f64(double, metadata) - define double @roundeven_f64(double %a) nounwind strictfp { ; RV32IFD-LABEL: 
roundeven_f64: ; RV32IFD: # %bb.0: @@ -1852,8 +1806,6 @@ define double @roundeven_f64(double %a) nounwind strictfp { ret double %1 } -declare iXLen @llvm.experimental.constrained.lrint.iXLen.f64(double, metadata, metadata) - define iXLen @lrint_f64(double %a) nounwind strictfp { ; RV32IFD-LABEL: lrint_f64: ; RV32IFD: # %bb.0: @@ -1896,8 +1848,6 @@ define iXLen @lrint_f64(double %a) nounwind strictfp { ret iXLen %1 } -declare iXLen @llvm.experimental.constrained.lround.iXLen.f64(double, metadata) - define iXLen @lround_f64(double %a) nounwind strictfp { ; RV32IFD-LABEL: lround_f64: ; RV32IFD: # %bb.0: @@ -1940,8 +1890,6 @@ define iXLen @lround_f64(double %a) nounwind strictfp { ret iXLen %1 } -declare i64 @llvm.experimental.constrained.llrint.i64.f64(double, metadata, metadata) - define i64 @llrint_f64(double %a) nounwind strictfp { ; RV32IFD-LABEL: llrint_f64: ; RV32IFD: # %bb.0: @@ -1992,8 +1940,6 @@ define i64 @llrint_f64(double %a) nounwind strictfp { ret i64 %1 } -declare i64 @llvm.experimental.constrained.llround.i64.f64(double, metadata) - define i64 @llround_f64(double %a) nounwind strictfp { ; RV32IFD-LABEL: llround_f64: ; RV32IFD: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/double-intrinsics.ll b/llvm/test/CodeGen/RISCV/double-intrinsics.ll index aaa08b577c4f4..81e6d84af17cb 100644 --- a/llvm/test/CodeGen/RISCV/double-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/double-intrinsics.ll @@ -16,8 +16,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 \ ; RUN: -verify-machineinstrs | FileCheck -check-prefix=RV64I %s -declare double @llvm.sqrt.f64(double) - define double @sqrt_f64(double %a) nounwind { ; CHECKIFD-LABEL: sqrt_f64: ; CHECKIFD: # %bb.0: @@ -55,8 +53,6 @@ define double @sqrt_f64(double %a) nounwind { ret double %1 } -declare double @llvm.powi.f64.i32(double, i32) - define double @powi_f64(double %a, i32 %b) nounwind { ; RV32IFD-LABEL: powi_f64: ; RV32IFD: # %bb.0: @@ -113,8 +109,6 @@ define double @powi_f64(double %a, i32 %b) nounwind { ret 
double %1 } -declare double @llvm.sin.f64(double) - define double @sin_f64(double %a) nounwind { ; CHECKIFD-LABEL: sin_f64: ; CHECKIFD: # %bb.0: @@ -154,8 +148,6 @@ define double @sin_f64(double %a) nounwind { ret double %1 } -declare double @llvm.cos.f64(double) - define double @cos_f64(double %a) nounwind { ; CHECKIFD-LABEL: cos_f64: ; CHECKIFD: # %bb.0: @@ -330,8 +322,6 @@ define double @sincos_f64(double %a) nounwind { ret double %3 } -declare double @llvm.pow.f64(double, double) - define double @pow_f64(double %a, double %b) nounwind { ; CHECKIFD-LABEL: pow_f64: ; CHECKIFD: # %bb.0: @@ -371,8 +361,6 @@ define double @pow_f64(double %a, double %b) nounwind { ret double %1 } -declare double @llvm.exp.f64(double) - define double @exp_f64(double %a) nounwind { ; CHECKIFD-LABEL: exp_f64: ; CHECKIFD: # %bb.0: @@ -412,8 +400,6 @@ define double @exp_f64(double %a) nounwind { ret double %1 } -declare double @llvm.exp2.f64(double) - define double @exp2_f64(double %a) nounwind { ; CHECKIFD-LABEL: exp2_f64: ; CHECKIFD: # %bb.0: @@ -492,8 +478,6 @@ define double @exp10_f64(double %a) nounwind { ret double %1 } -declare double @llvm.log.f64(double) - define double @log_f64(double %a) nounwind { ; CHECKIFD-LABEL: log_f64: ; CHECKIFD: # %bb.0: @@ -533,8 +517,6 @@ define double @log_f64(double %a) nounwind { ret double %1 } -declare double @llvm.log10.f64(double) - define double @log10_f64(double %a) nounwind { ; CHECKIFD-LABEL: log10_f64: ; CHECKIFD: # %bb.0: @@ -574,8 +556,6 @@ define double @log10_f64(double %a) nounwind { ret double %1 } -declare double @llvm.log2.f64(double) - define double @log2_f64(double %a) nounwind { ; CHECKIFD-LABEL: log2_f64: ; CHECKIFD: # %bb.0: @@ -615,8 +595,6 @@ define double @log2_f64(double %a) nounwind { ret double %1 } -declare double @llvm.fma.f64(double, double, double) - define double @fma_f64(double %a, double %b, double %c) nounwind { ; CHECKIFD-LABEL: fma_f64: ; CHECKIFD: # %bb.0: @@ -654,8 +632,6 @@ define double @fma_f64(double %a, 
double %b, double %c) nounwind { ret double %1 } -declare double @llvm.fmuladd.f64(double, double, double) - define double @fmuladd_f64(double %a, double %b, double %c) nounwind { ; CHECKIFD-LABEL: fmuladd_f64: ; CHECKIFD: # %bb.0: @@ -707,8 +683,6 @@ define double @fmuladd_f64(double %a, double %b, double %c) nounwind { ret double %1 } -declare double @llvm.fabs.f64(double) - define double @fabs_f64(double %a) nounwind { ; CHECKIFD-LABEL: fabs_f64: ; CHECKIFD: # %bb.0: @@ -740,8 +714,6 @@ define double @fabs_f64(double %a) nounwind { ret double %1 } -declare double @llvm.minnum.f64(double, double) - define double @minnum_f64(double %a, double %b) nounwind { ; CHECKIFD-LABEL: minnum_f64: ; CHECKIFD: # %bb.0: @@ -779,8 +751,6 @@ define double @minnum_f64(double %a, double %b) nounwind { ret double %1 } -declare double @llvm.maxnum.f64(double, double) - define double @maxnum_f64(double %a, double %b) nounwind { ; CHECKIFD-LABEL: maxnum_f64: ; CHECKIFD: # %bb.0: @@ -818,8 +788,6 @@ define double @maxnum_f64(double %a, double %b) nounwind { ret double %1 } -declare double @llvm.copysign.f64(double, double) - define double @copysign_f64(double %a, double %b) nounwind { ; CHECKIFD-LABEL: copysign_f64: ; CHECKIFD: # %bb.0: @@ -857,8 +825,6 @@ define double @copysign_f64(double %a, double %b) nounwind { ret double %1 } -declare double @llvm.floor.f64(double) - define double @floor_f64(double %a) nounwind { ; RV32IFD-LABEL: floor_f64: ; RV32IFD: # %bb.0: @@ -923,8 +889,6 @@ define double @floor_f64(double %a) nounwind { ret double %1 } -declare double @llvm.ceil.f64(double) - define double @ceil_f64(double %a) nounwind { ; RV32IFD-LABEL: ceil_f64: ; RV32IFD: # %bb.0: @@ -989,8 +953,6 @@ define double @ceil_f64(double %a) nounwind { ret double %1 } -declare double @llvm.trunc.f64(double) - define double @trunc_f64(double %a) nounwind { ; RV32IFD-LABEL: trunc_f64: ; RV32IFD: # %bb.0: @@ -1055,8 +1017,6 @@ define double @trunc_f64(double %a) nounwind { ret double %1 } -declare 
double @llvm.rint.f64(double) - define double @rint_f64(double %a) nounwind { ; RV32IFD-LABEL: rint_f64: ; RV32IFD: # %bb.0: @@ -1121,8 +1081,6 @@ define double @rint_f64(double %a) nounwind { ret double %1 } -declare double @llvm.nearbyint.f64(double) - define double @nearbyint_f64(double %a) nounwind { ; CHECKIFD-LABEL: nearbyint_f64: ; CHECKIFD: # %bb.0: @@ -1162,8 +1120,6 @@ define double @nearbyint_f64(double %a) nounwind { ret double %1 } -declare double @llvm.round.f64(double) - define double @round_f64(double %a) nounwind { ; RV32IFD-LABEL: round_f64: ; RV32IFD: # %bb.0: @@ -1228,8 +1184,6 @@ define double @round_f64(double %a) nounwind { ret double %1 } -declare double @llvm.roundeven.f64(double) - define double @roundeven_f64(double %a) nounwind { ; RV32IFD-LABEL: roundeven_f64: ; RV32IFD: # %bb.0: @@ -1294,8 +1248,6 @@ define double @roundeven_f64(double %a) nounwind { ret double %1 } -declare iXLen @llvm.lrint.iXLen.f64(double) - define iXLen @lrint_f64(double %a) nounwind { ; RV32IFD-LABEL: lrint_f64: ; RV32IFD: # %bb.0: @@ -1338,9 +1290,6 @@ define iXLen @lrint_f64(double %a) nounwind { ret iXLen %1 } -declare i32 @llvm.lround.i32.f64(double) -declare i64 @llvm.lround.i64.f64(double) - define iXLen @lround_f64(double %a) nounwind { ; RV32IFD-LABEL: lround_f64: ; RV32IFD: # %bb.0: @@ -1420,8 +1369,6 @@ define i32 @lround_i32_f64(double %a) nounwind { ret i32 %1 } -declare i64 @llvm.llrint.i64.f64(double) - define i64 @llrint_f64(double %a) nounwind { ; RV32IFD-LABEL: llrint_f64: ; RV32IFD: # %bb.0: @@ -1472,8 +1419,6 @@ define i64 @llrint_f64(double %a) nounwind { ret i64 %1 } -declare i64 @llvm.llround.i64.f64(double) - define i64 @llround_f64(double %a) nounwind { ; RV32IFD-LABEL: llround_f64: ; RV32IFD: # %bb.0: @@ -1524,7 +1469,6 @@ define i64 @llround_f64(double %a) nounwind { ret i64 %1 } -declare i1 @llvm.is.fpclass.f64(double, i32) define i1 @isnan_d_fpclass(double %x) { ; CHECKIFD-LABEL: isnan_d_fpclass: ; CHECKIFD: # %bb.0: @@ -1611,8 +1555,6 
@@ define double @tan_f64(double %a) nounwind { ret double %1 } -declare double @llvm.maximumnum.f64(double, double) - define double @maximumnum_double(double %x, double %y) { ; CHECKIFD-LABEL: maximumnum_double: ; CHECKIFD: # %bb.0: @@ -1658,8 +1600,6 @@ define double @maximumnum_double(double %x, double %y) { ret double %z } -declare double @llvm.minimumnum.f64(double, double) - define double @minimumnum_double(double %x, double %y) { ; CHECKIFD-LABEL: minimumnum_double: ; CHECKIFD: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/double-maximum-minimum.ll b/llvm/test/CodeGen/RISCV/double-maximum-minimum.ll index 6202e92b4dc65..8b509e901e833 100644 --- a/llvm/test/CodeGen/RISCV/double-maximum-minimum.ll +++ b/llvm/test/CodeGen/RISCV/double-maximum-minimum.ll @@ -12,8 +12,6 @@ ; RUN: -verify-machineinstrs -target-abi=lp64 \ ; RUN: | FileCheck -check-prefix=RV64IZFINXZDINX %s -declare double @llvm.minimum.f64(double, double) - define double @fminimum_f64(double %a, double %b) nounwind { ; CHECKIFD-LABEL: fminimum_f64: ; CHECKIFD: # %bb.0: @@ -75,8 +73,6 @@ define double @fminimum_f64(double %a, double %b) nounwind { ret double %1 } -declare double @llvm.maximum.f64(double, double) - define double @fmaximum_f64(double %a, double %b) nounwind { ; CHECKIFD-LABEL: fmaximum_f64: ; CHECKIFD: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/double-round-conv-sat.ll b/llvm/test/CodeGen/RISCV/double-round-conv-sat.ll index a574e68671a74..1fb3d34907caa 100644 --- a/llvm/test/CodeGen/RISCV/double-round-conv-sat.ll +++ b/llvm/test/CodeGen/RISCV/double-round-conv-sat.ll @@ -1502,13 +1502,3 @@ define i64 @test_rint_ui64(double %x) nounwind { ret i64 %b } -declare double @llvm.floor.f64(double) -declare double @llvm.ceil.f64(double) -declare double @llvm.trunc.f64(double) -declare double @llvm.round.f64(double) -declare double @llvm.roundeven.f64(double) -declare double @llvm.rint.f64(double) -declare i32 @llvm.fptosi.sat.i32.f64(double) -declare i64 @llvm.fptosi.sat.i64.f64(double) 
-declare i32 @llvm.fptoui.sat.i32.f64(double) -declare i64 @llvm.fptoui.sat.i64.f64(double) diff --git a/llvm/test/CodeGen/RISCV/double-round-conv.ll b/llvm/test/CodeGen/RISCV/double-round-conv.ll index 6dd24c056e386..43fe87111d7a3 100644 --- a/llvm/test/CodeGen/RISCV/double-round-conv.ll +++ b/llvm/test/CodeGen/RISCV/double-round-conv.ll @@ -1388,8 +1388,3 @@ define double @test_roundeven_double(double %x) { ret double %a } -declare double @llvm.floor.f64(double) -declare double @llvm.ceil.f64(double) -declare double @llvm.trunc.f64(double) -declare double @llvm.round.f64(double) -declare double @llvm.roundeven.f64(double) diff --git a/llvm/test/CodeGen/RISCV/double-zfa.ll b/llvm/test/CodeGen/RISCV/double-zfa.ll index f17c63ddb6cae..a93ec86e363fc 100644 --- a/llvm/test/CodeGen/RISCV/double-zfa.ll +++ b/llvm/test/CodeGen/RISCV/double-zfa.ll @@ -183,8 +183,6 @@ define double @loadfpimm18() { ret double 0x8010000000000000 } -declare double @llvm.minimum.f64(double, double) - define double @fminm_d(double %a, double %b) nounwind { ; CHECK-LABEL: fminm_d: ; CHECK: # %bb.0: @@ -194,8 +192,6 @@ define double @fminm_d(double %a, double %b) nounwind { ret double %1 } -declare double @llvm.maximum.f64(double, double) - define double @fmaxm_d(double %a, double %b) nounwind { ; CHECK-LABEL: fmaxm_d: ; CHECK: # %bb.0: @@ -216,7 +212,6 @@ define double @fround_d_1(double %a) nounwind { declare double @round(double) nounwind readnone - define double @fround_d_2(double %a) nounwind { ; CHECK-LABEL: fround_d_2: ; CHECK: # %bb.0: @@ -228,7 +223,6 @@ define double @fround_d_2(double %a) nounwind { declare double @floor(double) nounwind readnone - define double @fround_d_3(double %a) nounwind { ; CHECK-LABEL: fround_d_3: ; CHECK: # %bb.0: @@ -240,7 +234,6 @@ define double @fround_d_3(double %a) nounwind { declare double @ceil(double) nounwind readnone - define double @fround_d_4(double %a) nounwind { ; CHECK-LABEL: fround_d_4: ; CHECK: # %bb.0: @@ -252,7 +245,6 @@ define double 
@fround_d_4(double %a) nounwind { declare double @trunc(double) nounwind readnone - define double @fround_d_5(double %a) nounwind { ; CHECK-LABEL: fround_d_5: ; CHECK: # %bb.0: @@ -273,9 +265,6 @@ define double @fround_d_6(double %a) nounwind { ret double %call } -declare double @llvm.roundeven.f64(double) nounwind readnone - - define double @froundnx_d(double %a) nounwind { ; CHECK-LABEL: froundnx_d: ; CHECK: # %bb.0: @@ -287,8 +276,6 @@ define double @froundnx_d(double %a) nounwind { declare double @rint(double) nounwind readnone -declare i1 @llvm.experimental.constrained.fcmp.f64(double, double, metadata, metadata) - define i32 @fcmp_olt_q(double %a, double %b) nounwind strictfp { ; CHECK-LABEL: fcmp_olt_q: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/double_reduct.ll b/llvm/test/CodeGen/RISCV/double_reduct.ll index cecdd77a079e4..6f25892fce20f 100644 --- a/llvm/test/CodeGen/RISCV/double_reduct.ll +++ b/llvm/test/CodeGen/RISCV/double_reduct.ll @@ -69,7 +69,6 @@ define float @fmax_f32(<4 x float> %a, <4 x float> %b) { ret float %r } - define i32 @add_i32(<4 x i32> %a, <4 x i32> %b) { ; CHECK-LABEL: add_i32: ; CHECK: # %bb.0: @@ -261,24 +260,3 @@ define i32 @smax_i32(<4 x i32> %a, <4 x i32> %b) { ret i32 %r } -declare float @llvm.vector.reduce.fadd.f32.v4f32(float, <4 x float>) -declare float @llvm.vector.reduce.fmul.f32.v4f32(float, <4 x float>) -declare float @llvm.vector.reduce.fmin.v4f32(<4 x float>) -declare float @llvm.vector.reduce.fmax.v4f32(<4 x float>) -declare i32 @llvm.vector.reduce.add.i32.v4i32(<4 x i32>) -declare i16 @llvm.vector.reduce.add.i16.v32i16(<32 x i16>) -declare i16 @llvm.vector.reduce.add.i16.v16i16(<16 x i16>) -declare i32 @llvm.vector.reduce.mul.i32.v4i32(<4 x i32>) -declare i32 @llvm.vector.reduce.and.i32.v4i32(<4 x i32>) -declare i32 @llvm.vector.reduce.or.i32.v4i32(<4 x i32>) -declare i32 @llvm.vector.reduce.xor.i32.v4i32(<4 x i32>) -declare i32 @llvm.vector.reduce.umin.i32.v4i32(<4 x i32>) -declare i32 
@llvm.vector.reduce.umax.i32.v4i32(<4 x i32>) -declare i32 @llvm.vector.reduce.smin.i32.v4i32(<4 x i32>) -declare i32 @llvm.vector.reduce.smax.i32.v4i32(<4 x i32>) -declare float @llvm.minnum.f32(float, float) -declare float @llvm.maxnum.f32(float, float) -declare i32 @llvm.umin.i32(i32, i32) -declare i32 @llvm.umax.i32(i32, i32) -declare i32 @llvm.smin.i32(i32, i32) -declare i32 @llvm.smax.i32(i32, i32) diff --git a/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll b/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll index 85867a4ab2c6f..d785e4c4ac29e 100644 --- a/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll +++ b/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll @@ -95,16 +95,3 @@ entry: ret void } -declare @llvm.riscv.vle.nxv8i16.i64(, ptr nocapture, i64) - -declare @llvm.riscv.vle.nxv8i8.i64(, ptr nocapture, i64) - -declare i64 @llvm.riscv.vsetvli.i64(i64, i64 immarg, i64 immarg) - -declare @llvm.riscv.vmsbc.nxv8i16.i16.i64(, i16, i64) - -declare @llvm.riscv.vsext.mask.nxv8i16.nxv8i8.i64(, , , i64, i64 immarg) - -declare target("riscv.vector.tuple", , 4) @llvm.riscv.tuple.insert.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), , i32) - -declare void @llvm.riscv.vsseg4.nxv8i16.i64(target("riscv.vector.tuple", , 4), ptr nocapture, i64, i64) diff --git a/llvm/test/CodeGen/RISCV/eh-dwarf-cfa.ll b/llvm/test/CodeGen/RISCV/eh-dwarf-cfa.ll index 62dd3fe1e2f30..202451b698c48 100644 --- a/llvm/test/CodeGen/RISCV/eh-dwarf-cfa.ll +++ b/llvm/test/CodeGen/RISCV/eh-dwarf-cfa.ll @@ -38,4 +38,3 @@ entry: declare void @foo(ptr) -declare ptr @llvm.eh.dwarf.cfa(i32) nounwind diff --git a/llvm/test/CodeGen/RISCV/fixed-csr.ll b/llvm/test/CodeGen/RISCV/fixed-csr.ll index f39085132e4a2..406c2afcc63d9 100644 --- a/llvm/test/CodeGen/RISCV/fixed-csr.ll +++ b/llvm/test/CodeGen/RISCV/fixed-csr.ll @@ -11,8 +11,6 @@ define noundef signext i32 @foo() { ret i32 0 } -declare void 
@llvm.write_register.i64(metadata, i64) - define noundef signext i32 @bar() nounwind { ; CHECK-LABEL: bar: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/float-arith-strict.ll b/llvm/test/CodeGen/RISCV/float-arith-strict.ll index 90ce034eafd3b..6a47c3f3c3926 100644 --- a/llvm/test/CodeGen/RISCV/float-arith-strict.ll +++ b/llvm/test/CodeGen/RISCV/float-arith-strict.ll @@ -47,7 +47,6 @@ define float @fadd_s(float %a, float %b) nounwind strictfp { %1 = call float @llvm.experimental.constrained.fadd.f32(float %a, float %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret float %1 } -declare float @llvm.experimental.constrained.fadd.f32(float, float, metadata, metadata) define float @fsub_s(float %a, float %b) nounwind strictfp { ; CHECKIF-LABEL: fsub_s: @@ -80,7 +79,6 @@ define float @fsub_s(float %a, float %b) nounwind strictfp { %1 = call float @llvm.experimental.constrained.fsub.f32(float %a, float %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret float %1 } -declare float @llvm.experimental.constrained.fsub.f32(float, float, metadata, metadata) define float @fmul_s(float %a, float %b) nounwind strictfp { ; CHECKIF-LABEL: fmul_s: @@ -113,7 +111,6 @@ define float @fmul_s(float %a, float %b) nounwind strictfp { %1 = call float @llvm.experimental.constrained.fmul.f32(float %a, float %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret float %1 } -declare float @llvm.experimental.constrained.fmul.f32(float, float, metadata, metadata) define float @fdiv_s(float %a, float %b) nounwind strictfp { ; CHECKIF-LABEL: fdiv_s: @@ -146,7 +143,6 @@ define float @fdiv_s(float %a, float %b) nounwind strictfp { %1 = call float @llvm.experimental.constrained.fdiv.f32(float %a, float %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret float %1 } -declare float @llvm.experimental.constrained.fdiv.f32(float, float, metadata, metadata) define float @fsqrt_s(float %a) nounwind strictfp { ; 
CHECKIF-LABEL: fsqrt_s: @@ -179,7 +175,6 @@ define float @fsqrt_s(float %a) nounwind strictfp { %1 = call float @llvm.experimental.constrained.sqrt.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret float %1 } -declare float @llvm.experimental.constrained.sqrt.f32(float, metadata, metadata) define float @fmin_s(float %a, float %b) nounwind strictfp { ; RV32IF-LABEL: fmin_s: @@ -238,7 +233,6 @@ define float @fmin_s(float %a, float %b) nounwind strictfp { %1 = call float @llvm.experimental.constrained.minnum.f32(float %a, float %b, metadata !"fpexcept.strict") strictfp ret float %1 } -declare float @llvm.experimental.constrained.minnum.f32(float, float, metadata) strictfp define float @fmax_s(float %a, float %b) nounwind strictfp { ; RV32IF-LABEL: fmax_s: @@ -297,7 +291,6 @@ define float @fmax_s(float %a, float %b) nounwind strictfp { %1 = call float @llvm.experimental.constrained.maxnum.f32(float %a, float %b, metadata !"fpexcept.strict") strictfp ret float %1 } -declare float @llvm.experimental.constrained.maxnum.f32(float, float, metadata) strictfp define float @fmadd_s(float %a, float %b, float %c) nounwind strictfp { ; CHECKIF-LABEL: fmadd_s: @@ -330,7 +323,6 @@ define float @fmadd_s(float %a, float %b, float %c) nounwind strictfp { %1 = call float @llvm.experimental.constrained.fma.f32(float %a, float %b, float %c, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret float %1 } -declare float @llvm.experimental.constrained.fma.f32(float, float, float, metadata, metadata) strictfp define float @fmsub_s(float %a, float %b, float %c) nounwind strictfp { ; CHECKIF-LABEL: fmsub_s: diff --git a/llvm/test/CodeGen/RISCV/float-arith.ll b/llvm/test/CodeGen/RISCV/float-arith.ll index 95f1fc6899206..af9e996fa2ef1 100644 --- a/llvm/test/CodeGen/RISCV/float-arith.ll +++ b/llvm/test/CodeGen/RISCV/float-arith.ll @@ -145,8 +145,6 @@ define float @fdiv_s(float %a, float %b) nounwind { ret float %1 } -declare float 
@llvm.sqrt.f32(float) - define float @fsqrt_s(float %a) nounwind { ; CHECKIF-LABEL: fsqrt_s: ; CHECKIF: # %bb.0: @@ -179,8 +177,6 @@ define float @fsqrt_s(float %a) nounwind { ret float %1 } -declare float @llvm.copysign.f32(float, float) - define float @fsgnj_s(float %a, float %b) nounwind { ; CHECKIF-LABEL: fsgnj_s: ; CHECKIF: # %bb.0: @@ -316,8 +312,6 @@ define float @fsgnjn_s(float %a, float %b) nounwind { ret float %3 } -declare float @llvm.fabs.f32(float) - define float @fabs_s(float %a, float %b) nounwind { ; CHECKIF-LABEL: fabs_s: ; CHECKIF: # %bb.0: @@ -364,8 +358,6 @@ define float @fabs_s(float %a, float %b) nounwind { ret float %3 } -declare float @llvm.minnum.f32(float, float) - define float @fmin_s(float %a, float %b) nounwind { ; CHECKIF-LABEL: fmin_s: ; CHECKIF: # %bb.0: @@ -398,8 +390,6 @@ define float @fmin_s(float %a, float %b) nounwind { ret float %1 } -declare float @llvm.maxnum.f32(float, float) - define float @fmax_s(float %a, float %b) nounwind { ; CHECKIF-LABEL: fmax_s: ; CHECKIF: # %bb.0: @@ -432,8 +422,6 @@ define float @fmax_s(float %a, float %b) nounwind { ret float %1 } -declare float @llvm.fma.f32(float, float, float) - define float @fmadd_s(float %a, float %b, float %c) nounwind { ; CHECKIF-LABEL: fmadd_s: ; CHECKIF: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/float-bitmanip-dagcombines.ll b/llvm/test/CodeGen/RISCV/float-bitmanip-dagcombines.ll index aaeb1b7c0b1fb..ff2eab615a87e 100644 --- a/llvm/test/CodeGen/RISCV/float-bitmanip-dagcombines.ll +++ b/llvm/test/CodeGen/RISCV/float-bitmanip-dagcombines.ll @@ -56,8 +56,6 @@ define float @fneg(float %a) nounwind { ret float %1 } -declare float @llvm.fabs.f32(float) - define float @fabs(float %a) nounwind { ; RV32I-LABEL: fabs: ; RV32I: # %bb.0: @@ -96,8 +94,6 @@ define float @fabs(float %a) nounwind { ret float %1 } -declare float @llvm.copysign.f32(float, float) - ; DAGTypeLegalizer::SoftenFloatRes_FCOPYSIGN will convert to bitwise ; operations if floating point isn't supported. 
A combine could be written to ; do the same even when f32 is legal. diff --git a/llvm/test/CodeGen/RISCV/float-convert-strict.ll b/llvm/test/CodeGen/RISCV/float-convert-strict.ll index 1b25a2b64f4d3..8daaf83c0f3c0 100644 --- a/llvm/test/CodeGen/RISCV/float-convert-strict.ll +++ b/llvm/test/CodeGen/RISCV/float-convert-strict.ll @@ -51,7 +51,6 @@ define i32 @fcvt_w_s(float %a) nounwind strictfp { %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %a, metadata !"fpexcept.strict") ret i32 %1 } -declare i32 @llvm.experimental.constrained.fptosi.i32.f32(float, metadata) define i32 @fcvt_wu_s(float %a) nounwind strictfp { ; CHECKIF-LABEL: fcvt_wu_s: @@ -84,7 +83,6 @@ define i32 @fcvt_wu_s(float %a) nounwind strictfp { %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %a, metadata !"fpexcept.strict") ret i32 %1 } -declare i32 @llvm.experimental.constrained.fptoui.i32.f32(float, metadata) ; Test where the fptoui has multiple uses, one of which causes a sext to be ; inserted on RV64. 
@@ -162,7 +160,6 @@ define float @fcvt_s_w(i32 %a) nounwind strictfp { %1 = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret float %1 } -declare float @llvm.experimental.constrained.sitofp.f32.i32(i32, metadata, metadata) define float @fcvt_s_w_load(ptr %p) nounwind strictfp { ; CHECKIF-LABEL: fcvt_s_w_load: @@ -233,7 +230,6 @@ define float @fcvt_s_wu(i32 %a) nounwind strictfp { %1 = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret float %1 } -declare float @llvm.experimental.constrained.uitofp.f32.i32(i32 %a, metadata, metadata) define float @fcvt_s_wu_load(ptr %p) nounwind strictfp { ; CHECKIF-LABEL: fcvt_s_wu_load: @@ -321,7 +317,6 @@ define i64 @fcvt_l_s(float %a) nounwind strictfp { %1 = call i64 @llvm.experimental.constrained.fptosi.i64.f32(float %a, metadata !"fpexcept.strict") ret i64 %1 } -declare i64 @llvm.experimental.constrained.fptosi.i64.f32(float, metadata) define i64 @fcvt_lu_s(float %a) nounwind strictfp { ; RV32IF-LABEL: fcvt_lu_s: @@ -372,7 +367,6 @@ define i64 @fcvt_lu_s(float %a) nounwind strictfp { %1 = call i64 @llvm.experimental.constrained.fptoui.i64.f32(float %a, metadata !"fpexcept.strict") ret i64 %1 } -declare i64 @llvm.experimental.constrained.fptoui.i64.f32(float, metadata) define float @fcvt_s_l(i64 %a) nounwind strictfp { ; RV32IF-LABEL: fcvt_s_l: @@ -423,7 +417,6 @@ define float @fcvt_s_l(i64 %a) nounwind strictfp { %1 = call float @llvm.experimental.constrained.sitofp.f32.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret float %1 } -declare float @llvm.experimental.constrained.sitofp.f32.i64(i64, metadata, metadata) define float @fcvt_s_lu(i64 %a) nounwind strictfp { ; RV32IF-LABEL: fcvt_s_lu: @@ -474,7 +467,6 @@ define float @fcvt_s_lu(i64 %a) nounwind strictfp { %1 = call float @llvm.experimental.constrained.uitofp.f32.i64(i64 %a, metadata !"round.dynamic", 
metadata !"fpexcept.strict") ret float %1 } -declare float @llvm.experimental.constrained.uitofp.f32.i64(i64, metadata, metadata) define float @fcvt_s_w_i8(i8 signext %a) nounwind strictfp { ; CHECKIF-LABEL: fcvt_s_w_i8: @@ -507,7 +499,6 @@ define float @fcvt_s_w_i8(i8 signext %a) nounwind strictfp { %1 = call float @llvm.experimental.constrained.sitofp.f32.i8(i8 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret float %1 } -declare float @llvm.experimental.constrained.sitofp.f32.i8(i8, metadata, metadata) define float @fcvt_s_wu_i8(i8 zeroext %a) nounwind strictfp { ; CHECKIF-LABEL: fcvt_s_wu_i8: @@ -540,7 +531,6 @@ define float @fcvt_s_wu_i8(i8 zeroext %a) nounwind strictfp { %1 = call float @llvm.experimental.constrained.uitofp.f32.i8(i8 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret float %1 } -declare float @llvm.experimental.constrained.uitofp.f32.i8(i8, metadata, metadata) define float @fcvt_s_w_i16(i16 signext %a) nounwind strictfp { ; CHECKIF-LABEL: fcvt_s_w_i16: @@ -573,7 +563,6 @@ define float @fcvt_s_w_i16(i16 signext %a) nounwind strictfp { %1 = call float @llvm.experimental.constrained.sitofp.f32.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret float %1 } -declare float @llvm.experimental.constrained.sitofp.f32.i16(i16, metadata, metadata) define float @fcvt_s_wu_i16(i16 zeroext %a) nounwind strictfp { ; CHECKIF-LABEL: fcvt_s_wu_i16: @@ -606,7 +595,6 @@ define float @fcvt_s_wu_i16(i16 zeroext %a) nounwind strictfp { %1 = call float @llvm.experimental.constrained.uitofp.f32.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret float %1 } -declare float @llvm.experimental.constrained.uitofp.f32.i16(i16, metadata, metadata) ; Make sure we select W version of addi on RV64. 
define signext i32 @fcvt_s_w_demanded_bits(i32 signext %0, ptr %1) nounwind strictfp { diff --git a/llvm/test/CodeGen/RISCV/float-convert.ll b/llvm/test/CodeGen/RISCV/float-convert.ll index e6e4f6642f685..4a637bf4ae327 100644 --- a/llvm/test/CodeGen/RISCV/float-convert.ll +++ b/llvm/test/CodeGen/RISCV/float-convert.ll @@ -150,7 +150,6 @@ start: %0 = tail call i32 @llvm.fptosi.sat.i32.f32(float %a) ret i32 %0 } -declare i32 @llvm.fptosi.sat.i32.f32(float) define i32 @fcvt_wu_s(float %a) nounwind { ; CHECKIF-LABEL: fcvt_wu_s: @@ -334,7 +333,6 @@ start: %0 = tail call i32 @llvm.fptoui.sat.i32.f32(float %a) ret i32 %0 } -declare i32 @llvm.fptoui.sat.i32.f32(float) define signext i32 @fmv_x_w(float %a, float %b) nounwind { ; CHECKIF-LABEL: fmv_x_w: @@ -801,7 +799,6 @@ start: %0 = tail call i64 @llvm.fptosi.sat.i64.f32(float %a) ret i64 %0 } -declare i64 @llvm.fptosi.sat.i64.f32(float) define i64 @fcvt_lu_s(float %a) nounwind { ; RV32IF-LABEL: fcvt_lu_s: @@ -989,7 +986,6 @@ start: %0 = tail call i64 @llvm.fptoui.sat.i64.f32(float %a) ret i64 %0 } -declare i64 @llvm.fptoui.sat.i64.f32(float) define float @fcvt_s_l(i64 %a) nounwind { ; RV32IF-LABEL: fcvt_s_l: @@ -1547,7 +1543,6 @@ start: %0 = tail call i16 @llvm.fptosi.sat.i16.f32(float %a) ret i16 %0 } -declare i16 @llvm.fptosi.sat.i16.f32(float) define zeroext i16 @fcvt_wu_s_i16(float %a) nounwind { ; RV32IF-LABEL: fcvt_wu_s_i16: @@ -1709,7 +1704,6 @@ start: %0 = tail call i16 @llvm.fptoui.sat.i16.f32(float %a) ret i16 %0 } -declare i16 @llvm.fptoui.sat.i16.f32(float) define signext i8 @fcvt_w_s_i8(float %a) nounwind { ; RV32IF-LABEL: fcvt_w_s_i8: @@ -1889,7 +1883,6 @@ start: %0 = tail call i8 @llvm.fptosi.sat.i8.f32(float %a) ret i8 %0 } -declare i8 @llvm.fptosi.sat.i8.f32(float) define zeroext i8 @fcvt_wu_s_i8(float %a) nounwind { ; RV32IF-LABEL: fcvt_wu_s_i8: @@ -2041,7 +2034,6 @@ start: %0 = tail call i8 @llvm.fptoui.sat.i8.f32(float %a) ret i8 %0 } -declare i8 @llvm.fptoui.sat.i8.f32(float) define zeroext i32 
@fcvt_wu_s_sat_zext(float %a) nounwind { ; RV32IF-LABEL: fcvt_wu_s_sat_zext: diff --git a/llvm/test/CodeGen/RISCV/float-fcmp-strict.ll b/llvm/test/CodeGen/RISCV/float-fcmp-strict.ll index 7cdd1826b4522..0334d2556cd9a 100644 --- a/llvm/test/CodeGen/RISCV/float-fcmp-strict.ll +++ b/llvm/test/CodeGen/RISCV/float-fcmp-strict.ll @@ -50,7 +50,6 @@ define i32 @fcmp_oeq(float %a, float %b) nounwind strictfp { %2 = zext i1 %1 to i32 ret i32 %2 } -declare i1 @llvm.experimental.constrained.fcmp.f32(float, float, metadata, metadata) define i32 @fcmp_ogt(float %a, float %b) nounwind strictfp { ; CHECKIF-LABEL: fcmp_ogt: @@ -717,7 +716,6 @@ define i32 @fcmps_oeq(float %a, float %b) nounwind strictfp { %2 = zext i1 %1 to i32 ret i32 %2 } -declare i1 @llvm.experimental.constrained.fcmps.f32(float, float, metadata, metadata) define i32 @fcmps_ogt(float %a, float %b) nounwind strictfp { ; CHECKIF-LABEL: fcmps_ogt: diff --git a/llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll b/llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll index 8b883f781c9d9..3a4acfd8a41ee 100644 --- a/llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll +++ b/llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll @@ -18,8 +18,6 @@ ; RUN: -verify-machineinstrs -disable-strictnode-mutation \ ; RUN: | FileCheck -check-prefix=RV64I %s -declare float @llvm.experimental.constrained.sqrt.f32(float, metadata, metadata) - define float @sqrt_f32(float %a) nounwind strictfp { ; CHECKIF-LABEL: sqrt_f32: ; CHECKIF: # %bb.0: @@ -52,8 +50,6 @@ define float @sqrt_f32(float %a) nounwind strictfp { ret float %1 } -declare float @llvm.experimental.constrained.powi.f32(float, i32, metadata, metadata) - define float @powi_f32(float %a, i32 %b) nounwind strictfp { ; RV32IF-LABEL: powi_f32: ; RV32IF: # %bb.0: @@ -115,8 +111,6 @@ define float @powi_f32(float %a, i32 %b) nounwind strictfp { ret float %1 } -declare float @llvm.experimental.constrained.sin.f32(float, metadata, metadata) - define float @sin_f32(float %a) nounwind strictfp { 
; RV32IF-LABEL: sin_f32: ; RV32IF: # %bb.0: @@ -175,8 +169,6 @@ define float @sin_f32(float %a) nounwind strictfp { ret float %1 } -declare float @llvm.experimental.constrained.cos.f32(float, metadata, metadata) - define float @cos_f32(float %a) nounwind strictfp { ; RV32IF-LABEL: cos_f32: ; RV32IF: # %bb.0: @@ -354,8 +346,6 @@ define float @sincos_f32(float %a) nounwind strictfp { ret float %3 } -declare float @llvm.experimental.constrained.tan.f32(float, metadata, metadata) - define float @tan_f32(float %a) nounwind strictfp { ; RV32IF-LABEL: tan_f32: ; RV32IF: # %bb.0: @@ -588,8 +578,6 @@ define float @atan_f32(float %a) nounwind strictfp { ret float %1 } -declare float @llvm.experimental.constrained.atan2.f32(float, float, metadata, metadata) - define float @atan2_f32(float %a, float %b) nounwind strictfp { ; RV32IF-LABEL: atan2_f32: ; RV32IF: # %bb.0: @@ -822,8 +810,6 @@ define float @tanh_f32(float %a) nounwind strictfp { ret float %1 } -declare float @llvm.experimental.constrained.pow.f32(float, float, metadata, metadata) - define float @pow_f32(float %a, float %b) nounwind strictfp { ; RV32IF-LABEL: pow_f32: ; RV32IF: # %bb.0: @@ -882,8 +868,6 @@ define float @pow_f32(float %a, float %b) nounwind strictfp { ret float %1 } -declare float @llvm.experimental.constrained.exp.f32(float, metadata, metadata) - define float @exp_f32(float %a) nounwind strictfp { ; RV32IF-LABEL: exp_f32: ; RV32IF: # %bb.0: @@ -942,8 +926,6 @@ define float @exp_f32(float %a) nounwind strictfp { ret float %1 } -declare float @llvm.experimental.constrained.exp2.f32(float, metadata, metadata) - define float @exp2_f32(float %a) nounwind strictfp { ; RV32IF-LABEL: exp2_f32: ; RV32IF: # %bb.0: @@ -1002,8 +984,6 @@ define float @exp2_f32(float %a) nounwind strictfp { ret float %1 } -declare float @llvm.experimental.constrained.log.f32(float, metadata, metadata) - define float @log_f32(float %a) nounwind strictfp { ; RV32IF-LABEL: log_f32: ; RV32IF: # %bb.0: @@ -1062,8 +1042,6 @@ define 
float @log_f32(float %a) nounwind strictfp { ret float %1 } -declare float @llvm.experimental.constrained.log10.f32(float, metadata, metadata) - define float @log10_f32(float %a) nounwind strictfp { ; RV32IF-LABEL: log10_f32: ; RV32IF: # %bb.0: @@ -1122,8 +1100,6 @@ define float @log10_f32(float %a) nounwind strictfp { ret float %1 } -declare float @llvm.experimental.constrained.log2.f32(float, metadata, metadata) - define float @log2_f32(float %a) nounwind strictfp { ; RV32IF-LABEL: log2_f32: ; RV32IF: # %bb.0: @@ -1182,8 +1158,6 @@ define float @log2_f32(float %a) nounwind strictfp { ret float %1 } -declare float @llvm.experimental.constrained.fma.f32(float, float, float, metadata, metadata) - define float @fma_f32(float %a, float %b, float %c) nounwind strictfp { ; CHECKIF-LABEL: fma_f32: ; CHECKIF: # %bb.0: @@ -1216,8 +1190,6 @@ define float @fma_f32(float %a, float %b, float %c) nounwind strictfp { ret float %1 } -declare float @llvm.experimental.constrained.fmuladd.f32(float, float, float, metadata, metadata) - define float @fmuladd_f32(float %a, float %b, float %c) nounwind strictfp { ; CHECKIF-LABEL: fmuladd_f32: ; CHECKIF: # %bb.0: @@ -1260,8 +1232,6 @@ define float @fmuladd_f32(float %a, float %b, float %c) nounwind strictfp { ret float %1 } -declare float @llvm.experimental.constrained.minnum.f32(float, float, metadata) - define float @minnum_f32(float %a, float %b) nounwind strictfp { ; RV32IF-LABEL: minnum_f32: ; RV32IF: # %bb.0: @@ -1320,8 +1290,6 @@ define float @minnum_f32(float %a, float %b) nounwind strictfp { ret float %1 } -declare float @llvm.experimental.constrained.maxnum.f32(float, float, metadata) - define float @maxnum_f32(float %a, float %b) nounwind strictfp { ; RV32IF-LABEL: maxnum_f32: ; RV32IF: # %bb.0: @@ -1397,8 +1365,6 @@ define float @maxnum_f32(float %a, float %b) nounwind strictfp { ; ret float %1 ; } -declare float @llvm.experimental.constrained.floor.f32(float, metadata) - define float @floor_f32(float %a) nounwind strictfp { 
; RV32IF-LABEL: floor_f32: ; RV32IF: # %bb.0: @@ -1457,8 +1423,6 @@ define float @floor_f32(float %a) nounwind strictfp { ret float %1 } -declare float @llvm.experimental.constrained.ceil.f32(float, metadata) - define float @ceil_f32(float %a) nounwind strictfp { ; RV32IF-LABEL: ceil_f32: ; RV32IF: # %bb.0: @@ -1517,8 +1481,6 @@ define float @ceil_f32(float %a) nounwind strictfp { ret float %1 } -declare float @llvm.experimental.constrained.trunc.f32(float, metadata) - define float @trunc_f32(float %a) nounwind strictfp { ; RV32IF-LABEL: trunc_f32: ; RV32IF: # %bb.0: @@ -1577,8 +1539,6 @@ define float @trunc_f32(float %a) nounwind strictfp { ret float %1 } -declare float @llvm.experimental.constrained.rint.f32(float, metadata, metadata) - define float @rint_f32(float %a) nounwind strictfp { ; RV32IF-LABEL: rint_f32: ; RV32IF: # %bb.0: @@ -1637,8 +1597,6 @@ define float @rint_f32(float %a) nounwind strictfp { ret float %1 } -declare float @llvm.experimental.constrained.nearbyint.f32(float, metadata, metadata) - define float @nearbyint_f32(float %a) nounwind strictfp { ; RV32IF-LABEL: nearbyint_f32: ; RV32IF: # %bb.0: @@ -1697,8 +1655,6 @@ define float @nearbyint_f32(float %a) nounwind strictfp { ret float %1 } -declare float @llvm.experimental.constrained.round.f32(float, metadata) - define float @round_f32(float %a) nounwind strictfp { ; RV32IF-LABEL: round_f32: ; RV32IF: # %bb.0: @@ -1757,8 +1713,6 @@ define float @round_f32(float %a) nounwind strictfp { ret float %1 } -declare float @llvm.experimental.constrained.roundeven.f32(float, metadata) - define float @roundeven_f32(float %a) nounwind strictfp { ; RV32IF-LABEL: roundeven_f32: ; RV32IF: # %bb.0: @@ -1817,8 +1771,6 @@ define float @roundeven_f32(float %a) nounwind strictfp { ret float %1 } -declare iXLen @llvm.experimental.constrained.lrint.iXLen.f32(float, metadata, metadata) - define iXLen @lrint_f32(float %a) nounwind strictfp { ; RV32IF-LABEL: lrint_f32: ; RV32IF: # %bb.0: @@ -1861,8 +1813,6 @@ define 
iXLen @lrint_f32(float %a) nounwind strictfp { ret iXLen %1 } -declare iXLen @llvm.experimental.constrained.lround.iXLen.f32(float, metadata) - define iXLen @lround_f32(float %a) nounwind strictfp { ; RV32IF-LABEL: lround_f32: ; RV32IF: # %bb.0: @@ -1905,8 +1855,6 @@ define iXLen @lround_f32(float %a) nounwind strictfp { ret iXLen %1 } -declare i64 @llvm.experimental.constrained.llrint.i64.f32(float, metadata, metadata) - define i64 @llrint_f32(float %a) nounwind strictfp { ; RV32IF-LABEL: llrint_f32: ; RV32IF: # %bb.0: @@ -1957,8 +1905,6 @@ define i64 @llrint_f32(float %a) nounwind strictfp { ret i64 %1 } -declare i64 @llvm.experimental.constrained.llround.i64.f32(float, metadata) - define i64 @llround_f32(float %a) nounwind strictfp { ; RV32IF-LABEL: llround_f32: ; RV32IF: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/float-intrinsics.ll b/llvm/test/CodeGen/RISCV/float-intrinsics.ll index 5f673ac17d569..069e20da7b908 100644 --- a/llvm/test/CodeGen/RISCV/float-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/float-intrinsics.ll @@ -22,8 +22,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 \ ; RUN: -verify-machineinstrs | FileCheck -check-prefix=RV64I %s -declare float @llvm.sqrt.f32(float) - define float @sqrt_f32(float %a) nounwind { ; RV32IF-LABEL: sqrt_f32: ; RV32IF: # %bb.0: @@ -71,8 +69,6 @@ define float @sqrt_f32(float %a) nounwind { ret float %1 } -declare float @llvm.powi.f32.i32(float, i32) - define float @powi_f32(float %a, i32 %b) nounwind { ; RV32IF-LABEL: powi_f32: ; RV32IF: # %bb.0: @@ -134,8 +130,6 @@ define float @powi_f32(float %a, i32 %b) nounwind { ret float %1 } -declare float @llvm.sin.f32(float) - define float @sin_f32(float %a) nounwind { ; RV32IF-LABEL: sin_f32: ; RV32IF: # %bb.0: @@ -178,8 +172,6 @@ define float @sin_f32(float %a) nounwind { ret float %1 } -declare float @llvm.cos.f32(float) - define float @cos_f32(float %a) nounwind { ; RV32IF-LABEL: cos_f32: ; RV32IF: # %bb.0: @@ -359,8 +351,6 @@ define float @sincos_f32(float %a) 
nounwind { ret float %3 } -declare float @llvm.pow.f32(float, float) - define float @pow_f32(float %a, float %b) nounwind { ; RV32IF-LABEL: pow_f32: ; RV32IF: # %bb.0: @@ -403,8 +393,6 @@ define float @pow_f32(float %a, float %b) nounwind { ret float %1 } -declare float @llvm.exp.f32(float) - define float @exp_f32(float %a) nounwind { ; RV32IF-LABEL: exp_f32: ; RV32IF: # %bb.0: @@ -447,8 +435,6 @@ define float @exp_f32(float %a) nounwind { ret float %1 } -declare float @llvm.exp2.f32(float) - define float @exp2_f32(float %a) nounwind { ; RV32IF-LABEL: exp2_f32: ; RV32IF: # %bb.0: @@ -533,8 +519,6 @@ define float @exp10_f32(float %a) nounwind { ret float %1 } -declare float @llvm.log.f32(float) - define float @log_f32(float %a) nounwind { ; RV32IF-LABEL: log_f32: ; RV32IF: # %bb.0: @@ -577,8 +561,6 @@ define float @log_f32(float %a) nounwind { ret float %1 } -declare float @llvm.log10.f32(float) - define float @log10_f32(float %a) nounwind { ; RV32IF-LABEL: log10_f32: ; RV32IF: # %bb.0: @@ -621,8 +603,6 @@ define float @log10_f32(float %a) nounwind { ret float %1 } -declare float @llvm.log2.f32(float) - define float @log2_f32(float %a) nounwind { ; RV32IF-LABEL: log2_f32: ; RV32IF: # %bb.0: @@ -665,8 +645,6 @@ define float @log2_f32(float %a) nounwind { ret float %1 } -declare float @llvm.fma.f32(float, float, float) - define float @fma_f32(float %a, float %b, float %c) nounwind { ; RV32IF-LABEL: fma_f32: ; RV32IF: # %bb.0: @@ -714,8 +692,6 @@ define float @fma_f32(float %a, float %b, float %c) nounwind { ret float %1 } -declare float @llvm.fmuladd.f32(float, float, float) - define float @fmuladd_f32(float %a, float %b, float %c) nounwind { ; RV32IF-LABEL: fmuladd_f32: ; RV32IF: # %bb.0: @@ -773,8 +749,6 @@ define float @fmuladd_f32(float %a, float %b, float %c) nounwind { ret float %1 } -declare float @llvm.fabs.f32(float) - define float @fabs_f32(float %a) nounwind { ; RV32IF-LABEL: fabs_f32: ; RV32IF: # %bb.0: @@ -816,8 +790,6 @@ define float @fabs_f32(float %a) 
nounwind { ret float %1 } -declare float @llvm.minnum.f32(float, float) - define float @minnum_f32(float %a, float %b) nounwind { ; RV32IF-LABEL: minnum_f32: ; RV32IF: # %bb.0: @@ -865,8 +837,6 @@ define float @minnum_f32(float %a, float %b) nounwind { ret float %1 } -declare float @llvm.maxnum.f32(float, float) - define float @maxnum_f32(float %a, float %b) nounwind { ; RV32IF-LABEL: maxnum_f32: ; RV32IF: # %bb.0: @@ -914,8 +884,6 @@ define float @maxnum_f32(float %a, float %b) nounwind { ret float %1 } -declare float @llvm.copysign.f32(float, float) - define float @copysign_f32(float %a, float %b) nounwind { ; RV32IF-LABEL: copysign_f32: ; RV32IF: # %bb.0: @@ -963,8 +931,6 @@ define float @copysign_f32(float %a, float %b) nounwind { ret float %1 } -declare float @llvm.floor.f32(float) - define float @floor_f32(float %a) nounwind { ; RV32IF-LABEL: floor_f32: ; RV32IF: # %bb.0: @@ -1055,8 +1021,6 @@ define float @floor_f32(float %a) nounwind { ret float %1 } -declare float @llvm.ceil.f32(float) - define float @ceil_f32(float %a) nounwind { ; RV32IF-LABEL: ceil_f32: ; RV32IF: # %bb.0: @@ -1147,8 +1111,6 @@ define float @ceil_f32(float %a) nounwind { ret float %1 } -declare float @llvm.trunc.f32(float) - define float @trunc_f32(float %a) nounwind { ; RV32IF-LABEL: trunc_f32: ; RV32IF: # %bb.0: @@ -1239,8 +1201,6 @@ define float @trunc_f32(float %a) nounwind { ret float %1 } -declare float @llvm.rint.f32(float) - define float @rint_f32(float %a) nounwind { ; RV32IF-LABEL: rint_f32: ; RV32IF: # %bb.0: @@ -1331,8 +1291,6 @@ define float @rint_f32(float %a) nounwind { ret float %1 } -declare float @llvm.nearbyint.f32(float) - define float @nearbyint_f32(float %a) nounwind { ; RV32IF-LABEL: nearbyint_f32: ; RV32IF: # %bb.0: @@ -1375,8 +1333,6 @@ define float @nearbyint_f32(float %a) nounwind { ret float %1 } -declare float @llvm.round.f32(float) - define float @round_f32(float %a) nounwind { ; RV32IF-LABEL: round_f32: ; RV32IF: # %bb.0: @@ -1467,8 +1423,6 @@ define float 
@round_f32(float %a) nounwind { ret float %1 } -declare float @llvm.roundeven.f32(float) - define float @roundeven_f32(float %a) nounwind { ; RV32IF-LABEL: roundeven_f32: ; RV32IF: # %bb.0: @@ -1559,8 +1513,6 @@ define float @roundeven_f32(float %a) nounwind { ret float %1 } -declare iXLen @llvm.lrint.iXLen.f32(float) - define iXLen @lrint_f32(float %a) nounwind { ; RV32IF-LABEL: lrint_f32: ; RV32IF: # %bb.0: @@ -1608,9 +1560,6 @@ define iXLen @lrint_f32(float %a) nounwind { ret iXLen %1 } -declare i32 @llvm.lround.i32.f32(float) -declare i64 @llvm.lround.i64.f32(float) - define iXLen @lround_f32(float %a) nounwind { ; RV32IF-LABEL: lround_f32: ; RV32IF: # %bb.0: @@ -1707,8 +1656,6 @@ define i32 @lround_i32_f32(float %a) nounwind { ret i32 %1 } -declare i64 @llvm.llrint.i64.f32(float) - define i64 @llrint_f32(float %a) nounwind { ; RV32IF-LABEL: llrint_f32: ; RV32IF: # %bb.0: @@ -1764,8 +1711,6 @@ define i64 @llrint_f32(float %a) nounwind { ret i64 %1 } -declare i64 @llvm.llround.i64.f32(float) - define i64 @llround_f32(float %a) nounwind { ; RV32IF-LABEL: llround_f32: ; RV32IF: # %bb.0: @@ -1821,7 +1766,6 @@ define i64 @llround_f32(float %a) nounwind { ret i64 %1 } -declare i1 @llvm.is.fpclass.f32(float, i32) define i1 @fpclass(float %x) { ; RV32IF-LABEL: fpclass: ; RV32IF: # %bb.0: @@ -2505,8 +2449,6 @@ define float @tan_f32(float %a) nounwind { ret float %1 } -declare float @llvm.maximumnum.f32(float, float) - define float @maximumnum_float(float %x, float %y) { ; RV32IF-LABEL: maximumnum_float: ; RV32IF: # %bb.0: @@ -2562,8 +2504,6 @@ define float @maximumnum_float(float %x, float %y) { ret float %z } -declare float @llvm.minimumnum.f32(float, float) - define float @minimumnum_float(float %x, float %y) { ; RV32IF-LABEL: minimumnum_float: ; RV32IF: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/float-maximum-minimum.ll b/llvm/test/CodeGen/RISCV/float-maximum-minimum.ll index 2e9f8cbf6d2ef..806200c3f0b8e 100644 --- 
a/llvm/test/CodeGen/RISCV/float-maximum-minimum.ll +++ b/llvm/test/CodeGen/RISCV/float-maximum-minimum.ll @@ -24,8 +24,6 @@ ; RUN: -verify-machineinstrs -target-abi=lp64 \ ; RUN: | FileCheck -check-prefix=RV64I %s -declare float @llvm.minimum.f32(float, float) - define float @fminimum_f32(float %a, float %b) nounwind { ; RV32IF-LABEL: fminimum_f32: ; RV32IF: # %bb.0: @@ -124,8 +122,6 @@ define float @fminimum_f32(float %a, float %b) nounwind { ret float %1 } -declare float @llvm.maximum.f32(float, float) - define float @fmaximum_f32(float %a, float %b) nounwind { ; RV32IF-LABEL: fmaximum_f32: ; RV32IF: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll b/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll index 6871f29cb8b05..33fc51363cb56 100644 --- a/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll +++ b/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll @@ -1568,13 +1568,3 @@ define i64 @test_rint_ui64(float %x) nounwind { ret i64 %b } -declare float @llvm.floor.f32(float) -declare float @llvm.ceil.f32(float) -declare float @llvm.trunc.f32(float) -declare float @llvm.round.f32(float) -declare float @llvm.roundeven.f32(float) -declare float @llvm.rint.f32(float) -declare i32 @llvm.fptosi.sat.i32.f32(float) -declare i64 @llvm.fptosi.sat.i64.f32(float) -declare i32 @llvm.fptoui.sat.i32.f32(float) -declare i64 @llvm.fptoui.sat.i64.f32(float) diff --git a/llvm/test/CodeGen/RISCV/float-round-conv.ll b/llvm/test/CodeGen/RISCV/float-round-conv.ll index 837ff766b430f..f71eaec7ab8c7 100644 --- a/llvm/test/CodeGen/RISCV/float-round-conv.ll +++ b/llvm/test/CodeGen/RISCV/float-round-conv.ll @@ -1753,8 +1753,3 @@ define float @test_roundeven_float(float %x) { ret float %a } -declare float @llvm.floor.f32(float) -declare float @llvm.ceil.f32(float) -declare float @llvm.trunc.f32(float) -declare float @llvm.round.f32(float) -declare float @llvm.roundeven.f32(float) diff --git a/llvm/test/CodeGen/RISCV/float-select-verify.ll 
b/llvm/test/CodeGen/RISCV/float-select-verify.ll index 2d5d6d7cb4825..bebbf2fae5226 100644 --- a/llvm/test/CodeGen/RISCV/float-select-verify.ll +++ b/llvm/test/CodeGen/RISCV/float-select-verify.ll @@ -87,4 +87,3 @@ declare void @foo(i64) declare void @bar(float) -declare float @llvm.round.f32(float) diff --git a/llvm/test/CodeGen/RISCV/float-zfa.ll b/llvm/test/CodeGen/RISCV/float-zfa.ll index aec5ac75a9795..7be0d998f38c3 100644 --- a/llvm/test/CodeGen/RISCV/float-zfa.ll +++ b/llvm/test/CodeGen/RISCV/float-zfa.ll @@ -116,8 +116,6 @@ define float @loadfpimm13() { ret float 0xb810000000000000 } -declare float @llvm.minimum.f32(float, float) - define float @fminm_s(float %a, float %b) nounwind { ; CHECK-LABEL: fminm_s: ; CHECK: # %bb.0: @@ -127,8 +125,6 @@ define float @fminm_s(float %a, float %b) nounwind { ret float %1 } -declare float @llvm.maximum.f32(float, float) - define float @fmaxm_s(float %a, float %b) nounwind { ; CHECK-LABEL: fmaxm_s: ; CHECK: # %bb.0: @@ -138,7 +134,6 @@ define float @fmaxm_s(float %a, float %b) nounwind { ret float %1 } - define float @fround_s_1(float %a) nounwind { ; CHECK-LABEL: fround_s_1: ; CHECK: # %bb.0: @@ -150,7 +145,6 @@ define float @fround_s_1(float %a) nounwind { declare float @roundf(float) nounwind readnone - define float @fround_s_2(float %a) nounwind { ; CHECK-LABEL: fround_s_2: ; CHECK: # %bb.0: @@ -162,7 +156,6 @@ define float @fround_s_2(float %a) nounwind { declare float @floorf(float) nounwind readnone - define float @fround_s_3(float %a) nounwind { ; CHECK-LABEL: fround_s_3: ; CHECK: # %bb.0: @@ -174,7 +167,6 @@ define float @fround_s_3(float %a) nounwind { declare float @ceilf(float) nounwind readnone - define float @fround_s_4(float %a) nounwind { ; CHECK-LABEL: fround_s_4: ; CHECK: # %bb.0: @@ -186,7 +178,6 @@ define float @fround_s_4(float %a) nounwind { declare float @truncf(float) nounwind readnone - define float @fround_s_5(float %a) nounwind { ; CHECK-LABEL: fround_s_5: ; CHECK: # %bb.0: @@ -207,9 +198,6 @@ 
define float @fround_s_6(float %a) nounwind { ret float %call } -declare float @llvm.roundeven.f32(float) nounwind readnone - - define float @froundnx_s(float %a) nounwind { ; CHECK-LABEL: froundnx_s: ; CHECK: # %bb.0: @@ -221,8 +209,6 @@ define float @froundnx_s(float %a) nounwind { declare float @rintf(float) nounwind readnone -declare i1 @llvm.experimental.constrained.fcmp.f32(float, float, metadata, metadata) - define i32 @fcmp_olt_q(float %a, float %b) nounwind strictfp { ; CHECK-LABEL: fcmp_olt_q: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/flt-rounds.ll b/llvm/test/CodeGen/RISCV/flt-rounds.ll index 4456c36cfb5de..df72a08117a5d 100644 --- a/llvm/test/CodeGen/RISCV/flt-rounds.ll +++ b/llvm/test/CodeGen/RISCV/flt-rounds.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=RV64I %s -declare i32 @llvm.get.rounding() - define i32 @test_flt_rounds() nounwind { ; RV32I-LABEL: test_flt_rounds: ; RV32I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/fmax-fmin.ll b/llvm/test/CodeGen/RISCV/fmax-fmin.ll index 9d5729802a0ff..3b5010551005a 100644 --- a/llvm/test/CodeGen/RISCV/fmax-fmin.ll +++ b/llvm/test/CodeGen/RISCV/fmax-fmin.ll @@ -300,7 +300,3 @@ define double @minnum_f64_fast(double %x, double %y) nounwind { ret double %r } -declare float @llvm.maxnum.f32(float, float) -declare double @llvm.maxnum.f64(double, double) -declare float @llvm.minnum.f32(float, float) -declare double @llvm.minnum.f64(double, double) diff --git a/llvm/test/CodeGen/RISCV/fp-fcanonicalize.ll b/llvm/test/CodeGen/RISCV/fp-fcanonicalize.ll index e9b771a0698de..1f55a474484eb 100644 --- a/llvm/test/CodeGen/RISCV/fp-fcanonicalize.ll +++ b/llvm/test/CodeGen/RISCV/fp-fcanonicalize.ll @@ -5,10 +5,6 @@ ; RUN: llc --mtriple=riscv32 --mattr=+d,+zfh < %s | FileCheck %s --check-prefixes=CHECK,CHECK-FP16-RV32 ; RUN: llc --mtriple=riscv32 --mattr=+d,-zfh < %s | FileCheck %s --check-prefixes=CHECK,CHECK-NOFP16-RV32 -declare half 
@llvm.fcanonicalize.f16(half) -declare float @llvm.fcanonicalize.f32(float) -declare double @llvm.fcanonicalize.f64(double) - define half @fcanonicalize_f16(half %x) { ; CHECK-FP16-RV64-LABEL: fcanonicalize_f16: ; CHECK-FP16-RV64: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/fpclamptosat.ll b/llvm/test/CodeGen/RISCV/fpclamptosat.ll index a0d1ecce74e04..a724556e553d5 100644 --- a/llvm/test/CodeGen/RISCV/fpclamptosat.ll +++ b/llvm/test/CodeGen/RISCV/fpclamptosat.ll @@ -1987,9 +1987,6 @@ entry: ret i64 %conv6 } - - - ; i32 saturate define i32 @stest_f64i32_mm(double %x) { @@ -3875,12 +3872,3 @@ define i32 @ustest_f16i32_nsat(half %x) { ret i32 %spec.store.select7 } -declare i32 @llvm.smin.i32(i32, i32) -declare i32 @llvm.smax.i32(i32, i32) -declare i32 @llvm.umin.i32(i32, i32) -declare i64 @llvm.smin.i64(i64, i64) -declare i64 @llvm.smax.i64(i64, i64) -declare i64 @llvm.umin.i64(i64, i64) -declare i128 @llvm.smin.i128(i128, i128) -declare i128 @llvm.smax.i128(i128, i128) -declare i128 @llvm.umin.i128(i128, i128) diff --git a/llvm/test/CodeGen/RISCV/fpenv.ll b/llvm/test/CodeGen/RISCV/fpenv.ll index b4a1400dbd547..d241f114716ab 100644 --- a/llvm/test/CodeGen/RISCV/fpenv.ll +++ b/llvm/test/CodeGen/RISCV/fpenv.ll @@ -214,7 +214,5 @@ define void @func_07() { attributes #0 = { strictfp } -declare void @llvm.set.rounding(i32) -declare i32 @llvm.get.rounding() declare i32 @fesetround(i32 noundef) diff --git a/llvm/test/CodeGen/RISCV/frame.ll b/llvm/test/CodeGen/RISCV/frame.ll index 10d542496e0f7..799d337e7a452 100644 --- a/llvm/test/CodeGen/RISCV/frame.ll +++ b/llvm/test/CodeGen/RISCV/frame.ll @@ -48,6 +48,4 @@ define i32 @test() nounwind { ret i32 0 } -declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) - declare void @test1(ptr) diff --git a/llvm/test/CodeGen/RISCV/frameaddr-returnaddr.ll b/llvm/test/CodeGen/RISCV/frameaddr-returnaddr.ll index 478c8457997ae..fdc650e8819a2 100644 --- a/llvm/test/CodeGen/RISCV/frameaddr-returnaddr.ll +++ 
b/llvm/test/CodeGen/RISCV/frameaddr-returnaddr.ll @@ -5,8 +5,6 @@ ; RUN: | FileCheck -check-prefix=RV64I %s declare void @notdead(ptr) -declare ptr @llvm.frameaddress(i32) -declare ptr @llvm.returnaddress(i32) define ptr @test_frameaddress_0() nounwind { ; RV32I-LABEL: test_frameaddress_0: diff --git a/llvm/test/CodeGen/RISCV/frm-dependency.ll b/llvm/test/CodeGen/RISCV/frm-dependency.ll index a596c34ef9123..a2abd46267319 100644 --- a/llvm/test/CodeGen/RISCV/frm-dependency.ll +++ b/llvm/test/CodeGen/RISCV/frm-dependency.ll @@ -30,8 +30,6 @@ define float @fadd_s(float %a, float %b) nounwind { ret float %1 } -declare float @llvm.fma.f32(float, float, float) - define float @fmadd_s(float %a, float %b, float %c) nounwind { ; RV32IF-LABEL: name: fmadd_s ; RV32IF: bb.0 (%ir-block.0): diff --git a/llvm/test/CodeGen/RISCV/get-register-invalid.ll b/llvm/test/CodeGen/RISCV/get-register-invalid.ll index a86de3e8868f4..48e2c5522f2ec 100644 --- a/llvm/test/CodeGen/RISCV/get-register-invalid.ll +++ b/llvm/test/CodeGen/RISCV/get-register-invalid.ll @@ -7,6 +7,4 @@ entry: ret i32 %reg } -declare i32 @llvm.read_register.i32(metadata) nounwind - !0 = !{!"notareg\00"} diff --git a/llvm/test/CodeGen/RISCV/get-register-noreserve.ll b/llvm/test/CodeGen/RISCV/get-register-noreserve.ll index 211ee0ea602b3..99248a9e3a798 100644 --- a/llvm/test/CodeGen/RISCV/get-register-noreserve.ll +++ b/llvm/test/CodeGen/RISCV/get-register-noreserve.ll @@ -41,10 +41,6 @@ entry: ret i32 %sp } - -declare i32 @llvm.read_register.i32(metadata) nounwind -declare void @llvm.write_register.i32(metadata, i32) nounwind - !0 = !{!"sp\00"} !1 = !{!"x4\00"} !2 = !{!"vlenb"} diff --git a/llvm/test/CodeGen/RISCV/get-register-reserve.ll b/llvm/test/CodeGen/RISCV/get-register-reserve.ll index 7549b4dd3f682..cce36240d5681 100644 --- a/llvm/test/CodeGen/RISCV/get-register-reserve.ll +++ b/llvm/test/CodeGen/RISCV/get-register-reserve.ll @@ -28,7 +28,5 @@ entry: ret i32 %fp } -declare i32 @llvm.read_register.i32(metadata) 
nounwind - !0 = !{!"a1\00"} !1 = !{!"fp\00"} diff --git a/llvm/test/CodeGen/RISCV/half-arith-strict.ll b/llvm/test/CodeGen/RISCV/half-arith-strict.ll index 74e7f8bdc565f..91e70145c316c 100644 --- a/llvm/test/CodeGen/RISCV/half-arith-strict.ll +++ b/llvm/test/CodeGen/RISCV/half-arith-strict.ll @@ -54,7 +54,6 @@ define half @fadd_h(half %a, half %b) nounwind strictfp { %1 = call half @llvm.experimental.constrained.fadd.f16(half %a, half %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret half %1 } -declare half @llvm.experimental.constrained.fadd.f16(half, half, metadata, metadata) define half @fsub_h(half %a, half %b) nounwind strictfp { ; CHECK-LABEL: fsub_h: @@ -85,7 +84,6 @@ define half @fsub_h(half %a, half %b) nounwind strictfp { %1 = call half @llvm.experimental.constrained.fsub.f16(half %a, half %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret half %1 } -declare half @llvm.experimental.constrained.fsub.f16(half, half, metadata, metadata) define half @fmul_h(half %a, half %b) nounwind strictfp { ; CHECK-LABEL: fmul_h: @@ -116,7 +114,6 @@ define half @fmul_h(half %a, half %b) nounwind strictfp { %1 = call half @llvm.experimental.constrained.fmul.f16(half %a, half %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret half %1 } -declare half @llvm.experimental.constrained.fmul.f16(half, half, metadata, metadata) define half @fdiv_h(half %a, half %b) nounwind strictfp { ; CHECK-LABEL: fdiv_h: @@ -147,7 +144,6 @@ define half @fdiv_h(half %a, half %b) nounwind strictfp { %1 = call half @llvm.experimental.constrained.fdiv.f16(half %a, half %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret half %1 } -declare half @llvm.experimental.constrained.fdiv.f16(half, half, metadata, metadata) define half @fsqrt_h(half %a) nounwind strictfp { ; CHECK-LABEL: fsqrt_h: @@ -176,7 +172,6 @@ define half @fsqrt_h(half %a) nounwind strictfp { %1 = call half 
@llvm.experimental.constrained.sqrt.f16(half %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret half %1 } -declare half @llvm.experimental.constrained.sqrt.f16(half, metadata, metadata) ; FIXME: fminnum/fmaxnum need libcalls to handle SNaN, but we don't have f16 ; libcalls and don't support promotion yet. @@ -223,7 +218,6 @@ define half @fmadd_h(half %a, half %b, half %c) nounwind strictfp { %1 = call half @llvm.experimental.constrained.fma.f16(half %a, half %b, half %c, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret half %1 } -declare half @llvm.experimental.constrained.fma.f16(half, half, half, metadata, metadata) strictfp define half @fmsub_h(half %a, half %b, half %c) nounwind strictfp { ; CHECK-LABEL: fmsub_h: diff --git a/llvm/test/CodeGen/RISCV/half-arith.ll b/llvm/test/CodeGen/RISCV/half-arith.ll index d089e3678756c..e1eb860d26591 100644 --- a/llvm/test/CodeGen/RISCV/half-arith.ll +++ b/llvm/test/CodeGen/RISCV/half-arith.ll @@ -353,8 +353,6 @@ define half @fdiv_h(half %a, half %b) nounwind { ret half %1 } -declare half @llvm.sqrt.f16(half) - define half @fsqrt_h(half %a) nounwind { ; CHECKIZFH-LABEL: fsqrt_h: ; CHECKIZFH: # %bb.0: @@ -409,8 +407,6 @@ define half @fsqrt_h(half %a) nounwind { ret half %1 } -declare half @llvm.copysign.f16(half, half) - define half @fsgnj_h(half %a, half %b) nounwind { ; CHECKIZFH-LABEL: fsgnj_h: ; CHECKIZFH: # %bb.0: @@ -760,8 +756,6 @@ define half @fsgnjn_h(half %a, half %b) nounwind { ret half %3 } -declare half @llvm.fabs.f16(half) - ; This function performs extra work to ensure that ; DAGCombiner::visitBITCAST doesn't replace the fabs with an and. 
define half @fabs_h(half %a, half %b) nounwind { @@ -916,8 +910,6 @@ define half @fabs_h(half %a, half %b) nounwind { ret half %3 } -declare half @llvm.minnum.f16(half, half) - define half @fmin_h(half %a, half %b) nounwind { ; CHECKIZFH-LABEL: fmin_h: ; CHECKIZFH: # %bb.0: @@ -1000,8 +992,6 @@ define half @fmin_h(half %a, half %b) nounwind { ret half %1 } -declare half @llvm.maxnum.f16(half, half) - define half @fmax_h(half %a, half %b) nounwind { ; CHECKIZFH-LABEL: fmax_h: ; CHECKIZFH: # %bb.0: @@ -1084,8 +1074,6 @@ define half @fmax_h(half %a, half %b) nounwind { ret half %1 } -declare half @llvm.fma.f16(half, half, half) - define half @fmadd_h(half %a, half %b, half %c) nounwind { ; CHECKIZFH-LABEL: fmadd_h: ; CHECKIZFH: # %bb.0: @@ -1762,7 +1750,6 @@ define half @fnmadd_h_3(half %a, half %b, half %c) nounwind { ret half %neg } - define half @fnmadd_nsz(half %a, half %b, half %c) nounwind { ; RV32IZFH-LABEL: fnmadd_nsz: ; RV32IZFH: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/half-bitmanip-dagcombines.ll b/llvm/test/CodeGen/RISCV/half-bitmanip-dagcombines.ll index 730bde5af610b..cfe22b7d4f3b0 100644 --- a/llvm/test/CodeGen/RISCV/half-bitmanip-dagcombines.ll +++ b/llvm/test/CodeGen/RISCV/half-bitmanip-dagcombines.ll @@ -86,8 +86,6 @@ define half @fneg(half %a) nounwind { ret half %1 } -declare half @llvm.fabs.f16(half) - define half @fabs(half %a) nounwind { ; RV32I-LABEL: fabs: ; RV32I: # %bb.0: @@ -154,8 +152,6 @@ define half @fabs(half %a) nounwind { ret half %1 } -declare half @llvm.copysign.f16(half, half) - ; DAGTypeLegalizer::SoftenFloatRes_FCOPYSIGN will convert to bitwise ; operations if half precision floating point isn't supported. A combine could ; be written to do the same even when f16 is legal. 
diff --git a/llvm/test/CodeGen/RISCV/half-convert-strict.ll b/llvm/test/CodeGen/RISCV/half-convert-strict.ll index a607893a3735b..daeb75c31d614 100644 --- a/llvm/test/CodeGen/RISCV/half-convert-strict.ll +++ b/llvm/test/CodeGen/RISCV/half-convert-strict.ll @@ -120,7 +120,6 @@ define i16 @fcvt_si_h(half %a) nounwind strictfp { %1 = call i16 @llvm.experimental.constrained.fptosi.i16.f16(half %a, metadata !"fpexcept.strict") ret i16 %1 } -declare i16 @llvm.experimental.constrained.fptosi.i16.f16(half, metadata) define i16 @fcvt_ui_h(half %a) nounwind strictfp { ; CHECK32-IZFH-LABEL: fcvt_ui_h: @@ -183,7 +182,6 @@ define i16 @fcvt_ui_h(half %a) nounwind strictfp { %1 = call i16 @llvm.experimental.constrained.fptoui.i16.f16(half %a, metadata !"fpexcept.strict") ret i16 %1 } -declare i16 @llvm.experimental.constrained.fptoui.i16.f16(half, metadata) define i32 @fcvt_w_h(half %a) nounwind strictfp { ; CHECKIZFH-LABEL: fcvt_w_h: @@ -246,7 +244,6 @@ define i32 @fcvt_w_h(half %a) nounwind strictfp { %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %a, metadata !"fpexcept.strict") ret i32 %1 } -declare i32 @llvm.experimental.constrained.fptosi.i32.f16(half, metadata) define i32 @fcvt_wu_h(half %a) nounwind strictfp { ; CHECKIZFH-LABEL: fcvt_wu_h: @@ -309,7 +306,6 @@ define i32 @fcvt_wu_h(half %a) nounwind strictfp { %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f16(half %a, metadata !"fpexcept.strict") ret i32 %1 } -declare i32 @llvm.experimental.constrained.fptoui.i32.f16(half, metadata) ; Test where the fptoui has multiple uses, one of which causes a sext to be ; inserted on RV64. 
@@ -475,7 +471,6 @@ define i64 @fcvt_l_h(half %a) nounwind strictfp { %1 = call i64 @llvm.experimental.constrained.fptosi.i64.f16(half %a, metadata !"fpexcept.strict") ret i64 %1 } -declare i64 @llvm.experimental.constrained.fptosi.i64.f16(half, metadata) define i64 @fcvt_lu_h(half %a) nounwind strictfp { ; CHECK32-IZFH-LABEL: fcvt_lu_h: @@ -552,7 +547,6 @@ define i64 @fcvt_lu_h(half %a) nounwind strictfp { %1 = call i64 @llvm.experimental.constrained.fptoui.i64.f16(half %a, metadata !"fpexcept.strict") ret i64 %1 } -declare i64 @llvm.experimental.constrained.fptoui.i64.f16(half, metadata) define half @fcvt_h_si(i16 %a) nounwind strictfp { ; CHECK32-IZFH-LABEL: fcvt_h_si: @@ -633,7 +627,6 @@ define half @fcvt_h_si(i16 %a) nounwind strictfp { %1 = call half @llvm.experimental.constrained.sitofp.f16.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret half %1 } -declare half @llvm.experimental.constrained.sitofp.f16.i16(i16, metadata, metadata) define half @fcvt_h_si_signext(i16 signext %a) nounwind strictfp { ; CHECKIZFH-LABEL: fcvt_h_si_signext: @@ -776,7 +769,6 @@ define half @fcvt_h_ui(i16 %a) nounwind strictfp { %1 = call half @llvm.experimental.constrained.uitofp.f16.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret half %1 } -declare half @llvm.experimental.constrained.uitofp.f16.i16(i16, metadata, metadata) define half @fcvt_h_ui_zeroext(i16 zeroext %a) nounwind strictfp { ; CHECKIZFH-LABEL: fcvt_h_ui_zeroext: @@ -901,7 +893,6 @@ define half @fcvt_h_w(i32 %a) nounwind strictfp { %1 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret half %1 } -declare half @llvm.experimental.constrained.sitofp.f16.i32(i32, metadata, metadata) define half @fcvt_h_w_load(ptr %p) nounwind strictfp { ; CHECKIZFH-LABEL: fcvt_h_w_load: @@ -1036,7 +1027,6 @@ define half @fcvt_h_wu(i32 %a) nounwind strictfp { %1 = call half 
@llvm.experimental.constrained.uitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret half %1 } -declare half @llvm.experimental.constrained.uitofp.f16.i32(i32, metadata, metadata) define half @fcvt_h_wu_load(ptr %p) nounwind strictfp { ; CHECKIZFH-LABEL: fcvt_h_wu_load: @@ -1185,7 +1175,6 @@ define half @fcvt_h_l(i64 %a) nounwind strictfp { %1 = call half @llvm.experimental.constrained.sitofp.f16.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret half %1 } -declare half @llvm.experimental.constrained.sitofp.f16.i64(i64, metadata, metadata) define half @fcvt_h_lu(i64 %a) nounwind strictfp { ; CHECK32-IZFH-LABEL: fcvt_h_lu: @@ -1262,7 +1251,6 @@ define half @fcvt_h_lu(i64 %a) nounwind strictfp { %1 = call half @llvm.experimental.constrained.uitofp.f16.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret half %1 } -declare half @llvm.experimental.constrained.uitofp.f16.i64(i64, metadata, metadata) define half @fcvt_h_s(float %a) nounwind strictfp { ; CHECKIZFH-LABEL: fcvt_h_s: @@ -1320,7 +1308,6 @@ define half @fcvt_h_s(float %a) nounwind strictfp { %1 = call half @llvm.experimental.constrained.fptrunc.f16.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret half %1 } -declare half @llvm.experimental.constrained.fptrunc.f16.f32(float, metadata, metadata) define float @fcvt_s_h(half %a) nounwind strictfp { ; CHECKIZFH-LABEL: fcvt_s_h: @@ -1378,7 +1365,6 @@ define float @fcvt_s_h(half %a) nounwind strictfp { %1 = call float @llvm.experimental.constrained.fpext.f32.f16(half %a, metadata !"fpexcept.strict") ret float %1 } -declare float @llvm.experimental.constrained.fpext.f32.f16(half, metadata) define half @fcvt_h_d(double %a) nounwind strictfp { ; RV32IZFH-LABEL: fcvt_h_d: @@ -1488,7 +1474,6 @@ define half @fcvt_h_d(double %a) nounwind strictfp { %1 = call half @llvm.experimental.constrained.fptrunc.f16.f64(double %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret 
half %1 } -declare half @llvm.experimental.constrained.fptrunc.f16.f64(double, metadata, metadata) define double @fcvt_d_h(half %a) nounwind strictfp { ; RV32IZFH-LABEL: fcvt_d_h: @@ -1607,7 +1592,6 @@ define double @fcvt_d_h(half %a) nounwind strictfp { %1 = call double @llvm.experimental.constrained.fpext.f64.f16(half %a, metadata !"fpexcept.strict") ret double %1 } -declare double @llvm.experimental.constrained.fpext.f64.f16(half, metadata) ; Make sure we select W version of addi on RV64. define signext i32 @fcvt_h_w_demanded_bits(i32 signext %0, ptr %1) strictfp { @@ -2088,4 +2072,3 @@ define fp128 @fcvt_q_h(half %a) nounwind strictfp { %1 = call fp128 @llvm.experimental.constrained.fpext.f128.f16(half %a, metadata !"fpexcept.strict") ret fp128 %1 } -declare fp128 @llvm.experimental.constrained.fpext.f128.f16(half, metadata) diff --git a/llvm/test/CodeGen/RISCV/half-convert.ll b/llvm/test/CodeGen/RISCV/half-convert.ll index c3c06e192f76f..cdf07fc5b1ee7 100644 --- a/llvm/test/CodeGen/RISCV/half-convert.ll +++ b/llvm/test/CodeGen/RISCV/half-convert.ll @@ -570,7 +570,6 @@ start: %0 = tail call i16 @llvm.fptosi.sat.i16.f16(half %a) ret i16 %0 } -declare i16 @llvm.fptosi.sat.i16.f16(half) define i16 @fcvt_ui_h(half %a) nounwind { ; RV32IZFH-LABEL: fcvt_ui_h: @@ -1016,7 +1015,6 @@ start: %0 = tail call i16 @llvm.fptoui.sat.i16.f16(half %a) ret i16 %0 } -declare i16 @llvm.fptoui.sat.i16.f16(half) define i32 @fcvt_w_h(half %a) nounwind { ; CHECKIZFH-LABEL: fcvt_w_h: @@ -1406,7 +1404,6 @@ start: %0 = tail call i32 @llvm.fptosi.sat.i32.f16(half %a) ret i32 %0 } -declare i32 @llvm.fptosi.sat.i32.f16(half) define i32 @fcvt_wu_h(half %a) nounwind { ; CHECKIZFH-LABEL: fcvt_wu_h: @@ -1990,7 +1987,6 @@ start: %0 = tail call i32 @llvm.fptoui.sat.i32.f16(half %a) ret i32 %0 } -declare i32 @llvm.fptoui.sat.i32.f16(half) define i64 @fcvt_l_h(half %a) nounwind { ; RV32IZFH-LABEL: fcvt_l_h: @@ -2753,7 +2749,6 @@ start: %0 = tail call i64 @llvm.fptosi.sat.i64.f16(half %a) ret i64 %0 
} -declare i64 @llvm.fptosi.sat.i64.f16(half) define i64 @fcvt_lu_h(half %a) nounwind { ; RV32IZFH-LABEL: fcvt_lu_h: @@ -3348,7 +3343,6 @@ start: %0 = tail call i64 @llvm.fptoui.sat.i64.f16(half %a) ret i64 %0 } -declare i64 @llvm.fptoui.sat.i64.f16(half) define half @fcvt_h_si(i16 %a) nounwind { ; RV32IZFH-LABEL: fcvt_h_si: @@ -7684,7 +7678,6 @@ start: %0 = tail call i8 @llvm.fptosi.sat.i8.f16(half %a) ret i8 %0 } -declare i8 @llvm.fptosi.sat.i8.f16(half) define zeroext i8 @fcvt_wu_s_i8(half %a) nounwind { ; RV32IZFH-LABEL: fcvt_wu_s_i8: @@ -8110,7 +8103,6 @@ start: %0 = tail call i8 @llvm.fptoui.sat.i8.f16(half %a) ret i8 %0 } -declare i8 @llvm.fptoui.sat.i8.f16(half) define zeroext i32 @fcvt_wu_h_sat_zext(half %a) nounwind { ; RV32IZFH-LABEL: fcvt_wu_h_sat_zext: diff --git a/llvm/test/CodeGen/RISCV/half-fcmp-strict.ll b/llvm/test/CodeGen/RISCV/half-fcmp-strict.ll index 12cf088e3205f..88f0b9db650ae 100644 --- a/llvm/test/CodeGen/RISCV/half-fcmp-strict.ll +++ b/llvm/test/CodeGen/RISCV/half-fcmp-strict.ll @@ -48,7 +48,6 @@ define i32 @fcmp_oeq(half %a, half %b) nounwind strictfp { %2 = zext i1 %1 to i32 ret i32 %2 } -declare i1 @llvm.experimental.constrained.fcmp.f16(half, half, metadata, metadata) define i32 @fcmp_ogt(half %a, half %b) nounwind strictfp { ; CHECK-LABEL: fcmp_ogt: @@ -653,7 +652,6 @@ define i32 @fcmps_oeq(half %a, half %b) nounwind strictfp { %2 = zext i1 %1 to i32 ret i32 %2 } -declare i1 @llvm.experimental.constrained.fcmps.f16(half, half, metadata, metadata) define i32 @fcmps_ogt(half %a, half %b) nounwind strictfp { ; CHECK-LABEL: fcmps_ogt: diff --git a/llvm/test/CodeGen/RISCV/half-intrinsics.ll b/llvm/test/CodeGen/RISCV/half-intrinsics.ll index 847054d96968a..e712bd919b0b1 100644 --- a/llvm/test/CodeGen/RISCV/half-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/half-intrinsics.ll @@ -56,8 +56,6 @@ ; RUN: -mattr=+zhinxmin -verify-machineinstrs -target-abi lp64 | \ ; RUN: FileCheck -check-prefixes=CHECKIZHINXMIN,RV64IZHINXMIN %s -declare half 
@llvm.sqrt.f16(half) - define half @sqrt_f16(half %a) nounwind { ; CHECKIZFH-LABEL: sqrt_f16: ; CHECKIZFH: # %bb.0: @@ -112,8 +110,6 @@ define half @sqrt_f16(half %a) nounwind { ret half %1 } -declare half @llvm.powi.f16.i32(half, i32) - define half @powi_f16(half %a, i32 %b) nounwind { ; RV32IZFH-LABEL: powi_f16: ; RV32IZFH: # %bb.0: @@ -244,8 +240,6 @@ define half @powi_f16(half %a, i32 %b) nounwind { ret half %1 } -declare half @llvm.sin.f16(half) - define half @sin_f16(half %a) nounwind { ; RV32IZFH-LABEL: sin_f16: ; RV32IZFH: # %bb.0: @@ -364,8 +358,6 @@ define half @sin_f16(half %a) nounwind { ret half %1 } -declare half @llvm.cos.f16(half) - define half @cos_f16(half %a) nounwind { ; RV32IZFH-LABEL: cos_f16: ; RV32IZFH: # %bb.0: @@ -819,8 +811,6 @@ define half @sincos_f16(half %a) nounwind { ret half %3 } -declare half @llvm.pow.f16(half, half) - define half @pow_f16(half %a, half %b) nounwind { ; RV32IZFH-LABEL: pow_f16: ; RV32IZFH: # %bb.0: @@ -973,8 +963,6 @@ define half @pow_f16(half %a, half %b) nounwind { ret half %1 } -declare half @llvm.exp.f16(half) - define half @exp_f16(half %a) nounwind { ; RV32IZFH-LABEL: exp_f16: ; RV32IZFH: # %bb.0: @@ -1093,8 +1081,6 @@ define half @exp_f16(half %a) nounwind { ret half %1 } -declare half @llvm.exp2.f16(half) - define half @exp2_f16(half %a) nounwind { ; RV32IZFH-LABEL: exp2_f16: ; RV32IZFH: # %bb.0: @@ -1331,8 +1317,6 @@ define half @exp10_f16(half %a) nounwind { ret half %1 } -declare half @llvm.log.f16(half) - define half @log_f16(half %a) nounwind { ; RV32IZFH-LABEL: log_f16: ; RV32IZFH: # %bb.0: @@ -1451,8 +1435,6 @@ define half @log_f16(half %a) nounwind { ret half %1 } -declare half @llvm.log10.f16(half) - define half @log10_f16(half %a) nounwind { ; RV32IZFH-LABEL: log10_f16: ; RV32IZFH: # %bb.0: @@ -1571,8 +1553,6 @@ define half @log10_f16(half %a) nounwind { ret half %1 } -declare half @llvm.log2.f16(half) - define half @log2_f16(half %a) nounwind { ; RV32IZFH-LABEL: log2_f16: ; RV32IZFH: # %bb.0: @@ 
-1691,8 +1671,6 @@ define half @log2_f16(half %a) nounwind { ret half %1 } -declare half @llvm.fma.f16(half, half, half) - define half @fma_f16(half %a, half %b, half %c) nounwind { ; CHECKIZFH-LABEL: fma_f16: ; CHECKIZFH: # %bb.0: @@ -1791,8 +1769,6 @@ define half @fma_f16(half %a, half %b, half %c) nounwind { ret half %1 } -declare half @llvm.fmuladd.f16(half, half, half) - define half @fmuladd_f16(half %a, half %b, half %c) nounwind { ; CHECKIZFH-LABEL: fmuladd_f16: ; CHECKIZFH: # %bb.0: @@ -1907,8 +1883,6 @@ define half @fmuladd_f16(half %a, half %b, half %c) nounwind { ret half %1 } -declare half @llvm.fabs.f16(half) - define half @fabs_f16(half %a) nounwind { ; CHECKIZFH-LABEL: fabs_f16: ; CHECKIZFH: # %bb.0: @@ -1967,8 +1941,6 @@ define half @fabs_f16(half %a) nounwind { ret half %1 } -declare half @llvm.minnum.f16(half, half) - define half @minnum_f16(half %a, half %b) nounwind { ; CHECKIZFH-LABEL: minnum_f16: ; CHECKIZFH: # %bb.0: @@ -2051,8 +2023,6 @@ define half @minnum_f16(half %a, half %b) nounwind { ret half %1 } -declare half @llvm.maxnum.f16(half, half) - define half @maxnum_f16(half %a, half %b) nounwind { ; CHECKIZFH-LABEL: maxnum_f16: ; CHECKIZFH: # %bb.0: @@ -2135,8 +2105,6 @@ define half @maxnum_f16(half %a, half %b) nounwind { ret half %1 } -declare half @llvm.copysign.f16(half, half) - define half @copysign_f16(half %a, half %b) nounwind { ; CHECKIZFH-LABEL: copysign_f16: ; CHECKIZFH: # %bb.0: @@ -2217,8 +2185,6 @@ define half @copysign_f16(half %a, half %b) nounwind { ret half %1 } -declare half @llvm.floor.f16(half) - define half @floor_f16(half %a) nounwind { ; CHECKIZFH-LABEL: floor_f16: ; CHECKIZFH: # %bb.0: @@ -2309,8 +2275,6 @@ define half @floor_f16(half %a) nounwind { ret half %1 } -declare half @llvm.ceil.f16(half) - define half @ceil_f16(half %a) nounwind { ; CHECKIZFH-LABEL: ceil_f16: ; CHECKIZFH: # %bb.0: @@ -2401,8 +2365,6 @@ define half @ceil_f16(half %a) nounwind { ret half %1 } -declare half @llvm.trunc.f16(half) - define 
half @trunc_f16(half %a) nounwind { ; CHECKIZFH-LABEL: trunc_f16: ; CHECKIZFH: # %bb.0: @@ -2493,8 +2455,6 @@ define half @trunc_f16(half %a) nounwind { ret half %1 } -declare half @llvm.rint.f16(half) - define half @rint_f16(half %a) nounwind { ; CHECKIZFH-LABEL: rint_f16: ; CHECKIZFH: # %bb.0: @@ -2585,8 +2545,6 @@ define half @rint_f16(half %a) nounwind { ret half %1 } -declare half @llvm.nearbyint.f16(half) - define half @nearbyint_f16(half %a) nounwind { ; RV32IZFH-LABEL: nearbyint_f16: ; RV32IZFH: # %bb.0: @@ -2705,8 +2663,6 @@ define half @nearbyint_f16(half %a) nounwind { ret half %1 } -declare half @llvm.round.f16(half) - define half @round_f16(half %a) nounwind { ; CHECKIZFH-LABEL: round_f16: ; CHECKIZFH: # %bb.0: @@ -2797,8 +2753,6 @@ define half @round_f16(half %a) nounwind { ret half %1 } -declare half @llvm.roundeven.f16(half) - define half @roundeven_f16(half %a) nounwind { ; CHECKIZFH-LABEL: roundeven_f16: ; CHECKIZFH: # %bb.0: @@ -2889,7 +2843,6 @@ define half @roundeven_f16(half %a) nounwind { ret half %1 } -declare i1 @llvm.is.fpclass.f16(half, i32) define i1 @isnan_d_fpclass(half %x) { ; CHECKIZFH-LABEL: isnan_d_fpclass: ; CHECKIZFH: # %bb.0: @@ -2966,8 +2919,6 @@ define i1 @isnan_d_fpclass(half %x) { ret i1 %1 } -declare half @llvm.tan.f16(half) - define half @tan_f16(half %a) nounwind { ; RV32IZFH-LABEL: tan_f16: ; RV32IZFH: # %bb.0: @@ -3086,8 +3037,6 @@ define half @tan_f16(half %a) nounwind { ret half %1 } -declare half @llvm.maximumnum.f16(half, half) - define half @maximumnum_half(half %x, half %y) { ; CHECKIZFH-LABEL: maximumnum_half: ; CHECKIZFH: # %bb.0: @@ -3190,8 +3139,6 @@ define half @maximumnum_half(half %x, half %y) { ret half %z } -declare half @llvm.minimumnum.f16(half, half) - define half @minimumnum_half(half %x, half %y) { ; CHECKIZFH-LABEL: minimumnum_half: ; CHECKIZFH: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/half-maximum-minimum.ll b/llvm/test/CodeGen/RISCV/half-maximum-minimum.ll index bc3f44363fb95..23ca1992614a1 
100644 --- a/llvm/test/CodeGen/RISCV/half-maximum-minimum.ll +++ b/llvm/test/CodeGen/RISCV/half-maximum-minimum.ll @@ -12,8 +12,6 @@ ; RUN: -verify-machineinstrs -target-abi lp64 | \ ; RUN: FileCheck -check-prefixes=CHECKIZHINX %s -declare half @llvm.minimum.f16(half, half) - define half @fminimum_f16(half %a, half %b) nounwind { ; CHECKIZFH-LABEL: fminimum_f16: ; CHECKIZFH: # %bb.0: @@ -56,8 +54,6 @@ define half @fminimum_f16(half %a, half %b) nounwind { ret half %1 } -declare half @llvm.maximum.f16(half, half) - define half @fmaximum_f16(half %a, half %b) nounwind { ; CHECKIZFH-LABEL: fmaximum_f16: ; CHECKIZFH: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll b/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll index c815bc19e280c..67f69120b9aea 100644 --- a/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll +++ b/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll @@ -4300,13 +4300,3 @@ define i64 @test_rint_ui64(half %x) nounwind { ret i64 %b } -declare half @llvm.floor.f16(half) -declare half @llvm.ceil.f16(half) -declare half @llvm.trunc.f16(half) -declare half @llvm.round.f16(half) -declare half @llvm.roundeven.f16(half) -declare half @llvm.rint.f16(half) -declare i32 @llvm.fptosi.sat.i32.f16(half) -declare i64 @llvm.fptosi.sat.i64.f16(half) -declare i32 @llvm.fptoui.sat.i32.f16(half) -declare i64 @llvm.fptoui.sat.i64.f16(half) diff --git a/llvm/test/CodeGen/RISCV/half-round-conv.ll b/llvm/test/CodeGen/RISCV/half-round-conv.ll index cfc997d66ec56..b80a4cd13eb8a 100644 --- a/llvm/test/CodeGen/RISCV/half-round-conv.ll +++ b/llvm/test/CodeGen/RISCV/half-round-conv.ll @@ -4901,8 +4901,3 @@ define half @test_roundeven_half(half %x) { ret half %a } -declare half @llvm.floor.f16(half) -declare half @llvm.ceil.f16(half) -declare half @llvm.trunc.f16(half) -declare half @llvm.round.f16(half) -declare half @llvm.roundeven.f16(half) diff --git a/llvm/test/CodeGen/RISCV/half-zfa.ll b/llvm/test/CodeGen/RISCV/half-zfa.ll index 90c66e7fe2ca4..9a5b9137466a2 100644 
--- a/llvm/test/CodeGen/RISCV/half-zfa.ll +++ b/llvm/test/CodeGen/RISCV/half-zfa.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -target-abi lp64f -mattr=+zfa,+zfhmin < %s \ ; RUN: | FileCheck %s --check-prefix=ZFHMIN -declare half @llvm.minimum.f16(half, half) - define half @fminm_h(half %a, half %b) nounwind { ; CHECK-LABEL: fminm_h: ; CHECK: # %bb.0: @@ -27,8 +25,6 @@ define half @fminm_h(half %a, half %b) nounwind { ret half %1 } -declare half @llvm.maximum.f16(half, half) - define half @fmaxm_h(half %a, half %b) nounwind { ; CHECK-LABEL: fmaxm_h: ; CHECK: # %bb.0: @@ -62,9 +58,6 @@ define half @fround_h_1(half %a) nounwind { ret half %call } -declare half @llvm.round.f16(half) nounwind readnone - - define half @fround_h_2(half %a) nounwind { ; CHECK-LABEL: fround_h_2: ; CHECK: # %bb.0: @@ -81,9 +74,6 @@ define half @fround_h_2(half %a) nounwind { ret half %call } -declare half @llvm.floor.f16(half) nounwind readnone - - define half @fround_h_3(half %a) nounwind { ; CHECK-LABEL: fround_h_3: ; CHECK: # %bb.0: @@ -100,9 +90,6 @@ define half @fround_h_3(half %a) nounwind { ret half %call } -declare half @llvm.ceil.f16(half) nounwind readnone - - define half @fround_h_4(half %a) nounwind { ; CHECK-LABEL: fround_h_4: ; CHECK: # %bb.0: @@ -119,9 +106,6 @@ define half @fround_h_4(half %a) nounwind { ret half %call } -declare half @llvm.trunc.f16(half) nounwind readnone - - define half @fround_h_5(half %a) nounwind { ; CHECK-LABEL: fround_h_5: ; CHECK: # %bb.0: @@ -138,8 +122,6 @@ define half @fround_h_5(half %a) nounwind { ret half %call } -declare half @llvm.nearbyint.f16(half) nounwind readnone - define half @fround_h_6(half %a) nounwind { ; CHECK-LABEL: fround_h_6: ; CHECK: # %bb.0: @@ -156,9 +138,6 @@ define half @fround_h_6(half %a) nounwind { ret half %call } -declare half @llvm.roundeven.f16(half) nounwind readnone - - define half @froundnx_h(half %a) nounwind { ; CHECK-LABEL: froundnx_h: ; CHECK: # %bb.0: @@ -175,10 +154,6 @@ define half @froundnx_h(half %a) 
nounwind { ret half %call } -declare half @llvm.rint.f16(half) nounwind readnone - -declare i1 @llvm.experimental.constrained.fcmp.f16(half, half, metadata, metadata) - define i32 @fcmp_olt_q(half %a, half %b) nounwind strictfp { ; CHECK-LABEL: fcmp_olt_q: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/hwasan-check-memaccess.ll b/llvm/test/CodeGen/RISCV/hwasan-check-memaccess.ll index cf780f4f7f76c..5c4c2a7f7dd0c 100644 --- a/llvm/test/CodeGen/RISCV/hwasan-check-memaccess.ll +++ b/llvm/test/CodeGen/RISCV/hwasan-check-memaccess.ll @@ -36,8 +36,6 @@ define ptr @f2(ptr %x0, ptr %x1) { ret ptr %x0 } -declare void @llvm.hwasan.check.memaccess.shortgranules(ptr, ptr, i32) - ; CHECK: .section .text.hot,"axG",@progbits,__hwasan_check_x10_2_short,comdat ; CHECK-NEXT: .type __hwasan_check_x10_2_short,@function ; CHECK-NEXT: .weak __hwasan_check_x10_2_short diff --git a/llvm/test/CodeGen/RISCV/i64-icmp.ll b/llvm/test/CodeGen/RISCV/i64-icmp.ll index 2742b9a3655d3..8e079450d9709 100644 --- a/llvm/test/CodeGen/RISCV/i64-icmp.ll +++ b/llvm/test/CodeGen/RISCV/i64-icmp.ll @@ -828,4 +828,3 @@ define i64 @mask_test_eq_multiuse(i64 %x, ptr %p) nounwind { ret i64 %ext } -declare i64 @llvm.umin.i64(i64, i64) diff --git a/llvm/test/CodeGen/RISCV/iabs.ll b/llvm/test/CodeGen/RISCV/iabs.ll index c157c63722cb4..35ff8bece9b5d 100644 --- a/llvm/test/CodeGen/RISCV/iabs.ll +++ b/llvm/test/CodeGen/RISCV/iabs.ll @@ -8,12 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zbb -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefix=RV64ZBB -declare i8 @llvm.abs.i8(i8, i1 immarg) -declare i16 @llvm.abs.i16(i16, i1 immarg) -declare i32 @llvm.abs.i32(i32, i1 immarg) -declare i64 @llvm.abs.i64(i64, i1 immarg) -declare i128 @llvm.abs.i128(i128, i1 immarg) - define i8 @abs8(i8 %x) { ; RV32I-LABEL: abs8: ; RV32I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll b/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll index a06c7505d543d..e0aed2d4f90ff 100644 --- 
a/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll +++ b/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll @@ -234,10 +234,4 @@ define i16 @ctz_v8i1_i16_ret(<8 x i1> %a) { ret i16 %res } -declare i64 @llvm.experimental.cttz.elts.i64.nxv8i16(, i1) -declare i32 @llvm.experimental.cttz.elts.i32.nxv16i1(, i1) -declare i32 @llvm.experimental.cttz.elts.i32.nxv4i32(, i1) -declare i32 @llvm.experimental.cttz.elts.i32.v16i1(<16 x i1>, i1) -declare i16 @llvm.experimental.cttz.elts.i16.v16i1(<8 x i1>, i1) - attributes #0 = { vscale_range(2,1024) } diff --git a/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts.ll b/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts.ll index 1216d3000e8c8..632c9a5a75911 100644 --- a/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts.ll +++ b/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts.ll @@ -60,5 +60,3 @@ define i32 @ctz_v2i1_poison(<2 x i1> %a) { ret i32 %res } -declare i32 @llvm.experimental.cttz.elts.i32.v2i1(<2 x i1>, i1) -declare i16 @llvm.experimental.cttz.elts.i16.v4i32(<4 x i32>, i1) diff --git a/llvm/test/CodeGen/RISCV/intrinsics/trap.ll b/llvm/test/CodeGen/RISCV/intrinsics/trap.ll index e85073518ab9c..1a16a30642472 100644 --- a/llvm/test/CodeGen/RISCV/intrinsics/trap.ll +++ b/llvm/test/CodeGen/RISCV/intrinsics/trap.ll @@ -6,9 +6,6 @@ ; Verify that we lower @llvm.trap() and @llvm.debugtrap() correctly. 
-declare void @llvm.trap() -declare void @llvm.debugtrap() - define void @test_trap() nounwind { ; RV32I-LABEL: test_trap: ; RV32I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/libcall-tail-calls.ll b/llvm/test/CodeGen/RISCV/libcall-tail-calls.ll index 541fb37742570..34a58832d912c 100644 --- a/llvm/test/CodeGen/RISCV/libcall-tail-calls.ll +++ b/llvm/test/CodeGen/RISCV/libcall-tail-calls.ll @@ -113,8 +113,6 @@ define i64 @mul64(i64 %a, i64 %b) nounwind { ; Half libcalls: -declare half @llvm.sin.f16(half) - define half @sin_f16(half %a) nounwind { ; RV32IFD-ILP32D-LABEL: sin_f16: ; RV32IFD-ILP32D: # %bb.0: @@ -233,8 +231,6 @@ define half @sin_f16(half %a) nounwind { ; Float libcalls: -declare float @llvm.sin.f32(float) - define float @sin_f32(float %a) nounwind { ; F-ABI-ALL-LABEL: sin_f32: ; F-ABI-ALL: # %bb.0: @@ -265,8 +261,6 @@ define float @sin_f32(float %a) nounwind { ret float %1 } -declare float @llvm.powi.f32.i32(float, i32) - define float @powi_f32(float %a, i32 %b) nounwind { ; RV32IFD-ILP32D-LABEL: powi_f32: ; RV32IFD-ILP32D: # %bb.0: @@ -322,8 +316,6 @@ define float @powi_f32(float %a, i32 %b) nounwind { ret float %1 } -declare i64 @llvm.llround.i64.f32(float) - define i64 @llround_f32(float %a) nounwind { ; RV32-ALL-LABEL: llround_f32: ; RV32-ALL: # %bb.0: @@ -364,8 +356,6 @@ define i64 @llround_f32(float %a) nounwind { ; Double libcalls: -declare double @llvm.sin.f64(double) - define double @sin_f64(double %a) nounwind { ; D-ABI-ALL-LABEL: sin_f64: ; D-ABI-ALL: # %bb.0: @@ -414,8 +404,6 @@ define double @sin_f64(double %a) nounwind { ret double %1 } -declare double @llvm.powi.f64.i32(double, i32) - define double @powi_f64(double %a, i32 %b) nounwind { ; RV32IFD-ILP32D-LABEL: powi_f64: ; RV32IFD-ILP32D: # %bb.0: @@ -472,8 +460,6 @@ define double @powi_f64(double %a, i32 %b) nounwind { ret double %1 } -declare i64 @llvm.llround.i64.f64(double) - define i64 @llround_f64(double %a) nounwind { ; RV32-ALL-LABEL: llround_f64: ; RV32-ALL: # %bb.0: diff --git 
a/llvm/test/CodeGen/RISCV/live-sp.mir b/llvm/test/CodeGen/RISCV/live-sp.mir index 6da655ced8125..b8d69625e6a95 100644 --- a/llvm/test/CodeGen/RISCV/live-sp.mir +++ b/llvm/test/CodeGen/RISCV/live-sp.mir @@ -17,11 +17,6 @@ ret void } - ; Function Attrs: nofree nosync nounwind readnone willreturn - declare ptr @llvm.returnaddress(i32 immarg) #0 - - attributes #0 = { nofree nosync nounwind readnone willreturn } - ... --- name: test1 diff --git a/llvm/test/CodeGen/RISCV/llvm.exp10.ll b/llvm/test/CodeGen/RISCV/llvm.exp10.ll index 7b199504837e8..5a4ce01e2f351 100644 --- a/llvm/test/CodeGen/RISCV/llvm.exp10.ll +++ b/llvm/test/CodeGen/RISCV/llvm.exp10.ll @@ -6,19 +6,6 @@ ; RUN: -verify-machineinstrs -target-abi=lp64d < %s \ ; RUN: | FileCheck -check-prefixes=CHECK,RV64IFD %s -declare <1 x half> @llvm.exp10.v1f16(<1 x half>) -declare <2 x half> @llvm.exp10.v2f16(<2 x half>) -declare <3 x half> @llvm.exp10.v3f16(<3 x half>) -declare <4 x half> @llvm.exp10.v4f16(<4 x half>) -declare <1 x float> @llvm.exp10.v1f32(<1 x float>) -declare <2 x float> @llvm.exp10.v2f32(<2 x float>) -declare <3 x float> @llvm.exp10.v3f32(<3 x float>) -declare <4 x float> @llvm.exp10.v4f32(<4 x float>) -declare <1 x double> @llvm.exp10.v1f64(<1 x double>) -declare <2 x double> @llvm.exp10.v2f64(<2 x double>) -declare <3 x double> @llvm.exp10.v3f64(<3 x double>) -declare <4 x double> @llvm.exp10.v4f64(<4 x double>) - define <1 x half> @exp10_v1f16(<1 x half> %x) { ; RV32IFD-LABEL: exp10_v1f16: ; RV32IFD: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/llvm.frexp.ll b/llvm/test/CodeGen/RISCV/llvm.frexp.ll index 4a77b4d32cdda..4ba3785b85a5e 100644 --- a/llvm/test/CodeGen/RISCV/llvm.frexp.ll +++ b/llvm/test/CodeGen/RISCV/llvm.frexp.ll @@ -1933,19 +1933,3 @@ define i32 @test_frexp_f128_i32_only_use_exp(fp128 %a) nounwind { ret i32 %result.0 } -declare { float, i32 } @llvm.frexp.f32.i32(float) #0 -declare { <2 x float>, <2 x i32> } @llvm.frexp.v2f32.v2i32(<2 x float>) #0 -declare { <4 x float>, <4 x i32> } 
@llvm.frexp.v4f32.v4i32(<4 x float>) #0 - -declare { half, i32 } @llvm.frexp.f16.i32(half) #0 -declare { <2 x half>, <2 x i32> } @llvm.frexp.v2f16.v2i32(<2 x half>) #0 - -declare { double, i32 } @llvm.frexp.f64.i32(double) #0 -declare { <2 x double>, <2 x i32> } @llvm.frexp.v2f64.v2i32(<2 x double>) #0 - -declare { half, i16 } @llvm.frexp.f16.i16(half) #0 -declare { <2 x half>, <2 x i16> } @llvm.frexp.v2f16.v2i16(<2 x half>) #0 - -declare { fp128, i32 } @llvm.frexp.f128.i32(fp128) #0 - -attributes #0 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) } diff --git a/llvm/test/CodeGen/RISCV/machine-combiner.ll b/llvm/test/CodeGen/RISCV/machine-combiner.ll index 69eca6dd7768a..326cf7bc179ce 100644 --- a/llvm/test/CodeGen/RISCV/machine-combiner.ll +++ b/llvm/test/CodeGen/RISCV/machine-combiner.ll @@ -1070,29 +1070,6 @@ define double @test_fmax_f64(double %a0, double %a1, double %a2, double %a3) { ret double %t2 } -declare i8 @llvm.umin.i8(i8 %a, i8 %b) -declare i16 @llvm.umin.i16(i16 %a, i16 %b) -declare i32 @llvm.umin.i32(i32 %a, i32 %b) -declare i64 @llvm.umin.i64(i64 %a, i64 %b) -declare i8 @llvm.smin.i8(i8 %a, i8 %b) -declare i16 @llvm.smin.i16(i16 %a, i16 %b) -declare i32 @llvm.smin.i32(i32 %a, i32 %b) -declare i64 @llvm.smin.i64(i64 %a, i64 %b) -declare i8 @llvm.umax.i8(i8 %a, i8 %b) -declare i16 @llvm.umax.i16(i16 %a, i16 %b) -declare i32 @llvm.umax.i32(i32 %a, i32 %b) -declare i64 @llvm.umax.i64(i64 %a, i64 %b) -declare i8 @llvm.smax.i8(i8 %a, i8 %b) -declare i16 @llvm.smax.i16(i16 %a, i16 %b) -declare i32 @llvm.smax.i32(i32 %a, i32 %b) -declare i64 @llvm.smax.i64(i64 %a, i64 %b) -declare half @llvm.minnum.f16(half, half) -declare float @llvm.minnum.f32(float, float) -declare double @llvm.minnum.f64(double, double) -declare half @llvm.maxnum.f16(half, half) -declare float @llvm.maxnum.f32(float, float) -declare double @llvm.maxnum.f64(double, double) - define double @test_fmadd_strategy(double %a0, double %a1, double %a2, double %a3, i64 
%flag) { ; CHECK-LABEL: test_fmadd_strategy: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/machine-cse.ll b/llvm/test/CodeGen/RISCV/machine-cse.ll index 58cc042f90e40..aaf4320284be1 100644 --- a/llvm/test/CodeGen/RISCV/machine-cse.ll +++ b/llvm/test/CodeGen/RISCV/machine-cse.ll @@ -79,8 +79,6 @@ falseblock: ret void } -declare half @llvm.fma.f16(half, half, half) - define void @commute_fmadd_f16(half %x, half %y, half %z, ptr %p1, ptr %p2, i1 zeroext %cond) { ; RV32-LABEL: commute_fmadd_f16: ; RV32: # %bb.0: @@ -114,8 +112,6 @@ falseblock: ret void } -declare float @llvm.fma.f32(float, float, float) - define void @commute_fmadd_f32(float %x, float %y, float %z, ptr %p1, ptr %p2, i1 zeroext %cond) { ; RV32-LABEL: commute_fmadd_f32: ; RV32: # %bb.0: @@ -149,8 +145,6 @@ falseblock: ret void } -declare double @llvm.fma.f64(double, double, double) - define void @commute_fmadd_f64(double %x, double %y, double %z, ptr %p1, ptr %p2, i1 zeroext %cond) { ; RV32-LABEL: commute_fmadd_f64: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/machinelicm-constant-phys-reg.ll b/llvm/test/CodeGen/RISCV/machinelicm-constant-phys-reg.ll index 83e9bf661ab1c..11047d1c758ea 100644 --- a/llvm/test/CodeGen/RISCV/machinelicm-constant-phys-reg.ll +++ b/llvm/test/CodeGen/RISCV/machinelicm-constant-phys-reg.ll @@ -1,8 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -O3 < %s -mtriple=riscv64 -mattr=+v | FileCheck %s -declare i32 @llvm.vector.reduce.add.nxv2i32() - define i32 @test(ptr %a, i64 %n) { ; CHECK-LABEL: test: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/memcpy-inline.ll b/llvm/test/CodeGen/RISCV/memcpy-inline.ll index 833e07351eec7..d150ab1dddc6b 100644 --- a/llvm/test/CodeGen/RISCV/memcpy-inline.ll +++ b/llvm/test/CodeGen/RISCV/memcpy-inline.ll @@ -933,7 +933,6 @@ entry: ; ------------------------------------------------------------------------ ; A few partially aligned cases - define void 
@memcpy16_align4(ptr nocapture %dest, ptr nocapture %src) nounwind { ; RV32-BOTH-LABEL: memcpy16_align4: ; RV32-BOTH: # %bb.0: # %entry @@ -1020,6 +1019,3 @@ entry: ret i32 0 } - -declare void @llvm.memcpy.inline.p0.p0.i32(ptr nocapture, ptr nocapture, i32, i1) nounwind -declare void @llvm.memcpy.inline.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind diff --git a/llvm/test/CodeGen/RISCV/memcpy.ll b/llvm/test/CodeGen/RISCV/memcpy.ll index 447fc26b0106e..680ddba73f07f 100644 --- a/llvm/test/CodeGen/RISCV/memcpy.ll +++ b/llvm/test/CodeGen/RISCV/memcpy.ll @@ -674,7 +674,6 @@ entry: ; ------------------------------------------------------------------------ ; A few partially aligned cases - define void @memcpy16_align4(ptr nocapture %dest, ptr nocapture %src) nounwind { ; RV32-BOTH-LABEL: memcpy16_align4: ; RV32-BOTH: # %bb.0: # %entry @@ -761,5 +760,3 @@ entry: ret i32 0 } -declare void @llvm.memcpy.p0.p0.i32(ptr nocapture, ptr nocapture, i32, i1) nounwind -declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind diff --git a/llvm/test/CodeGen/RISCV/memmove.ll b/llvm/test/CodeGen/RISCV/memmove.ll index 62915bd4ad99d..1fffe359389b0 100644 --- a/llvm/test/CodeGen/RISCV/memmove.ll +++ b/llvm/test/CodeGen/RISCV/memmove.ll @@ -600,7 +600,6 @@ entry: ; ------------------------------------------------------------------------ ; A few partially aligned cases - define void @memmove16_align4(ptr nocapture %dest, ptr nocapture %src) nounwind { ; RV32-BOTH-LABEL: memmove16_align4: ; RV32-BOTH: # %bb.0: # %entry @@ -667,4 +666,3 @@ entry: ret i32 0 } -declare void @llvm.memmove.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind diff --git a/llvm/test/CodeGen/RISCV/memset-inline.ll b/llvm/test/CodeGen/RISCV/memset-inline.ll index 40915241543ee..a03961b0dd5c9 100644 --- a/llvm/test/CodeGen/RISCV/memset-inline.ll +++ b/llvm/test/CodeGen/RISCV/memset-inline.ll @@ -9,9 +9,6 @@ ; RUN: | FileCheck %s --check-prefixes=RV64-BOTH,RV64-FAST %struct.x 
= type { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } -declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) nounwind -declare void @llvm.memset.inline.p0.i64(ptr nocapture, i8, i64, i1) nounwind - ; ///////////////////////////////////////////////////////////////////////////// define void @memset_1(ptr %a, i8 %value) nounwind { @@ -1164,7 +1161,6 @@ define void @aligned_memset_zero_8(ptr %a) nounwind { ret void } - define void @aligned_memset_zero_16(ptr %a) nounwind { ; RV32-BOTH-LABEL: aligned_memset_zero_16: ; RV32-BOTH: # %bb.0: @@ -1243,7 +1239,6 @@ define void @aligned_memset_zero_64(ptr %a) nounwind { ret void } - ; ///////////////////////////////////////////////////////////////////////////// ; Usual overlap tricks diff --git a/llvm/test/CodeGen/RISCV/min-max.ll b/llvm/test/CodeGen/RISCV/min-max.ll index e7f6899f18d16..71859431de923 100644 --- a/llvm/test/CodeGen/RISCV/min-max.ll +++ b/llvm/test/CodeGen/RISCV/min-max.ll @@ -14,8 +14,6 @@ ; Basic tests. -declare i8 @llvm.smax.i8(i8 %a, i8 %b) readnone - define signext i8 @smax_i8(i8 signext %a, i8 signext %b) { ; NOZBB-LABEL: smax_i8: ; NOZBB: # %bb.0: @@ -54,8 +52,6 @@ define signext i8 @smax_i8(i8 signext %a, i8 signext %b) { ret i8 %c } -declare i16 @llvm.smax.i16(i16 %a, i16 %b) readnone - define signext i16 @smax_i16(i16 signext %a, i16 signext %b) { ; NOZBB-LABEL: smax_i16: ; NOZBB: # %bb.0: @@ -94,8 +90,6 @@ define signext i16 @smax_i16(i16 signext %a, i16 signext %b) { ret i16 %c } -declare i32 @llvm.smax.i32(i32 %a, i32 %b) readnone - define signext i32 @smax_i32(i32 signext %a, i32 signext %b) { ; NOZBB-LABEL: smax_i32: ; NOZBB: # %bb.0: @@ -134,8 +128,6 @@ define signext i32 @smax_i32(i32 signext %a, i32 signext %b) { ret i32 %c } -declare i64 @llvm.smax.i64(i64 %a, i64 %b) readnone - define i64 @smax_i64(i64 %a, i64 %b) { ; RV32I-LABEL: smax_i64: ; RV32I: # %bb.0: @@ -220,8 +212,6 @@ define i64 @smax_i64(i64 %a, i64 %b) { ret i64 %c } -declare i8 @llvm.smin.i8(i8 %a, i8 %b) readnone - 
define signext i8 @smin_i8(i8 signext %a, i8 signext %b) { ; NOZBB-LABEL: smin_i8: ; NOZBB: # %bb.0: @@ -260,8 +250,6 @@ define signext i8 @smin_i8(i8 signext %a, i8 signext %b) { ret i8 %c } -declare i16 @llvm.smin.i16(i16 %a, i16 %b) readnone - define signext i16 @smin_i16(i16 signext %a, i16 signext %b) { ; NOZBB-LABEL: smin_i16: ; NOZBB: # %bb.0: @@ -300,8 +288,6 @@ define signext i16 @smin_i16(i16 signext %a, i16 signext %b) { ret i16 %c } -declare i32 @llvm.smin.i32(i32 %a, i32 %b) readnone - define signext i32 @smin_i32(i32 signext %a, i32 signext %b) { ; NOZBB-LABEL: smin_i32: ; NOZBB: # %bb.0: @@ -340,8 +326,6 @@ define signext i32 @smin_i32(i32 signext %a, i32 signext %b) { ret i32 %c } -declare i64 @llvm.smin.i64(i64 %a, i64 %b) readnone - define i64 @smin_i64(i64 %a, i64 %b) { ; RV32I-LABEL: smin_i64: ; RV32I: # %bb.0: @@ -426,8 +410,6 @@ define i64 @smin_i64(i64 %a, i64 %b) { ret i64 %c } -declare i8 @llvm.umax.i8(i8 %a, i8 %b) readnone - define i8 @umax_i8(i8 zeroext %a, i8 zeroext %b) { ; NOZBB-LABEL: umax_i8: ; NOZBB: # %bb.0: @@ -466,8 +448,6 @@ define i8 @umax_i8(i8 zeroext %a, i8 zeroext %b) { ret i8 %c } -declare i16 @llvm.umax.i16(i16 %a, i16 %b) readnone - define i16 @umax_i16(i16 zeroext %a, i16 zeroext %b) { ; NOZBB-LABEL: umax_i16: ; NOZBB: # %bb.0: @@ -506,8 +486,6 @@ define i16 @umax_i16(i16 zeroext %a, i16 zeroext %b) { ret i16 %c } -declare i32 @llvm.umax.i32(i32 %a, i32 %b) readnone - define signext i32 @umax_i32(i32 signext %a, i32 signext %b) { ; NOZBB-LABEL: umax_i32: ; NOZBB: # %bb.0: @@ -546,8 +524,6 @@ define signext i32 @umax_i32(i32 signext %a, i32 signext %b) { ret i32 %c } -declare i64 @llvm.umax.i64(i64 %a, i64 %b) readnone - define i64 @umax_i64(i64 %a, i64 %b) { ; RV32I-LABEL: umax_i64: ; RV32I: # %bb.0: @@ -632,8 +608,6 @@ define i64 @umax_i64(i64 %a, i64 %b) { ret i64 %c } -declare i8 @llvm.umin.i8(i8 %a, i8 %b) readnone - define zeroext i8 @umin_i8(i8 zeroext %a, i8 zeroext %b) { ; NOZBB-LABEL: umin_i8: ; NOZBB: # 
%bb.0: @@ -672,8 +646,6 @@ define zeroext i8 @umin_i8(i8 zeroext %a, i8 zeroext %b) { ret i8 %c } -declare i16 @llvm.umin.i16(i16 %a, i16 %b) readnone - define zeroext i16 @umin_i16(i16 zeroext %a, i16 zeroext %b) { ; NOZBB-LABEL: umin_i16: ; NOZBB: # %bb.0: @@ -712,8 +684,6 @@ define zeroext i16 @umin_i16(i16 zeroext %a, i16 zeroext %b) { ret i16 %c } -declare i32 @llvm.umin.i32(i32 %a, i32 %b) readnone - define signext i32 @umin_i32(i32 signext %a, i32 signext %b) { ; NOZBB-LABEL: umin_i32: ; NOZBB: # %bb.0: @@ -752,8 +722,6 @@ define signext i32 @umin_i32(i32 signext %a, i32 signext %b) { ret i32 %c } -declare i64 @llvm.umin.i64(i64 %a, i64 %b) readnone - define i64 @umin_i64(i64 %a, i64 %b) { ; RV32I-LABEL: umin_i64: ; RV32I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/miss-sp-restore-eh.ll b/llvm/test/CodeGen/RISCV/miss-sp-restore-eh.ll index 395fc99ea0536..3803ac82458bd 100644 --- a/llvm/test/CodeGen/RISCV/miss-sp-restore-eh.ll +++ b/llvm/test/CodeGen/RISCV/miss-sp-restore-eh.ll @@ -2,7 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+m < %s \ ; RUN: | FileCheck %s - @_ZTIi = external dso_local constant ptr declare void @_Z3fooiiiiiiiiiiPi(i32 signext %0, i32 signext %1, i32 signext %2, i32 signext %3, i32 signext %4, i32 signext %5, i32 signext %6, i32 signext %7, i32 %8, i32 %9, i32 %10) @@ -84,8 +83,6 @@ ehcleanup: declare i32 @__gxx_personality_v0(...) 
-declare i32 @llvm.eh.typeid.for(ptr) - declare ptr @__cxa_begin_catch(ptr) declare void @__cxa_end_catch() diff --git a/llvm/test/CodeGen/RISCV/module-target-abi3.ll b/llvm/test/CodeGen/RISCV/module-target-abi3.ll index 5df750c0d4b6e..1d3fc8f9c9a90 100644 --- a/llvm/test/CodeGen/RISCV/module-target-abi3.ll +++ b/llvm/test/CodeGen/RISCV/module-target-abi3.ll @@ -2,6 +2,5 @@ ; CHECK: Flags: 0x2, single-float ABI -attributes #0 = { "target-features"="+f" } !llvm.module.flags = !{!0} !0 = !{i32 1, !"target-abi", !"ilp32f"} diff --git a/llvm/test/CodeGen/RISCV/neg-abs.ll b/llvm/test/CodeGen/RISCV/neg-abs.ll index f9ccf7637eee9..cddd1a0331cad 100644 --- a/llvm/test/CodeGen/RISCV/neg-abs.ll +++ b/llvm/test/CodeGen/RISCV/neg-abs.ll @@ -8,9 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zbb -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefix=RV64ZBB -declare i32 @llvm.abs.i32(i32, i1 immarg) -declare i64 @llvm.abs.i64(i64, i1 immarg) - define i32 @neg_abs32(i32 %x) { ; RV32I-LABEL: neg_abs32: ; RV32I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/overflow-intrinsic-optimizations.ll b/llvm/test/CodeGen/RISCV/overflow-intrinsic-optimizations.ll index eca3df4861b90..5a4675fa52598 100644 --- a/llvm/test/CodeGen/RISCV/overflow-intrinsic-optimizations.ll +++ b/llvm/test/CodeGen/RISCV/overflow-intrinsic-optimizations.ll @@ -17,4 +17,3 @@ entry: ret i1 %7 } -declare { i64, i1 } @llvm.smul.with.overflow.i64(i64, i64) diff --git a/llvm/test/CodeGen/RISCV/pei-crash.ll b/llvm/test/CodeGen/RISCV/pei-crash.ll index 7778f9580cf69..169033b8036ac 100644 --- a/llvm/test/CodeGen/RISCV/pei-crash.ll +++ b/llvm/test/CodeGen/RISCV/pei-crash.ll @@ -23,6 +23,4 @@ entry: ret i64 %0 } -declare i64 @llvm.readcyclecounter() #1 - attributes #0 = { noinline nounwind optnone } diff --git a/llvm/test/CodeGen/RISCV/pr135206.ll b/llvm/test/CodeGen/RISCV/pr135206.ll index 75b11c373895b..1ca372d528ecf 100644 --- a/llvm/test/CodeGen/RISCV/pr135206.ll +++ b/llvm/test/CodeGen/RISCV/pr135206.ll @@ -3,7 
+3,6 @@ %"buff" = type { [4096 x i64] } -declare void @llvm.memset.p0.i64(ptr, i8, i64, i1) declare void @bar() define i1 @foo() nounwind "probe-stack"="inline-asm" "target-features"="+v" { diff --git a/llvm/test/CodeGen/RISCV/pr56457.ll b/llvm/test/CodeGen/RISCV/pr56457.ll index 0dca858089167..5e46e56103761 100644 --- a/llvm/test/CodeGen/RISCV/pr56457.ll +++ b/llvm/test/CodeGen/RISCV/pr56457.ll @@ -1,8 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc < %s -mtriple=riscv64 -mattr=+m | FileCheck %s -declare i15 @llvm.ctlz.i15(i15, i1) - define i15 @foo(i15 %x) nounwind { ; CHECK-LABEL: foo: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/pr69586.ll b/llvm/test/CodeGen/RISCV/pr69586.ll index 33b89a405d8e3..fa447c8f4fec1 100644 --- a/llvm/test/CodeGen/RISCV/pr69586.ll +++ b/llvm/test/CodeGen/RISCV/pr69586.ll @@ -2413,8 +2413,3 @@ define void @test(ptr %0, ptr %1, i64 %2) { ret void } -declare i64 @llvm.riscv.vsetvli.i64(i64, i64, i64) -declare @llvm.riscv.vle.nxv4i32.i64(, ptr, i64) -declare void @llvm.riscv.sf.vc.vv.se.i64.nxv4i32.nxv4i32.i64(i64, i64, , , i64) -declare @llvm.riscv.sf.vc.v.i.se.nxv4i32.i64.i64.i64(i64, i64, i64, i64) -declare void @llvm.riscv.vse.nxv4i32.i64(, ptr, i64) diff --git a/llvm/test/CodeGen/RISCV/pr92193.ll b/llvm/test/CodeGen/RISCV/pr92193.ll index 8c8398c4b45fa..8f197242db14a 100644 --- a/llvm/test/CodeGen/RISCV/pr92193.ll +++ b/llvm/test/CodeGen/RISCV/pr92193.ll @@ -18,4 +18,3 @@ entry: ret i16 %mul.0 } -declare i16 @llvm.vector.reduce.mul.v4i32(<4 x i16>) diff --git a/llvm/test/CodeGen/RISCV/prefetch.ll b/llvm/test/CodeGen/RISCV/prefetch.ll index bc46c60c053f3..ba33ed7ac1a59 100644 --- a/llvm/test/CodeGen/RISCV/prefetch.ll +++ b/llvm/test/CodeGen/RISCV/prefetch.ll @@ -10,8 +10,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zicbop,+zihintntl -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=RV64ZICBOPZIHINTNTL %s -declare void @llvm.prefetch(ptr, i32, i32, i32) - define void 
@test_prefetch_read_locality_0(ptr %a) nounwind { ; RV32I-LABEL: test_prefetch_read_locality_0: ; RV32I: # %bb.0: @@ -264,7 +262,6 @@ define void @test_prefetch_instruction_locality_2(ptr %a) nounwind { ret void } - define void @test_prefetch_read_locality_3(ptr %a) nounwind { ; RV32I-LABEL: test_prefetch_read_locality_3: ; RV32I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/push-pop-popret.ll b/llvm/test/CodeGen/RISCV/push-pop-popret.ll index 5e949f8969e3e..0d289c606eff4 100644 --- a/llvm/test/CodeGen/RISCV/push-pop-popret.ll +++ b/llvm/test/CodeGen/RISCV/push-pop-popret.ll @@ -1132,9 +1132,6 @@ entry: ; Check that functions with varargs do not use save/restore code -declare void @llvm.va_start(ptr) -declare void @llvm.va_end(ptr) - define i32 @varargs(ptr %fmt, ...) { ; RV32IZCMP-LABEL: varargs: ; RV32IZCMP: # %bb.0: @@ -1579,8 +1576,6 @@ entry: ; Check that dynamic allocation calculations remain correct -declare ptr @llvm.stacksave() -declare void @llvm.stackrestore(ptr) declare void @notdead(ptr) define void @alloca(i32 %n) { @@ -4004,7 +3999,6 @@ define void @callee_no_irq() { } declare void @bar(ptr, ptr) -declare ptr @llvm.frameaddress.p0(i32 immarg) define i32 @use_fp(i32 %x) { ; RV32IZCMP-LABEL: use_fp: diff --git a/llvm/test/CodeGen/RISCV/readcyclecounter.ll b/llvm/test/CodeGen/RISCV/readcyclecounter.ll index c22417cd0390f..83509901b59a6 100644 --- a/llvm/test/CodeGen/RISCV/readcyclecounter.ll +++ b/llvm/test/CodeGen/RISCV/readcyclecounter.ll @@ -6,8 +6,6 @@ ; Verify that we lower @llvm.readcyclecounter() correctly. 
-declare i64 @llvm.readcyclecounter() - define i64 @test_builtin_readcyclecounter() nounwind { ; RV32I-LABEL: test_builtin_readcyclecounter: ; RV32I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/readsteadycounter.ll b/llvm/test/CodeGen/RISCV/readsteadycounter.ll index 19eab64530c66..464d03814ba5d 100644 --- a/llvm/test/CodeGen/RISCV/readsteadycounter.ll +++ b/llvm/test/CodeGen/RISCV/readsteadycounter.ll @@ -6,8 +6,6 @@ ; Verify that we lower @llvm.readsteadycounter() correctly. -declare i64 @llvm.readsteadycounter() - define i64 @test_builtin_readsteadycounter() nounwind { ; RV32I-LABEL: test_builtin_readsteadycounter: ; RV32I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/redundant-copy-from-tail-duplicate.ll b/llvm/test/CodeGen/RISCV/redundant-copy-from-tail-duplicate.ll index 15b5698c22e81..705009c6deb70 100644 --- a/llvm/test/CodeGen/RISCV/redundant-copy-from-tail-duplicate.ll +++ b/llvm/test/CodeGen/RISCV/redundant-copy-from-tail-duplicate.ll @@ -1,7 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 ; RUN: llc < %s -mtriple=riscv64 -mattr=+v | FileCheck %s - define signext i32 @sum(ptr %a, i32 signext %n, i1 %prof.min.iters.check, %0, %1) { ; CHECK-LABEL: sum: ; CHECK: # %bb.0: # %entry @@ -47,4 +46,3 @@ for.end: ; preds = %for.body, %vector.p ret i32 %red.0.lcssa } -declare i32 @llvm.vp.reduce.add.nxv8i32(i32, , , i32) diff --git a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll index bfbe70685cbec..ae797de91b857 100644 --- a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll +++ b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll @@ -128,11 +128,3 @@ entry: } declare void @func() -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.nxv16f16.nxv16i32.i64(target("riscv.vector.tuple", , 2), ptr nocapture, , i64, i64) -declare 
@llvm.riscv.tuple.extract.v16f16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), i32) -declare @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.nxv16f16.i64(, , , , i64, i64, i64 immarg) -declare @llvm.riscv.vrgather.vv.mask.nxv16i16.i64(, , , , i64, i64 immarg) -declare @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16.i64(, , , i64, i64) -declare @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i64(, , , , i64, i64 immarg) -declare @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32.i64(, , , , i64, i64, i64 immarg) -declare void @llvm.riscv.vse.nxv16f32.i64(, ptr nocapture, i64) #3 diff --git a/llvm/test/CodeGen/RISCV/replace-with-veclib-sleef-scalable.ll b/llvm/test/CodeGen/RISCV/replace-with-veclib-sleef-scalable.ll index aa63552eb4b63..649d91c2d747b 100644 --- a/llvm/test/CodeGen/RISCV/replace-with-veclib-sleef-scalable.ll +++ b/llvm/test/CodeGen/RISCV/replace-with-veclib-sleef-scalable.ll @@ -444,48 +444,6 @@ define @frem_f32( %in) { ret %1 } -declare @llvm.ceil.nxv2f64() -declare @llvm.ceil.nxv4f32() -declare @llvm.copysign.nxv2f64(, ) -declare @llvm.copysign.nxv4f32(, ) -declare @llvm.cos.nxv2f64() -declare @llvm.cos.nxv4f32() -declare @llvm.exp.nxv2f64() -declare @llvm.exp.nxv4f32() -declare @llvm.exp2.nxv2f64() -declare @llvm.exp2.nxv4f32() -declare @llvm.exp10.nxv2f64() -declare @llvm.exp10.nxv4f32() -declare @llvm.fabs.nxv2f64() -declare @llvm.fabs.nxv4f32() -declare @llvm.floor.nxv2f64() -declare @llvm.floor.nxv4f32() -declare @llvm.fma.nxv2f64(, , ) -declare @llvm.fma.nxv4f32(, , ) -declare @llvm.log.nxv2f64() -declare @llvm.log.nxv4f32() -declare @llvm.log10.nxv2f64() -declare @llvm.log10.nxv4f32() -declare @llvm.log2.nxv2f64() -declare @llvm.log2.nxv4f32() -declare @llvm.maxnum.nxv2f64(, ) -declare @llvm.maxnum.nxv4f32(, ) -declare @llvm.minnum.nxv2f64(, ) -declare @llvm.minnum.nxv4f32(, ) -declare @llvm.nearbyint.nxv2f64() -declare @llvm.nearbyint.nxv4f32() -declare @llvm.pow.nxv2f64(, ) -declare @llvm.pow.nxv4f32(, ) -declare @llvm.rint.nxv2f64() -declare 
@llvm.rint.nxv4f32() -declare @llvm.round.nxv2f64() -declare @llvm.round.nxv4f32() -declare @llvm.sin.nxv2f64() -declare @llvm.sin.nxv4f32() -declare @llvm.sqrt.nxv2f64() -declare @llvm.sqrt.nxv4f32() -declare @llvm.trunc.nxv2f64() -declare @llvm.trunc.nxv4f32() ;. ; CHECK: attributes #[[ATTR0]] = { "target-features"="+v" } ; CHECK: attributes #[[ATTR1:[0-9]+]] = { nocallback nocreateundeforpoison nofree nosync nounwind speculatable willreturn memory(none) "target-features"="+v" } diff --git a/llvm/test/CodeGen/RISCV/riscv-zihintpause.ll b/llvm/test/CodeGen/RISCV/riscv-zihintpause.ll index 6c6f5e20a8b48..46e06875efe7a 100644 --- a/llvm/test/CodeGen/RISCV/riscv-zihintpause.ll +++ b/llvm/test/CodeGen/RISCV/riscv-zihintpause.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zihintpause -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RVPAUSE -declare void @llvm.riscv.pause() - define void @test_pause() { ; RVPAUSE-LABEL: test_pause: ; RVPAUSE: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rotl-rotr.ll b/llvm/test/CodeGen/RISCV/rotl-rotr.ll index cf64650c964e8..ec1de5d3229fa 100644 --- a/llvm/test/CodeGen/RISCV/rotl-rotr.ll +++ b/llvm/test/CodeGen/RISCV/rotl-rotr.ll @@ -1436,7 +1436,6 @@ define signext i32 @rotl_32_mask_shared(i32 signext %a, i32 signext %b, i32 sign %3 = add i32 %1, %2 ret i32 %3 } -declare i32 @llvm.fshl.i32(i32, i32, i32) define signext i64 @rotl_64_mask_shared(i64 signext %a, i64 signext %b, i64 signext %amt) nounwind { ; RV32I-LABEL: rotl_64_mask_shared: @@ -1602,7 +1601,6 @@ define signext i64 @rotl_64_mask_shared(i64 signext %a, i64 signext %b, i64 sign %3 = add i64 %1, %2 ret i64 %3 } -declare i64 @llvm.fshl.i64(i64, i64, i64) define signext i32 @rotr_32_mask_shared(i32 signext %a, i32 signext %b, i32 signext %amt) nounwind { ; RV32I-LABEL: rotr_32_mask_shared: @@ -1664,7 +1662,6 @@ define signext i32 @rotr_32_mask_shared(i32 signext %a, i32 signext %b, i32 sign %3 = add i32 %1, %2 ret i32 %3 } -declare i32 
@llvm.fshr.i32(i32, i32, i32) define signext i64 @rotr_64_mask_shared(i64 signext %a, i64 signext %b, i64 signext %amt) nounwind { ; RV32I-LABEL: rotr_64_mask_shared: @@ -1828,7 +1825,6 @@ define signext i64 @rotr_64_mask_shared(i64 signext %a, i64 signext %b, i64 sign %3 = add i64 %1, %2 ret i64 %3 } -declare i64 @llvm.fshr.i64(i64, i64, i64) define signext i32 @rotl_32_mask_multiple(i32 signext %a, i32 signext %b, i32 signext %amt) nounwind { ; RV32I-LABEL: rotl_32_mask_multiple: diff --git a/llvm/test/CodeGen/RISCV/rv32p.ll b/llvm/test/CodeGen/RISCV/rv32p.ll index 4eee880a398a9..8a81846da9da2 100644 --- a/llvm/test/CodeGen/RISCV/rv32p.ll +++ b/llvm/test/CodeGen/RISCV/rv32p.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+experimental-p -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -declare i32 @llvm.ctlz.i32(i32, i1) - define i32 @ctlz_i32(i32 %a) nounwind { ; CHECK-LABEL: ctlz_i32: ; CHECK: # %bb.0: @@ -13,8 +11,6 @@ define i32 @ctlz_i32(i32 %a) nounwind { ret i32 %1 } -declare i64 @llvm.ctlz.i64(i64, i1) - define i64 @ctlz_i64(i64 %a) nounwind { ; CHECK-LABEL: ctlz_i64: ; CHECK: # %bb.0: @@ -32,8 +28,6 @@ define i64 @ctlz_i64(i64 %a) nounwind { ret i64 %1 } -declare i32 @llvm.cttz.i32(i32, i1) - define i32 @cttz_i32(i32 %a) nounwind { ; CHECK-LABEL: cttz_i32: ; CHECK: # %bb.0: @@ -53,8 +47,6 @@ define i32 @cttz_i32(i32 %a) nounwind { ret i32 %1 } -declare i64 @llvm.cttz.i64(i64, i1) - define i64 @cttz_i64(i64 %a) nounwind { ; CHECK-LABEL: cttz_i64: ; CHECK: # %bb.0: @@ -273,8 +265,6 @@ define i64 @maxu_i64(i64 %a, i64 %b) nounwind { ret i64 %cond } -declare i32 @llvm.abs.i32(i32, i1 immarg) - define i32 @abs_i32(i32 %x) { ; CHECK-LABEL: abs_i32: ; CHECK: # %bb.0: @@ -284,8 +274,6 @@ define i32 @abs_i32(i32 %x) { ret i32 %abs } -declare i64 @llvm.abs.i64(i64, i1 immarg) - define i64 @abs_i64(i64 %x) { ; CHECK-LABEL: abs_i64: ; CHECK: # %bb.0: @@ -322,8 +310,6 @@ define i64 @zexth_i64(i64 %a) nounwind { ret i64 %and } -declare i32 @llvm.bswap.i32(i32) 
- define i32 @bswap_i32(i32 %a) nounwind { ; CHECK-LABEL: bswap_i32: ; CHECK: # %bb.0: @@ -333,8 +319,6 @@ define i32 @bswap_i32(i32 %a) nounwind { ret i32 %1 } -declare i64 @llvm.bswap.i64(i64) - define i64 @bswap_i64(i64 %a) { ; CHECK-LABEL: bswap_i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv32xtheadbb.ll b/llvm/test/CodeGen/RISCV/rv32xtheadbb.ll index aa02d46c34550..ff236c72922f4 100644 --- a/llvm/test/CodeGen/RISCV/rv32xtheadbb.ll +++ b/llvm/test/CodeGen/RISCV/rv32xtheadbb.ll @@ -6,8 +6,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+xtheadbb,+b -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefixes=CHECK,RV32XTHEADBB,RV32XTHEADBB-B -declare i32 @llvm.ctlz.i32(i32, i1) - define i32 @ctlz_i32(i32 %a) nounwind { ; RV32I-LABEL: ctlz_i32: ; RV32I: # %bb.0: @@ -63,8 +61,6 @@ define i32 @ctlz_i32(i32 %a) nounwind { ret i32 %1 } -declare i64 @llvm.ctlz.i64(i64, i1) - define i64 @ctlz_i64(i64 %a) nounwind { ; RV32I-LABEL: ctlz_i64: ; RV32I: # %bb.0: @@ -171,8 +167,6 @@ define i64 @ctlz_i64(i64 %a) nounwind { ret i64 %1 } -declare i32 @llvm.cttz.i32(i32, i1) - define i32 @cttz_i32(i32 %a) nounwind { ; RV32I-LABEL: cttz_i32: ; RV32I: # %bb.0: @@ -220,8 +214,6 @@ define i32 @cttz_i32(i32 %a) nounwind { ret i32 %1 } -declare i64 @llvm.cttz.i64(i64, i1) - define i64 @cttz_i64(i64 %a) nounwind { ; RV32I-LABEL: cttz_i64: ; RV32I: # %bb.0: @@ -578,8 +570,6 @@ define i64 @zexth_i64(i64 %a) nounwind { ret i64 %and } -declare i32 @llvm.bswap.i32(i32) - define i32 @bswap_i32(i32 %a) nounwind { ; RV32I-LABEL: bswap_i32: ; RV32I: # %bb.0: @@ -609,8 +599,6 @@ define i32 @bswap_i32(i32 %a) nounwind { ret i32 %1 } -declare i64 @llvm.bswap.i64(i64) - define i64 @bswap_i64(i64 %a) { ; RV32I-LABEL: bswap_i64: ; RV32I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv32zbb-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv32zbb-intrinsic.ll index 1a6c87465d026..b97277aeee708 100644 --- a/llvm/test/CodeGen/RISCV/rv32zbb-intrinsic.ll +++ 
b/llvm/test/CodeGen/RISCV/rv32zbb-intrinsic.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zbb -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV32ZBB -declare i32 @llvm.riscv.orc.b.i32(i32) - define i32 @orcb(i32 %a) nounwind { ; RV32ZBB-LABEL: orcb: ; RV32ZBB: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll b/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll index 7ab3d7c694568..d7f55f23a3c56 100644 --- a/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll +++ b/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll @@ -245,8 +245,6 @@ define i64 @inverted_masked_merge_i64(i64 %x, i64 %y, i64 %z) nounwind { ret i64 %not } -declare i32 @llvm.fshl.i32(i32, i32, i32) - define i32 @rol_i32(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: rol_i32: ; RV32I: # %bb.0: @@ -267,8 +265,6 @@ define i32 @rol_i32(i32 %a, i32 %b) nounwind { ; This test is presented here in case future expansions of the Bitmanip ; extensions introduce instructions suitable for this pattern. -declare i64 @llvm.fshl.i64(i64, i64, i64) - define i64 @rol_i64(i64 %a, i64 %b) nounwind { ; CHECK-LABEL: rol_i64: ; CHECK: # %bb.0: @@ -297,8 +293,6 @@ define i64 @rol_i64(i64 %a, i64 %b) nounwind { ret i64 %or } -declare i32 @llvm.fshr.i32(i32, i32, i32) - define i32 @ror_i32(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: ror_i32: ; RV32I: # %bb.0: @@ -319,8 +313,6 @@ define i32 @ror_i32(i32 %a, i32 %b) nounwind { ; This test is presented here in case future expansions of the Bitmanip ; extensions introduce instructions suitable for this pattern. 
-declare i64 @llvm.fshr.i64(i64, i64, i64) - define i64 @ror_i64(i64 %a, i64 %b) nounwind { ; CHECK-LABEL: ror_i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv32zbb.ll b/llvm/test/CodeGen/RISCV/rv32zbb.ll index a1a843a7c1ba7..dad71ee5de066 100644 --- a/llvm/test/CodeGen/RISCV/rv32zbb.ll +++ b/llvm/test/CodeGen/RISCV/rv32zbb.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zbb -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefixes=CHECK,RV32ZBB -declare i32 @llvm.ctlz.i32(i32, i1) - define i32 @ctlz_i32(i32 %a) nounwind { ; RV32I-LABEL: ctlz_i32: ; RV32I: # %bb.0: @@ -56,8 +54,6 @@ define i32 @ctlz_i32(i32 %a) nounwind { ret i32 %1 } -declare i64 @llvm.ctlz.i64(i64, i1) - define i64 @ctlz_i64(i64 %a) nounwind { ; RV32I-LABEL: ctlz_i64: ; RV32I: # %bb.0: @@ -151,8 +147,6 @@ define i64 @ctlz_i64(i64 %a) nounwind { ret i64 %1 } -declare i32 @llvm.cttz.i32(i32, i1) - define i32 @cttz_i32(i32 %a) nounwind { ; RV32I-LABEL: cttz_i32: ; RV32I: # %bb.0: @@ -185,8 +179,6 @@ define i32 @cttz_i32(i32 %a) nounwind { ret i32 %1 } -declare i64 @llvm.cttz.i64(i64, i1) - define i64 @cttz_i64(i64 %a) nounwind { ; RV32I-LABEL: cttz_i64: ; RV32I: # %bb.0: @@ -258,8 +250,6 @@ define i64 @cttz_i64(i64 %a) nounwind { ret i64 %1 } -declare i32 @llvm.ctpop.i32(i32) - define i32 @ctpop_i32(i32 %a) nounwind { ; RV32I-LABEL: ctpop_i32: ; RV32I: # %bb.0: @@ -370,8 +360,6 @@ define i1 @ctpop_i32_ne_one(i32 signext %a) nounwind { ret i1 %2 } -declare <2 x i32> @llvm.ctpop.v2i32(<2 x i32>) - define <2 x i32> @ctpop_v2i32(<2 x i32> %a) nounwind { ; RV32I-LABEL: ctpop_v2i32: ; RV32I: # %bb.0: @@ -484,8 +472,6 @@ define <2 x i1> @ctpop_v2i32_ne_one(<2 x i32> %a) nounwind { ret <2 x i1> %2 } -declare i64 @llvm.ctpop.i64(i64) - define i64 @ctpop_i64(i64 %a) nounwind { ; RV32I-LABEL: ctpop_i64: ; RV32I: # %bb.0: @@ -649,8 +635,6 @@ define i1 @ctpop_i64_ne_one(i64 %a) nounwind { ret i1 %2 } -declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>) - define <2 x i64> @ctpop_v2i64(<2 
x i64> %a) nounwind { ; RV32I-LABEL: ctpop_v2i64: ; RV32I: # %bb.0: @@ -1127,8 +1111,6 @@ define i64 @maxu_i64(i64 %a, i64 %b) nounwind { ret i64 %cond } -declare i32 @llvm.abs.i32(i32, i1 immarg) - define i32 @abs_i32(i32 %x) { ; RV32I-LABEL: abs_i32: ; RV32I: # %bb.0: @@ -1146,8 +1128,6 @@ define i32 @abs_i32(i32 %x) { ret i32 %abs } -declare i64 @llvm.abs.i64(i64, i1 immarg) - define i64 @abs_i64(i64 %x) { ; CHECK-LABEL: abs_i64: ; CHECK: # %bb.0: @@ -1195,8 +1175,6 @@ define i64 @zexth_i64(i64 %a) nounwind { ret i64 %and } -declare i32 @llvm.bswap.i32(i32) - define i32 @bswap_i32(i32 %a) nounwind { ; RV32I-LABEL: bswap_i32: ; RV32I: # %bb.0: @@ -1221,8 +1199,6 @@ define i32 @bswap_i32(i32 %a) nounwind { ret i32 %1 } -declare i64 @llvm.bswap.i64(i64) - define i64 @bswap_i64(i64 %a) { ; RV32I-LABEL: bswap_i64: ; RV32I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv32zbc-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv32zbc-intrinsic.ll index fcd1671bc04d0..0f53acdfc7bf3 100644 --- a/llvm/test/CodeGen/RISCV/rv32zbc-intrinsic.ll +++ b/llvm/test/CodeGen/RISCV/rv32zbc-intrinsic.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zbc -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV32ZBC -declare i32 @llvm.riscv.clmulr.i32(i32 %a, i32 %b) - define i32 @clmul32r(i32 %a, i32 %b) nounwind { ; RV32ZBC-LABEL: clmul32r: ; RV32ZBC: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv32zbc-zbkc-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv32zbc-zbkc-intrinsic.ll index 51fd086e26dfe..b40e8c28db7ee 100644 --- a/llvm/test/CodeGen/RISCV/rv32zbc-zbkc-intrinsic.ll +++ b/llvm/test/CodeGen/RISCV/rv32zbc-zbkc-intrinsic.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zbkc -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV32ZBC-ZBKC -declare i32 @llvm.riscv.clmul.i32(i32 %a, i32 %b) - define i32 @clmul32(i32 %a, i32 %b) nounwind { ; RV32ZBC-ZBKC-LABEL: clmul32: ; RV32ZBC-ZBKC: # %bb.0: @@ -15,8 +13,6 @@ define i32 @clmul32(i32 %a, i32 %b) nounwind { ret 
i32 %tmp } -declare i32 @llvm.riscv.clmulh.i32(i32 %a, i32 %b) - define i32 @clmul32h(i32 %a, i32 %b) nounwind { ; RV32ZBC-ZBKC-LABEL: clmul32h: ; RV32ZBC-ZBKC: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv32zbkb-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv32zbkb-intrinsic.ll index a4d76f8e82103..0b92498f221d9 100644 --- a/llvm/test/CodeGen/RISCV/rv32zbkb-intrinsic.ll +++ b/llvm/test/CodeGen/RISCV/rv32zbkb-intrinsic.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zbkb -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV32ZBKB -declare i32 @llvm.riscv.brev8(i32); - define i32 @brev8(i32 %a) nounwind { ; RV32ZBKB-LABEL: brev8: ; RV32ZBKB: # %bb.0: @@ -25,8 +23,6 @@ define zeroext i16 @brev8_knownbits(i16 zeroext %a) nounwind { ret i16 %trunc } -declare i32 @llvm.bswap.i32(i32) - define i32 @rev8_i32(i32 %a) nounwind { ; RV32ZBKB-LABEL: rev8_i32: ; RV32ZBKB: # %bb.0: @@ -36,8 +32,6 @@ define i32 @rev8_i32(i32 %a) nounwind { ret i32 %1 } -declare i32 @llvm.riscv.zip(i32); - define i32 @zip(i32 %a) nounwind { ; RV32ZBKB-LABEL: zip: ; RV32ZBKB: # %bb.0: @@ -47,8 +41,6 @@ define i32 @zip(i32 %a) nounwind { ret i32 %val } -declare i32 @llvm.riscv.unzip(i32); - define i32 @unzip(i32 %a) nounwind { ; RV32ZBKB-LABEL: unzip: ; RV32ZBKB: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv32zbkx-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv32zbkx-intrinsic.ll index eb94c20e1f44e..11c00641bb66d 100644 --- a/llvm/test/CodeGen/RISCV/rv32zbkx-intrinsic.ll +++ b/llvm/test/CodeGen/RISCV/rv32zbkx-intrinsic.ll @@ -1,8 +1,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zbkx -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV32ZBKX -declare i32 @llvm.riscv.xperm8.i32(i32 %a, i32 %b) - define i32 @xperm8(i32 %a, i32 %b) nounwind { ; RV32ZBKX-LABEL: xperm8: ; RV32ZBKX: # %bb.0: @@ -12,8 +10,6 @@ define i32 @xperm8(i32 %a, i32 %b) nounwind { ret i32 %tmp } -declare i32 @llvm.riscv.xperm4.i32(i32 %a, i32 %b) - define i32 @xperm4(i32 %a, i32 %b) nounwind { ; 
RV32ZBKX-LABEL: xperm4: ; RV32ZBKX: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv32zimop-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv32zimop-intrinsic.ll index 8e843fa47db69..f56ee0011e413 100644 --- a/llvm/test/CodeGen/RISCV/rv32zimop-intrinsic.ll +++ b/llvm/test/CodeGen/RISCV/rv32zimop-intrinsic.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zimop -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV32ZIMOP -declare i32 @llvm.riscv.mopr.i32(i32 %a, i32 %b) - define i32 @mopr0_32(i32 %a) nounwind { ; RV32ZIMOP-LABEL: mopr0_32: ; RV32ZIMOP: # %bb.0: @@ -22,8 +20,6 @@ define i32 @mopr31_32(i32 %a) nounwind { ret i32 %tmp } -declare i32 @llvm.riscv.moprr.i32(i32 %a, i32 %b, i32 %c) - define i32 @moprr0_32(i32 %a, i32 %b) nounwind { ; RV32ZIMOP-LABEL: moprr0_32: ; RV32ZIMOP: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv32zknd-intrinsic-autoupgrade.ll b/llvm/test/CodeGen/RISCV/rv32zknd-intrinsic-autoupgrade.ll index 33c5839fde586..6e6d18490782c 100644 --- a/llvm/test/CodeGen/RISCV/rv32zknd-intrinsic-autoupgrade.ll +++ b/llvm/test/CodeGen/RISCV/rv32zknd-intrinsic-autoupgrade.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zknd -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV32ZKND -declare i32 @llvm.riscv.aes32dsi(i32, i32, i8); - define i32 @aes32dsi(i32 %a, i32 %b) nounwind { ; RV32ZKND-LABEL: aes32dsi: ; RV32ZKND: # %bb.0: @@ -13,8 +11,6 @@ define i32 @aes32dsi(i32 %a, i32 %b) nounwind { ret i32 %val } -declare i32 @llvm.riscv.aes32dsmi(i32, i32, i8); - define i32 @aes32dsmi(i32 %a, i32 %b) nounwind { ; RV32ZKND-LABEL: aes32dsmi: ; RV32ZKND: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv32zknd-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv32zknd-intrinsic.ll index 9535d127f244b..2c69ddd684f80 100644 --- a/llvm/test/CodeGen/RISCV/rv32zknd-intrinsic.ll +++ b/llvm/test/CodeGen/RISCV/rv32zknd-intrinsic.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zknd -verify-machineinstrs < %s \ ; RUN: | FileCheck %s 
-check-prefix=RV32ZKND -declare i32 @llvm.riscv.aes32dsi(i32, i32, i32); - define i32 @aes32dsi(i32 %a, i32 %b) nounwind { ; RV32ZKND-LABEL: aes32dsi: ; RV32ZKND: # %bb.0: @@ -13,8 +11,6 @@ define i32 @aes32dsi(i32 %a, i32 %b) nounwind { ret i32 %val } -declare i32 @llvm.riscv.aes32dsmi(i32, i32, i32); - define i32 @aes32dsmi(i32 %a, i32 %b) nounwind { ; RV32ZKND-LABEL: aes32dsmi: ; RV32ZKND: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv32zkne-intrinsic-autoupgrade.ll b/llvm/test/CodeGen/RISCV/rv32zkne-intrinsic-autoupgrade.ll index 296641ca593e6..6cdad6682b70c 100644 --- a/llvm/test/CodeGen/RISCV/rv32zkne-intrinsic-autoupgrade.ll +++ b/llvm/test/CodeGen/RISCV/rv32zkne-intrinsic-autoupgrade.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zkne -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV32ZKNE -declare i32 @llvm.riscv.aes32esi(i32, i32, i8); - define i32 @aes32esi(i32 %a, i32 %b) nounwind { ; RV32ZKNE-LABEL: aes32esi: ; RV32ZKNE: # %bb.0: @@ -13,8 +11,6 @@ define i32 @aes32esi(i32 %a, i32 %b) nounwind { ret i32 %val } -declare i32 @llvm.riscv.aes32esmi(i32, i32, i8); - define i32 @aes32esmi(i32 %a, i32 %b) nounwind { ; RV32ZKNE-LABEL: aes32esmi: ; RV32ZKNE: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv32zkne-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv32zkne-intrinsic.ll index 5859426823c8a..6dd22001f815e 100644 --- a/llvm/test/CodeGen/RISCV/rv32zkne-intrinsic.ll +++ b/llvm/test/CodeGen/RISCV/rv32zkne-intrinsic.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zkne -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV32ZKNE -declare i32 @llvm.riscv.aes32esi(i32, i32, i32); - define i32 @aes32esi(i32 %a, i32 %b) nounwind { ; RV32ZKNE-LABEL: aes32esi: ; RV32ZKNE: # %bb.0: @@ -13,8 +11,6 @@ define i32 @aes32esi(i32 %a, i32 %b) nounwind { ret i32 %val } -declare i32 @llvm.riscv.aes32esmi(i32, i32, i32); - define i32 @aes32esmi(i32 %a, i32 %b) nounwind { ; RV32ZKNE-LABEL: aes32esmi: ; RV32ZKNE: # %bb.0: diff --git 
a/llvm/test/CodeGen/RISCV/rv32zknh-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv32zknh-intrinsic.ll index 906285e320d12..a3846bcf69180 100644 --- a/llvm/test/CodeGen/RISCV/rv32zknh-intrinsic.ll +++ b/llvm/test/CodeGen/RISCV/rv32zknh-intrinsic.ll @@ -2,9 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zknh -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV32ZKNH - -declare i32 @llvm.riscv.sha256sig0(i32); - define i32 @sha256sig0_i32(i32 %a) nounwind { ; RV32ZKNH-LABEL: sha256sig0_i32: ; RV32ZKNH: # %bb.0: @@ -14,8 +11,6 @@ define i32 @sha256sig0_i32(i32 %a) nounwind { ret i32 %val } -declare i32 @llvm.riscv.sha256sig1(i32); - define i32 @sha256sig1_i32(i32 %a) nounwind { ; RV32ZKNH-LABEL: sha256sig1_i32: ; RV32ZKNH: # %bb.0: @@ -25,8 +20,6 @@ define i32 @sha256sig1_i32(i32 %a) nounwind { ret i32 %val } -declare i32 @llvm.riscv.sha256sum0(i32); - define i32 @sha256sum0_i32(i32 %a) nounwind { ; RV32ZKNH-LABEL: sha256sum0_i32: ; RV32ZKNH: # %bb.0: @@ -36,8 +29,6 @@ define i32 @sha256sum0_i32(i32 %a) nounwind { ret i32 %val } -declare i32 @llvm.riscv.sha256sum1(i32); - define i32 @sha256sum1_i32(i32 %a) nounwind { ; RV32ZKNH-LABEL: sha256sum1_i32: ; RV32ZKNH: # %bb.0: @@ -47,8 +38,6 @@ define i32 @sha256sum1_i32(i32 %a) nounwind { ret i32 %val } -declare i32 @llvm.riscv.sha512sig0l(i32, i32); - define i32 @sha512sig0l(i32 %a, i32 %b) nounwind { ; RV32ZKNH-LABEL: sha512sig0l: ; RV32ZKNH: # %bb.0: @@ -58,8 +47,6 @@ define i32 @sha512sig0l(i32 %a, i32 %b) nounwind { ret i32 %val } -declare i32 @llvm.riscv.sha512sig0h(i32, i32); - define i32 @sha512sig0h(i32 %a, i32 %b) nounwind { ; RV32ZKNH-LABEL: sha512sig0h: ; RV32ZKNH: # %bb.0: @@ -69,8 +56,6 @@ define i32 @sha512sig0h(i32 %a, i32 %b) nounwind { ret i32 %val } -declare i32 @llvm.riscv.sha512sig1l(i32, i32); - define i32 @sha512sig1l(i32 %a, i32 %b) nounwind { ; RV32ZKNH-LABEL: sha512sig1l: ; RV32ZKNH: # %bb.0: @@ -80,8 +65,6 @@ define i32 @sha512sig1l(i32 %a, i32 %b) nounwind { ret i32 %val } -declare i32 
@llvm.riscv.sha512sig1h(i32, i32); - define i32 @sha512sig1h(i32 %a, i32 %b) nounwind { ; RV32ZKNH-LABEL: sha512sig1h: ; RV32ZKNH: # %bb.0: @@ -91,8 +74,6 @@ define i32 @sha512sig1h(i32 %a, i32 %b) nounwind { ret i32 %val } -declare i32 @llvm.riscv.sha512sum0r(i32, i32); - define i32 @sha512sum0r(i32 %a, i32 %b) nounwind { ; RV32ZKNH-LABEL: sha512sum0r: ; RV32ZKNH: # %bb.0: @@ -102,8 +83,6 @@ define i32 @sha512sum0r(i32 %a, i32 %b) nounwind { ret i32 %val } -declare i32 @llvm.riscv.sha512sum1r(i32, i32); - define i32 @sha512sum1r(i32 %a, i32 %b) nounwind { ; RV32ZKNH-LABEL: sha512sum1r: ; RV32ZKNH: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv32zksed-intrinsic-autoupgrade.ll b/llvm/test/CodeGen/RISCV/rv32zksed-intrinsic-autoupgrade.ll index e8ecb4f3decd2..b435cb6a4cec0 100644 --- a/llvm/test/CodeGen/RISCV/rv32zksed-intrinsic-autoupgrade.ll +++ b/llvm/test/CodeGen/RISCV/rv32zksed-intrinsic-autoupgrade.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zksed -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV32ZKSED -declare i32 @llvm.riscv.sm4ks.i32(i32, i32, i8); - define i32 @sm4ks_i32(i32 %a, i32 %b) nounwind { ; RV32ZKSED-LABEL: sm4ks_i32: ; RV32ZKSED: # %bb.0: @@ -13,8 +11,6 @@ define i32 @sm4ks_i32(i32 %a, i32 %b) nounwind { ret i32 %val } -declare i32 @llvm.riscv.sm4ed.i32(i32, i32, i8); - define i32 @sm4ed_i32(i32 %a, i32 %b) nounwind { ; RV32ZKSED-LABEL: sm4ed_i32: ; RV32ZKSED: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv32zksed-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv32zksed-intrinsic.ll index e29c515cb8319..dde131e778d19 100644 --- a/llvm/test/CodeGen/RISCV/rv32zksed-intrinsic.ll +++ b/llvm/test/CodeGen/RISCV/rv32zksed-intrinsic.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zksed -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV32ZKSED -declare i32 @llvm.riscv.sm4ks(i32, i32, i32); - define i32 @sm4ks_i32(i32 %a, i32 %b) nounwind { ; RV32ZKSED-LABEL: sm4ks_i32: ; RV32ZKSED: # %bb.0: @@ -13,8 +11,6 @@ 
define i32 @sm4ks_i32(i32 %a, i32 %b) nounwind { ret i32 %val } -declare i32 @llvm.riscv.sm4ed(i32, i32, i32); - define i32 @sm4ed_i32(i32 %a, i32 %b) nounwind { ; RV32ZKSED-LABEL: sm4ed_i32: ; RV32ZKSED: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv32zksh-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv32zksh-intrinsic.ll index df2703f996f96..c92e8a3358985 100644 --- a/llvm/test/CodeGen/RISCV/rv32zksh-intrinsic.ll +++ b/llvm/test/CodeGen/RISCV/rv32zksh-intrinsic.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zksh -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV32ZKSH -declare i32 @llvm.riscv.sm3p0(i32); - define i32 @sm3p0_i32(i32 %a) nounwind { ; RV32ZKSH-LABEL: sm3p0_i32: ; RV32ZKSH: # %bb.0: @@ -13,8 +11,6 @@ define i32 @sm3p0_i32(i32 %a) nounwind { ret i32 %val } -declare i32 @llvm.riscv.sm3p1(i32); - define i32 @sm3p1_i32(i32 %a) nounwind { ; RV32ZKSH-LABEL: sm3p1_i32: ; RV32ZKSH: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv64-double-convert.ll b/llvm/test/CodeGen/RISCV/rv64-double-convert.ll index a919452389c43..c310c105767ff 100644 --- a/llvm/test/CodeGen/RISCV/rv64-double-convert.ll +++ b/llvm/test/CodeGen/RISCV/rv64-double-convert.ll @@ -195,7 +195,6 @@ define i128 @fptosi_sat_f64_to_i128(double %a) nounwind { %1 = tail call i128 @llvm.fptosi.sat.i128.f64(double %a) ret i128 %1 } -declare i128 @llvm.fptosi.sat.i128.f64(double) define i128 @fptoui_sat_f64_to_i128(double %a) nounwind { ; RV64I-LABEL: fptoui_sat_f64_to_i128: @@ -286,4 +285,3 @@ define i128 @fptoui_sat_f64_to_i128(double %a) nounwind { %1 = tail call i128 @llvm.fptoui.sat.i128.f64(double %a) ret i128 %1 } -declare i128 @llvm.fptoui.sat.i128.f64(double) diff --git a/llvm/test/CodeGen/RISCV/rv64-float-convert.ll b/llvm/test/CodeGen/RISCV/rv64-float-convert.ll index 0af75a789f7a2..fcbbb8235c629 100644 --- a/llvm/test/CodeGen/RISCV/rv64-float-convert.ll +++ b/llvm/test/CodeGen/RISCV/rv64-float-convert.ll @@ -191,7 +191,6 @@ define i128 @fptosi_sat_f32_to_i128(float %a) 
nounwind { %1 = tail call i128 @llvm.fptosi.sat.i128.f32(float %a) ret i128 %1 } -declare i128 @llvm.fptosi.sat.i128.f32(float) define i128 @fptoui_sat_f32_to_i128(float %a) nounwind { ; RV64I-LABEL: fptoui_sat_f32_to_i128: @@ -278,4 +277,3 @@ define i128 @fptoui_sat_f32_to_i128(float %a) nounwind { %1 = tail call i128 @llvm.fptoui.sat.i128.f32(float %a) ret i128 %1 } -declare i128 @llvm.fptoui.sat.i128.f32(float) diff --git a/llvm/test/CodeGen/RISCV/rv64-half-convert.ll b/llvm/test/CodeGen/RISCV/rv64-half-convert.ll index d8f3816b85485..7ff1d7684d1ee 100644 --- a/llvm/test/CodeGen/RISCV/rv64-half-convert.ll +++ b/llvm/test/CodeGen/RISCV/rv64-half-convert.ll @@ -269,7 +269,6 @@ define i128 @fptosi_sat_f16_to_i128(half %a) nounwind { %1 = tail call i128 @llvm.fptosi.sat.i128.f16(half %a) ret i128 %1 } -declare i128 @llvm.fptosi.sat.i128.f16(half) define i128 @fptoui_sat_f16_to_i128(half %a) nounwind { ; RV64I-LABEL: fptoui_sat_f16_to_i128: @@ -358,6 +357,5 @@ define i128 @fptoui_sat_f16_to_i128(half %a) nounwind { %1 = tail call i128 @llvm.fptoui.sat.i128.f16(half %a) ret i128 %1 } -declare i128 @llvm.fptoui.sat.i128.f16(half) ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: ; CHECK: {{.*}} diff --git a/llvm/test/CodeGen/RISCV/rv64-patchpoint.ll b/llvm/test/CodeGen/RISCV/rv64-patchpoint.ll index 0850cc65d81ee..a134fc89452fb 100644 --- a/llvm/test/CodeGen/RISCV/rv64-patchpoint.ll +++ b/llvm/test/CodeGen/RISCV/rv64-patchpoint.ll @@ -64,6 +64,3 @@ entry: ret void } -declare void @llvm.experimental.stackmap(i64, i32, ...) -declare void @llvm.experimental.patchpoint.void(i64, i32, ptr, i32, ...) -declare i64 @llvm.experimental.patchpoint.i64(i64, i32, ptr, i32, ...) 
diff --git a/llvm/test/CodeGen/RISCV/rv64-stackmap-args.ll b/llvm/test/CodeGen/RISCV/rv64-stackmap-args.ll index 9437ac02962b6..62e2a6702bb38 100644 --- a/llvm/test/CodeGen/RISCV/rv64-stackmap-args.ll +++ b/llvm/test/CodeGen/RISCV/rv64-stackmap-args.ll @@ -19,4 +19,3 @@ entry: ret void } -declare void @llvm.experimental.stackmap(i64, i32, ...) diff --git a/llvm/test/CodeGen/RISCV/rv64-stackmap-frame-setup.ll b/llvm/test/CodeGen/RISCV/rv64-stackmap-frame-setup.ll index 1bba9cbfd03c4..c2e5e19a8ec6a 100644 --- a/llvm/test/CodeGen/RISCV/rv64-stackmap-frame-setup.ll +++ b/llvm/test/CodeGen/RISCV/rv64-stackmap-frame-setup.ll @@ -19,4 +19,3 @@ entry: ret void } -declare void @llvm.experimental.stackmap(i64, i32, ...) diff --git a/llvm/test/CodeGen/RISCV/rv64-stackmap-nops.ll b/llvm/test/CodeGen/RISCV/rv64-stackmap-nops.ll index bb2caeac4a976..68a7702a3df68 100644 --- a/llvm/test/CodeGen/RISCV/rv64-stackmap-nops.ll +++ b/llvm/test/CodeGen/RISCV/rv64-stackmap-nops.ll @@ -14,4 +14,3 @@ entry: ret void } -declare void @llvm.experimental.stackmap(i64, i32, ...) diff --git a/llvm/test/CodeGen/RISCV/rv64-stackmap.ll b/llvm/test/CodeGen/RISCV/rv64-stackmap.ll index 320a3aa94cd7d..92f509037139e 100644 --- a/llvm/test/CodeGen/RISCV/rv64-stackmap.ll +++ b/llvm/test/CodeGen/RISCV/rv64-stackmap.ll @@ -481,6 +481,3 @@ define void @floats(float %f, double %g, half %h, bfloat %i) { ret void } -declare void @llvm.experimental.stackmap(i64, i32, ...) -declare void @llvm.experimental.patchpoint.void(i64, i32, ptr, i32, ...) -declare i64 @llvm.experimental.patchpoint.i64(i64, i32, ptr, i32, ...) 
diff --git a/llvm/test/CodeGen/RISCV/rv64-statepoint-call-lowering-x1.ll b/llvm/test/CodeGen/RISCV/rv64-statepoint-call-lowering-x1.ll index 3ba49653cd01e..83ee899894ae4 100644 --- a/llvm/test/CodeGen/RISCV/rv64-statepoint-call-lowering-x1.ll +++ b/llvm/test/CodeGen/RISCV/rv64-statepoint-call-lowering-x1.ll @@ -11,6 +11,4 @@ entry: ret void } - declare void @return_i1() -declare token @llvm.experimental.gc.statepoint.p0(i64, i32, ptr, i32, i32, ...) diff --git a/llvm/test/CodeGen/RISCV/rv64-statepoint-call-lowering-x2.ll b/llvm/test/CodeGen/RISCV/rv64-statepoint-call-lowering-x2.ll index 9c99f64bcacc0..16e9d2d68a936 100644 --- a/llvm/test/CodeGen/RISCV/rv64-statepoint-call-lowering-x2.ll +++ b/llvm/test/CodeGen/RISCV/rv64-statepoint-call-lowering-x2.ll @@ -16,8 +16,4 @@ entry: ret i1 %call2 } - declare i1 @return_i1() -declare token @llvm.experimental.gc.statepoint.p0(i64, i32, ptr, i32, i32, ...) -declare ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token, i32, i32) -declare i1 @llvm.experimental.gc.result.i1(token) diff --git a/llvm/test/CodeGen/RISCV/rv64-statepoint-call-lowering.ll b/llvm/test/CodeGen/RISCV/rv64-statepoint-call-lowering.ll index 1a0be244c824c..53240c5cdb24d 100644 --- a/llvm/test/CodeGen/RISCV/rv64-statepoint-call-lowering.ll +++ b/llvm/test/CodeGen/RISCV/rv64-statepoint-call-lowering.ll @@ -267,17 +267,3 @@ entry: ret void } -declare token @llvm.experimental.gc.statepoint.p0(i64, i32, ptr, i32, i32, ...) 
-declare i1 @llvm.experimental.gc.result.i1(token) - -declare i32 @llvm.experimental.gc.result.i32(token) - -declare ptr @llvm.experimental.gc.result.p0(token) - -declare float @llvm.experimental.gc.result.f32(token) - -declare %struct @llvm.experimental.gc.result.struct(token) - - - -declare ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token, i32, i32) diff --git a/llvm/test/CodeGen/RISCV/rv64-trampoline-cfi.ll b/llvm/test/CodeGen/RISCV/rv64-trampoline-cfi.ll index 8a338a855c863..06a818516c149 100644 --- a/llvm/test/CodeGen/RISCV/rv64-trampoline-cfi.ll +++ b/llvm/test/CodeGen/RISCV/rv64-trampoline-cfi.ll @@ -4,8 +4,6 @@ ; RUN: llc -O0 -mtriple=riscv64-unknown-linux-gnu -mattr=+experimental-zicfilp -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=RV64-LINUX %s -declare void @llvm.init.trampoline(ptr, ptr, ptr) -declare ptr @llvm.adjust.trampoline(ptr) declare i64 @f(ptr nest, i64) define i64 @test0(i64 %n, ptr %p) nounwind { diff --git a/llvm/test/CodeGen/RISCV/rv64-trampoline.ll b/llvm/test/CodeGen/RISCV/rv64-trampoline.ll index c68fa59cd5780..2ff26e5274542 100644 --- a/llvm/test/CodeGen/RISCV/rv64-trampoline.ll +++ b/llvm/test/CodeGen/RISCV/rv64-trampoline.ll @@ -4,8 +4,6 @@ ; RUN: llc -O0 -mtriple=riscv64-unknown-linux-gnu -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=RV64-LINUX %s -declare void @llvm.init.trampoline(ptr, ptr, ptr) -declare ptr @llvm.adjust.trampoline(ptr) declare i64 @f(ptr nest, i64) define i64 @test0(i64 %n, ptr %p) nounwind { diff --git a/llvm/test/CodeGen/RISCV/rv64d-double-convert-strict.ll b/llvm/test/CodeGen/RISCV/rv64d-double-convert-strict.ll index 8396d992164e4..cbb22803a7912 100644 --- a/llvm/test/CodeGen/RISCV/rv64d-double-convert-strict.ll +++ b/llvm/test/CodeGen/RISCV/rv64d-double-convert-strict.ll @@ -22,7 +22,6 @@ define i32 @aext_fptosi(double %a) nounwind strictfp { %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %a, metadata !"fpexcept.strict") strictfp ret i32 %1 } 
-declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata) define signext i32 @sext_fptosi(double %a) nounwind strictfp { ; RV64ID-LABEL: sext_fptosi: @@ -69,7 +68,6 @@ define i32 @aext_fptoui(double %a) nounwind strictfp { %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %a, metadata !"fpexcept.strict") strictfp ret i32 %1 } -declare i32 @llvm.experimental.constrained.fptoui.i32.f64(double, metadata) define signext i32 @sext_fptoui(double %a) nounwind strictfp { ; RV64ID-LABEL: sext_fptoui: @@ -112,7 +110,6 @@ define double @uitofp_aext_i32_to_f64(i32 %a) nounwind strictfp { %1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret double %1 } -declare double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata, metadata) define double @uitofp_sext_i32_to_f64(i32 signext %a) nounwind strictfp { ; RV64ID-LABEL: uitofp_sext_i32_to_f64: @@ -155,7 +152,6 @@ define double @sitofp_aext_i32_to_f64(i32 %a) nounwind strictfp { %1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret double %1 } -declare double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata, metadata) define double @sitofp_sext_i32_to_f64(i32 signext %a) nounwind strictfp { ; RV64ID-LABEL: sitofp_sext_i32_to_f64: diff --git a/llvm/test/CodeGen/RISCV/rv64f-float-convert-strict.ll b/llvm/test/CodeGen/RISCV/rv64f-float-convert-strict.ll index 2b358ce075d60..af61656671292 100644 --- a/llvm/test/CodeGen/RISCV/rv64f-float-convert-strict.ll +++ b/llvm/test/CodeGen/RISCV/rv64f-float-convert-strict.ll @@ -22,7 +22,6 @@ define i32 @aext_fptosi(float %a) nounwind strictfp { %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %a, metadata !"fpexcept.strict") strictfp ret i32 %1 } -declare i32 @llvm.experimental.constrained.fptosi.i32.f32(float, metadata) define signext i32 
@sext_fptosi(float %a) nounwind strictfp { ; RV64IF-LABEL: sext_fptosi: @@ -69,7 +68,6 @@ define i32 @aext_fptoui(float %a) nounwind strictfp { %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %a, metadata !"fpexcept.strict") strictfp ret i32 %1 } -declare i32 @llvm.experimental.constrained.fptoui.i32.f32(float, metadata) define signext i32 @sext_fptoui(float %a) nounwind strictfp { ; RV64IF-LABEL: sext_fptoui: @@ -112,7 +110,6 @@ define float @uitofp_aext_i32_to_f32(i32 %a) nounwind strictfp { %1 = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret float %1 } -declare float @llvm.experimental.constrained.uitofp.f32.i32(i32 %a, metadata, metadata) define float @uitofp_sext_i32_to_f32(i32 signext %a) nounwind strictfp { ; RV64IF-LABEL: uitofp_sext_i32_to_f32: @@ -155,7 +152,6 @@ define float @sitofp_aext_i32_to_f32(i32 %a) nounwind strictfp { %1 = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret float %1 } -declare float @llvm.experimental.constrained.sitofp.f32.i32(i32, metadata, metadata) define float @sitofp_sext_i32_to_f32(i32 signext %a) nounwind strictfp { ; RV64IF-LABEL: sitofp_sext_i32_to_f32: diff --git a/llvm/test/CodeGen/RISCV/rv64i-double-softfloat.ll b/llvm/test/CodeGen/RISCV/rv64i-double-softfloat.ll index 6fdf2a3a939ce..5b9617cba08b4 100644 --- a/llvm/test/CodeGen/RISCV/rv64i-double-softfloat.ll +++ b/llvm/test/CodeGen/RISCV/rv64i-double-softfloat.ll @@ -7,9 +7,6 @@ ; The test cases check that we use the si versions of the conversions from ; double. 
-declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata) -declare i32 @llvm.experimental.constrained.fptoui.i32.f64(double, metadata) - define i32 @strict_fp64_to_ui32(double %a) nounwind strictfp { ; RV64I-LABEL: strict_fp64_to_ui32: ; RV64I: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rv64i-single-softfloat.ll b/llvm/test/CodeGen/RISCV/rv64i-single-softfloat.ll index b645b621c75c6..c5ecb68b8a270 100644 --- a/llvm/test/CodeGen/RISCV/rv64i-single-softfloat.ll +++ b/llvm/test/CodeGen/RISCV/rv64i-single-softfloat.ll @@ -5,9 +5,6 @@ ; The test cases check that we use the si versions of the conversions from ; double. -declare i32 @llvm.experimental.constrained.fptoui.i32.f32(float, metadata) -declare i32 @llvm.experimental.constrained.fptosi.i32.f32(float, metadata) - define i32 @strict_fp32_to_ui32(float %a) nounwind strictfp { ; RV64I-LABEL: strict_fp32_to_ui32: ; RV64I: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rv64p.ll b/llvm/test/CodeGen/RISCV/rv64p.ll index f937f44f13320..ec242294c3036 100644 --- a/llvm/test/CodeGen/RISCV/rv64p.ll +++ b/llvm/test/CodeGen/RISCV/rv64p.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+experimental-p -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -declare i32 @llvm.ctlz.i32(i32, i1) - define signext i32 @ctlz_i32(i32 signext %a) nounwind { ; CHECK-LABEL: ctlz_i32: ; CHECK: # %bb.0: @@ -66,8 +64,6 @@ define i32 @ctlz_lshr_i32(i32 signext %a) { ret i32 %2 } -declare i64 @llvm.ctlz.i64(i64, i1) - define i64 @ctlz_i64(i64 %a) nounwind { ; CHECK-LABEL: ctlz_i64: ; CHECK: # %bb.0: @@ -77,8 +73,6 @@ define i64 @ctlz_i64(i64 %a) nounwind { ret i64 %1 } -declare i32 @llvm.cttz.i32(i32, i1) - define signext i32 @cttz_i32(i32 signext %a) nounwind { ; CHECK-LABEL: cttz_i32: ; CHECK: # %bb.0: @@ -151,8 +145,6 @@ define signext i32 @ffs_i32(i32 signext %a) nounwind { ret i32 %4 } -declare i64 @llvm.cttz.i64(i64, i1) - define i64 @cttz_i64(i64 %a) nounwind { ; CHECK-LABEL: cttz_i64: ; CHECK: # 
%bb.0: @@ -292,8 +284,6 @@ define i64 @maxu_i64(i64 %a, i64 %b) nounwind { ret i64 %cond } -declare i32 @llvm.abs.i32(i32, i1 immarg) - define i32 @abs_i32(i32 %x) { ; CHECK-LABEL: abs_i32: ; CHECK: # %bb.0: @@ -312,8 +302,6 @@ define signext i32 @abs_i32_sext(i32 signext %x) { ret i32 %abs } -declare i64 @llvm.abs.i64(i64, i1 immarg) - define i64 @abs_i64(i64 %x) { ; CHECK-LABEL: abs_i64: ; CHECK: # %bb.0: @@ -323,8 +311,6 @@ define i64 @abs_i64(i64 %x) { ret i64 %abs } -declare i32 @llvm.bswap.i32(i32) - define signext i32 @bswap_i32(i32 signext %a) nounwind { ; CHECK-LABEL: bswap_i32: ; CHECK: # %bb.0: @@ -348,8 +334,6 @@ define void @bswap_i32_nosext(i32 signext %a, ptr %x) nounwind { ret void } -declare i64 @llvm.bswap.i64(i64) - define i64 @bswap_i64(i64 %a) { ; CHECK-LABEL: bswap_i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv64xtheadbb.ll b/llvm/test/CodeGen/RISCV/rv64xtheadbb.ll index c5707987408f7..c62fb0ae63555 100644 --- a/llvm/test/CodeGen/RISCV/rv64xtheadbb.ll +++ b/llvm/test/CodeGen/RISCV/rv64xtheadbb.ll @@ -6,8 +6,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+xtheadbb,+b -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefixes=CHECK,RV64XTHEADBB,RV64XTHEADBB-B -declare i32 @llvm.ctlz.i32(i32, i1) - define signext i32 @ctlz_i32(i32 signext %a) nounwind { ; RV64I-LABEL: ctlz_i32: ; RV64I: # %bb.0: @@ -321,8 +319,6 @@ define i32 @ctlz_lshr_i32(i32 signext %a) { ret i32 %2 } -declare i64 @llvm.ctlz.i64(i64, i1) - define i64 @ctlz_i64(i64 %a) nounwind { ; RV64I-LABEL: ctlz_i64: ; RV64I: # %bb.0: @@ -388,8 +384,6 @@ define i64 @ctlz_i64(i64 %a) nounwind { ret i64 %1 } -declare i32 @llvm.cttz.i32(i32, i1) - define signext i32 @cttz_i32(i32 signext %a) nounwind { ; RV64I-LABEL: cttz_i32: ; RV64I: # %bb.0: @@ -625,8 +619,6 @@ define signext i32 @ffs_i32(i32 signext %a) nounwind { ret i32 %4 } -declare i64 @llvm.cttz.i64(i64, i1) - define i64 @cttz_i64(i64 %a) nounwind { ; RV64I-LABEL: cttz_i64: ; RV64I: # %bb.0: @@ -1020,8 +1012,6 @@ 
define i64 @zext_i64_srliw(i64 %a) nounwind { ret i64 %and } -declare i32 @llvm.bswap.i32(i32) - define signext i32 @bswap_i32(i32 signext %a) nounwind { ; RV64I-LABEL: bswap_i32: ; RV64I: # %bb.0: @@ -1074,8 +1064,6 @@ define void @bswap_i32_nosext(i32 signext %a, ptr %x) nounwind { ret void } -declare i64 @llvm.bswap.i64(i64) - define i64 @bswap_i64(i64 %a) { ; RV64I-LABEL: bswap_i64: ; RV64I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv64zbb-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zbb-intrinsic.ll index 500d51be80a66..52415a31effad 100644 --- a/llvm/test/CodeGen/RISCV/rv64zbb-intrinsic.ll +++ b/llvm/test/CodeGen/RISCV/rv64zbb-intrinsic.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zbb -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV64ZBB -declare i32 @llvm.riscv.orc.b.i32(i32) - define signext i32 @orcb32(i32 signext %a) nounwind { ; RV64ZBB-LABEL: orcb32: ; RV64ZBB: # %bb.0: @@ -43,8 +41,6 @@ define signext i32 @orcb32_knownbits(i32 signext %a) nounwind { ret i32 %tmp5 } -declare i64 @llvm.riscv.orc.b.i64(i64) - define i64 @orcb64(i64 %a) nounwind { ; RV64ZBB-LABEL: orcb64: ; RV64ZBB: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll b/llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll index f2c95f855e178..4460773290b7e 100644 --- a/llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll +++ b/llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll @@ -219,8 +219,6 @@ define i64 @inverted_masked_merge_i64(i64 %x, i64 %y, i64 %z) nounwind { ret i64 %not } -declare i32 @llvm.fshl.i32(i32, i32, i32) - define signext i32 @rol_i32(i32 signext %a, i32 signext %b) nounwind { ; RV64I-LABEL: rol_i32: ; RV64I: # %bb.0: @@ -278,8 +276,6 @@ define signext i32 @rol_i32_neg_constant_rhs(i32 signext %a) nounwind { ret i32 %1 } -declare i64 @llvm.fshl.i64(i64, i64, i64) - define i64 @rol_i64(i64 %a, i64 %b) nounwind { ; RV64I-LABEL: rol_i64: ; RV64I: # %bb.0: @@ -297,8 +293,6 @@ define i64 @rol_i64(i64 %a, i64 %b) nounwind { ret i64 %or } -declare i32 @llvm.fshr.i32(i32, 
i32, i32) - define signext i32 @ror_i32(i32 signext %a, i32 signext %b) nounwind { ; RV64I-LABEL: ror_i32: ; RV64I: # %bb.0: @@ -356,8 +350,6 @@ define signext i32 @ror_i32_neg_constant_rhs(i32 signext %a) nounwind { ret i32 %1 } -declare i64 @llvm.fshr.i64(i64, i64, i64) - define i64 @ror_i64(i64 %a, i64 %b) nounwind { ; RV64I-LABEL: ror_i64: ; RV64I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv64zbb.ll b/llvm/test/CodeGen/RISCV/rv64zbb.ll index d8b7bfcbceb27..b3581459c2622 100644 --- a/llvm/test/CodeGen/RISCV/rv64zbb.ll +++ b/llvm/test/CodeGen/RISCV/rv64zbb.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zbb -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefixes=CHECK,RV64ZBB -declare i32 @llvm.ctlz.i32(i32, i1) - define signext i32 @ctlz_i32(i32 signext %a) nounwind { ; RV64I-LABEL: ctlz_i32: ; RV64I: # %bb.0: @@ -278,8 +276,6 @@ define i32 @ctlz_lshr_i32(i32 signext %a) { ret i32 %2 } -declare i64 @llvm.ctlz.i64(i64, i1) - define i64 @ctlz_i64(i64 %a) nounwind { ; RV64I-LABEL: ctlz_i64: ; RV64I: # %bb.0: @@ -340,8 +336,6 @@ define i64 @ctlz_i64(i64 %a) nounwind { ret i64 %1 } -declare i32 @llvm.cttz.i32(i32, i1) - define signext i32 @cttz_i32(i32 signext %a) nounwind { ; RV64I-LABEL: cttz_i32: ; RV64I: # %bb.0: @@ -526,8 +520,6 @@ define signext i32 @ffs_i32(i32 signext %a) nounwind { ret i32 %4 } -declare i64 @llvm.cttz.i64(i64, i1) - define i64 @cttz_i64(i64 %a) nounwind { ; RV64I-LABEL: cttz_i64: ; RV64I: # %bb.0: @@ -560,8 +552,6 @@ define i64 @cttz_i64(i64 %a) nounwind { ret i64 %1 } -declare i32 @llvm.ctpop.i32(i32) - define signext i32 @ctpop_i32(i32 signext %a) nounwind { ; RV64I-LABEL: ctpop_i32: ; RV64I: # %bb.0: @@ -709,8 +699,6 @@ define signext i32 @ctpop_i32_load(ptr %p) nounwind { ret i32 %1 } -declare <2 x i32> @llvm.ctpop.v2i32(<2 x i32>) - define <2 x i32> @ctpop_v2i32(<2 x i32> %a) nounwind { ; RV64I-LABEL: ctpop_v2i32: ; RV64I: # %bb.0: @@ -831,8 +819,6 @@ define <2 x i1> @ctpop_v2i32_ne_one(<2 x i32> %a) nounwind { 
ret <2 x i1> %2 } -declare i64 @llvm.ctpop.i64(i64) - define i64 @ctpop_i64(i64 %a) nounwind { ; RV64I-LABEL: ctpop_i64: ; RV64I: # %bb.0: @@ -951,8 +937,6 @@ define i1 @ctpop_i64_ne_one(i64 %a) nounwind { ret i1 %2 } -declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>) - define <2 x i64> @ctpop_v2i64(<2 x i64> %a) nounwind { ; RV64I-LABEL: ctpop_v2i64: ; RV64I: # %bb.0: @@ -1283,8 +1267,6 @@ define i64 @maxu_i64(i64 %a, i64 %b) nounwind { ret i64 %cond } -declare i32 @llvm.abs.i32(i32, i1 immarg) - define i32 @abs_i32(i32 %x) { ; RV64I-LABEL: abs_i32: ; RV64I: # %bb.0: @@ -1320,8 +1302,6 @@ define signext i32 @abs_i32_sext(i32 signext %x) { ret i32 %abs } -declare i64 @llvm.abs.i64(i64, i1 immarg) - define i64 @abs_i64(i64 %x) { ; RV64I-LABEL: abs_i64: ; RV64I: # %bb.0: @@ -1369,8 +1349,6 @@ define i64 @zexth_i64(i64 %a) nounwind { ret i64 %and } -declare i32 @llvm.bswap.i32(i32) - define signext i32 @bswap_i32(i32 signext %a) nounwind { ; RV64I-LABEL: bswap_i32: ; RV64I: # %bb.0: @@ -1425,8 +1403,6 @@ define void @bswap_i32_nosext(i32 signext %a, ptr %x) nounwind { ret void } -declare i64 @llvm.bswap.i64(i64) - define i64 @bswap_i64(i64 %a) { ; RV64I-LABEL: bswap_i64: ; RV64I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv64zbc-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zbc-intrinsic.ll index ef42c15b6b986..9dbbcb43f5e95 100644 --- a/llvm/test/CodeGen/RISCV/rv64zbc-intrinsic.ll +++ b/llvm/test/CodeGen/RISCV/rv64zbc-intrinsic.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zbc -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV64ZBC -declare i64 @llvm.riscv.clmulr.i64(i64 %a, i64 %b) - define i64 @clmul64r(i64 %a, i64 %b) nounwind { ; RV64ZBC-LABEL: clmul64r: ; RV64ZBC: # %bb.0: @@ -13,8 +11,6 @@ define i64 @clmul64r(i64 %a, i64 %b) nounwind { ret i64 %tmp } -declare i32 @llvm.riscv.clmulr.i32(i32 %a, i32 %b) - define signext i32 @clmul32r(i32 signext %a, i32 signext %b) nounwind { ; RV64ZBC-LABEL: clmul32r: ; RV64ZBC: # %bb.0: diff --git 
a/llvm/test/CodeGen/RISCV/rv64zbc-zbkc-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zbc-zbkc-intrinsic.ll index aa9e89bc20953..83bcd57c7094f 100644 --- a/llvm/test/CodeGen/RISCV/rv64zbc-zbkc-intrinsic.ll +++ b/llvm/test/CodeGen/RISCV/rv64zbc-zbkc-intrinsic.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zbkc -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV64ZBC-ZBKC -declare i64 @llvm.riscv.clmul.i64(i64 %a, i64 %b) - define i64 @clmul64(i64 %a, i64 %b) nounwind { ; RV64ZBC-ZBKC-LABEL: clmul64: ; RV64ZBC-ZBKC: # %bb.0: @@ -15,8 +13,6 @@ define i64 @clmul64(i64 %a, i64 %b) nounwind { ret i64 %tmp } -declare i64 @llvm.riscv.clmulh.i64(i64 %a, i64 %b) - define i64 @clmul64h(i64 %a, i64 %b) nounwind { ; RV64ZBC-ZBKC-LABEL: clmul64h: ; RV64ZBC-ZBKC: # %bb.0: @@ -26,8 +22,6 @@ define i64 @clmul64h(i64 %a, i64 %b) nounwind { ret i64 %tmp } -declare i32 @llvm.riscv.clmul.i32(i32 %a, i32 %b) - define signext i32 @clmul32(i32 signext %a, i32 signext %b) nounwind { ; RV64ZBC-ZBKC-LABEL: clmul32: ; RV64ZBC-ZBKC: # %bb.0: @@ -38,8 +32,6 @@ define signext i32 @clmul32(i32 signext %a, i32 signext %b) nounwind { ret i32 %tmp } -declare i32 @llvm.riscv.clmulh.i32(i32 %a, i32 %b) - define signext i32 @clmul32h(i32 signext %a, i32 signext %b) nounwind { ; RV64ZBC-ZBKC-LABEL: clmul32h: ; RV64ZBC-ZBKC: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv64zbkb-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zbkb-intrinsic.ll index 3169f65f64671..cd9fc2adc80a6 100644 --- a/llvm/test/CodeGen/RISCV/rv64zbkb-intrinsic.ll +++ b/llvm/test/CodeGen/RISCV/rv64zbkb-intrinsic.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zbkb -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV64ZBKB -declare i64 @llvm.riscv.brev8.i64(i64) - define i64 @brev8(i64 %a) nounwind { ; RV64ZBKB-LABEL: brev8: ; RV64ZBKB: # %bb.0: @@ -25,8 +23,6 @@ define zeroext i16 @brev8_knownbits(i16 zeroext %a) nounwind { ret i16 %trunc } -declare i64 @llvm.bswap.i64(i64) - define i64 
@rev8_i64(i64 %a) { ; RV64ZBKB-LABEL: rev8_i64: ; RV64ZBKB: # %bb.0: @@ -36,8 +32,6 @@ define i64 @rev8_i64(i64 %a) { ret i64 %1 } -declare i32 @llvm.riscv.brev8.i32(i32) - define signext i32 @brev8_i32(i32 signext %a) nounwind { ; RV64ZBKB-LABEL: brev8_i32: ; RV64ZBKB: # %bb.0: @@ -60,8 +54,6 @@ define zeroext i16 @brev8_i32_knownbits(i16 zeroext %a) nounwind { ret i16 %trunc } -declare i32 @llvm.bswap.i32(i32) - define signext i32 @rev8_i32(i32 signext %a) { ; RV64ZBKB-LABEL: rev8_i32: ; RV64ZBKB: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv64zbkx-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zbkx-intrinsic.ll index f4186606c14f9..72afe21aa4e23 100644 --- a/llvm/test/CodeGen/RISCV/rv64zbkx-intrinsic.ll +++ b/llvm/test/CodeGen/RISCV/rv64zbkx-intrinsic.ll @@ -1,8 +1,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zbkx -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV64ZBKX -declare i64 @llvm.riscv.xperm8.i64(i64 %a, i64 %b) - define i64 @xperm8(i64 %a, i64 %b) nounwind { ; RV64ZBKX-LABEL: xperm8: ; RV64ZBKX: # %bb.0: @@ -12,8 +10,6 @@ define i64 @xperm8(i64 %a, i64 %b) nounwind { ret i64 %tmp } -declare i64 @llvm.riscv.xperm4.i64(i64 %a, i64 %b) - define i64 @xperm4(i64 %a, i64 %b) nounwind { ; RV64ZBKX-LABEL: xperm4: ; RV64ZBKX: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv64zfh-half-convert-strict.ll b/llvm/test/CodeGen/RISCV/rv64zfh-half-convert-strict.ll index afc41fe86b838..12767f64048b4 100644 --- a/llvm/test/CodeGen/RISCV/rv64zfh-half-convert-strict.ll +++ b/llvm/test/CodeGen/RISCV/rv64zfh-half-convert-strict.ll @@ -24,7 +24,6 @@ define i32 @aext_fptosi(half %a) nounwind strictfp { %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %a, metadata !"fpexcept.strict") ret i32 %1 } -declare i32 @llvm.experimental.constrained.fptosi.i32.f16(half, metadata) define signext i32 @sext_fptosi(half %a) nounwind strictfp { ; RV64IZFH-LABEL: sext_fptosi: @@ -71,7 +70,6 @@ define i32 @aext_fptoui(half %a) nounwind strictfp { %1 = call i32 
@llvm.experimental.constrained.fptoui.i32.f16(half %a, metadata !"fpexcept.strict") ret i32 %1 } -declare i32 @llvm.experimental.constrained.fptoui.i32.f16(half, metadata) define signext i32 @sext_fptoui(half %a) nounwind strictfp { ; RV64IZFH-LABEL: sext_fptoui: @@ -114,7 +112,6 @@ define half @uitofp_aext_i32_to_f16(i32 %a) nounwind strictfp { %1 = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret half %1 } -declare half @llvm.experimental.constrained.uitofp.f16.i32(i32 %a, metadata, metadata) define half @uitofp_sext_i32_to_f16(i32 signext %a) nounwind strictfp { ; RV64IZFH-LABEL: uitofp_sext_i32_to_f16: @@ -157,7 +154,6 @@ define half @sitofp_aext_i32_to_f16(i32 %a) nounwind strictfp { %1 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret half %1 } -declare half @llvm.experimental.constrained.sitofp.f16.i32(i32 %a, metadata, metadata) define half @sitofp_sext_i32_to_f16(i32 signext %a) nounwind strictfp { ; RV64IZFH-LABEL: sitofp_sext_i32_to_f16: diff --git a/llvm/test/CodeGen/RISCV/rv64zfh-half-intrinsics.ll b/llvm/test/CodeGen/RISCV/rv64zfh-half-intrinsics.ll index a453e3b0f1c53..633634fab2e13 100644 --- a/llvm/test/CodeGen/RISCV/rv64zfh-half-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/rv64zfh-half-intrinsics.ll @@ -14,8 +14,6 @@ ; These intrinsics require half and i64 to be legal types. 
-declare i64 @llvm.llrint.i64.f16(half) - define i64 @llrint_f16(half %a) nounwind { ; RV64IZFH-LABEL: llrint_f16: ; RV64IZFH: # %bb.0: @@ -40,8 +38,6 @@ define i64 @llrint_f16(half %a) nounwind { ret i64 %1 } -declare i64 @llvm.llround.i64.f16(half) - define i64 @llround_f16(half %a) nounwind { ; RV64IZFH-LABEL: llround_f16: ; RV64IZFH: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv64zfhmin-half-convert-strict.ll b/llvm/test/CodeGen/RISCV/rv64zfhmin-half-convert-strict.ll index 41d66382679f1..91f8af9d92bba 100644 --- a/llvm/test/CodeGen/RISCV/rv64zfhmin-half-convert-strict.ll +++ b/llvm/test/CodeGen/RISCV/rv64zfhmin-half-convert-strict.ll @@ -23,7 +23,6 @@ define i32 @aext_fptosi(half %a) nounwind strictfp { %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %a, metadata !"fpexcept.strict") ret i32 %1 } -declare i32 @llvm.experimental.constrained.fptosi.i32.f16(half, metadata) define signext i32 @sext_fptosi(half %a) nounwind strictfp { ; RV64IZFHMIN-LABEL: sext_fptosi: @@ -76,7 +75,6 @@ define i32 @aext_fptoui(half %a) nounwind strictfp { %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f16(half %a, metadata !"fpexcept.strict") ret i32 %1 } -declare i32 @llvm.experimental.constrained.fptoui.i32.f16(half, metadata) define signext i32 @sext_fptoui(half %a) nounwind strictfp { ; RV64IZFHMIN-LABEL: sext_fptoui: @@ -125,7 +123,6 @@ define half @uitofp_aext_i32_to_f16(i32 %a) nounwind strictfp { %1 = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret half %1 } -declare half @llvm.experimental.constrained.uitofp.f16.i32(i32 %a, metadata, metadata) define half @uitofp_sext_i32_to_f16(i32 signext %a) nounwind strictfp { ; RV64IZFHMIN-LABEL: uitofp_sext_i32_to_f16: @@ -174,7 +171,6 @@ define half @sitofp_aext_i32_to_f16(i32 %a) nounwind strictfp { %1 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret half 
%1 } -declare half @llvm.experimental.constrained.sitofp.f16.i32(i32 %a, metadata, metadata) define half @sitofp_sext_i32_to_f16(i32 signext %a) nounwind strictfp { ; RV64IZFHMIN-LABEL: sitofp_sext_i32_to_f16: diff --git a/llvm/test/CodeGen/RISCV/rv64zfhmin-half-intrinsics.ll b/llvm/test/CodeGen/RISCV/rv64zfhmin-half-intrinsics.ll index 7d8f2b03b6721..266d7b01fe5f1 100644 --- a/llvm/test/CodeGen/RISCV/rv64zfhmin-half-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/rv64zfhmin-half-intrinsics.ll @@ -14,8 +14,6 @@ ; These intrinsics require half and i64 to be legal types. -declare i64 @llvm.llrint.i64.f16(half) - define i64 @llrint_f16(half %a) nounwind { ; CHECKIZFHMIN-LABEL: llrint_f16: ; CHECKIZFHMIN: # %bb.0: @@ -32,8 +30,6 @@ define i64 @llrint_f16(half %a) nounwind { ret i64 %1 } -declare i64 @llvm.llround.i64.f16(half) - define i64 @llround_f16(half %a) nounwind { ; CHECKIZFHMIN-LABEL: llround_f16: ; CHECKIZFHMIN: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv64zimop-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zimop-intrinsic.ll index a407fe552ff74..fbcfe34a835f3 100644 --- a/llvm/test/CodeGen/RISCV/rv64zimop-intrinsic.ll +++ b/llvm/test/CodeGen/RISCV/rv64zimop-intrinsic.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zimop -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV64ZIMOP -declare i64 @llvm.riscv.mopr.i64(i64 %a, i64 %b) - define i64 @mopr0_64(i64 %a) nounwind { ; RV64ZIMOP-LABEL: mopr0_64: ; RV64ZIMOP: # %bb.0: @@ -22,8 +20,6 @@ define i64 @mopr31_64(i64 %a) nounwind { ret i64 %tmp } -declare i64 @llvm.riscv.moprr.i64(i64 %a, i64 %b, i64 %c) - define i64 @moprr0_64(i64 %a, i64 %b) nounwind { ; RV64ZIMOP-LABEL: moprr0_64: ; RV64ZIMOP: # %bb.0: @@ -42,8 +38,6 @@ define i64 @moprr7_64(i64 %a, i64 %b) nounwind { ret i64 %tmp } -declare i32 @llvm.riscv.mopr.i32(i32 %a, i32 %b) - define signext i32 @mopr0_32(i32 signext %a) nounwind { ; RV64ZIMOP-LABEL: mopr0_32: ; RV64ZIMOP: # %bb.0: @@ -64,8 +58,6 @@ define signext i32 @mopr31_32(i32 
signext %a) nounwind { ret i32 %tmp } -declare i32 @llvm.riscv.moprr.i32(i32 %a, i32 %b, i32 %c) - define signext i32 @moprr0_32(i32 signext %a, i32 signext %b) nounwind { ; RV64ZIMOP-LABEL: moprr0_32: ; RV64ZIMOP: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv64zknd-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zknd-intrinsic.ll index ea922ed6775a0..8e9511c9db31b 100644 --- a/llvm/test/CodeGen/RISCV/rv64zknd-intrinsic.ll +++ b/llvm/test/CodeGen/RISCV/rv64zknd-intrinsic.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zknd -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV64ZKND -declare i64 @llvm.riscv.aes64ds(i64, i64); - define i64 @aes64ds(i64 %a, i64 %b) nounwind { ; RV64ZKND-LABEL: aes64ds: ; RV64ZKND: # %bb.0: @@ -13,8 +11,6 @@ define i64 @aes64ds(i64 %a, i64 %b) nounwind { ret i64 %val } -declare i64 @llvm.riscv.aes64dsm(i64, i64); - define i64 @aes64dsm(i64 %a, i64 %b) nounwind { ; RV64ZKND-LABEL: aes64dsm: ; RV64ZKND: # %bb.0: @@ -24,8 +20,6 @@ define i64 @aes64dsm(i64 %a, i64 %b) nounwind { ret i64 %val } -declare i64 @llvm.riscv.aes64im(i64); - define i64 @aes64im(i64 %a) nounwind { ; RV64ZKND-LABEL: aes64im: ; RV64ZKND: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv64zknd-zkne-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zknd-zkne-intrinsic.ll index 075097037a5b3..7035f6e38d72f 100644 --- a/llvm/test/CodeGen/RISCV/rv64zknd-zkne-intrinsic.ll +++ b/llvm/test/CodeGen/RISCV/rv64zknd-zkne-intrinsic.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zkne -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV64ZKND-ZKNE -declare i64 @llvm.riscv.aes64ks2(i64, i64); - define i64 @aes64ks2(i64 %a, i64 %b) nounwind { ; RV64ZKND-ZKNE-LABEL: aes64ks2: ; RV64ZKND-ZKNE: # %bb.0: @@ -15,8 +13,6 @@ define i64 @aes64ks2(i64 %a, i64 %b) nounwind { ret i64 %val } -declare i64 @llvm.riscv.aes64ks1i(i64, i32); - define i64 @aes64ks1i(i64 %a) nounwind { ; RV64ZKND-ZKNE-LABEL: aes64ks1i: ; RV64ZKND-ZKNE: # %bb.0: diff --git 
a/llvm/test/CodeGen/RISCV/rv64zkne-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zkne-intrinsic.ll index eee03a0c4469b..f1280c2de4af8 100644 --- a/llvm/test/CodeGen/RISCV/rv64zkne-intrinsic.ll +++ b/llvm/test/CodeGen/RISCV/rv64zkne-intrinsic.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zkne -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV64ZKNE -declare i64 @llvm.riscv.aes64es(i64, i64); - define i64 @aes64es(i64 %a, i64 %b) nounwind { ; RV64ZKNE-LABEL: aes64es: ; RV64ZKNE: # %bb.0: @@ -13,8 +11,6 @@ define i64 @aes64es(i64 %a, i64 %b) nounwind { ret i64 %val } -declare i64 @llvm.riscv.aes64esm(i64, i64); - define i64 @aes64esm(i64 %a, i64 %b) nounwind { ; RV64ZKNE-LABEL: aes64esm: ; RV64ZKNE: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv64zknh-intrinsic-autoupgrade.ll b/llvm/test/CodeGen/RISCV/rv64zknh-intrinsic-autoupgrade.ll index b96524b3294fc..72051e9af7f4d 100644 --- a/llvm/test/CodeGen/RISCV/rv64zknh-intrinsic-autoupgrade.ll +++ b/llvm/test/CodeGen/RISCV/rv64zknh-intrinsic-autoupgrade.ll @@ -2,9 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zknh -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV64ZKNH - -declare i64 @llvm.riscv.sha256sig0.i64(i64); - define i64 @sha256sig0_i64(i64 %a) nounwind { ; RV64ZKNH-LABEL: sha256sig0_i64: ; RV64ZKNH: # %bb.0: @@ -14,8 +11,6 @@ define i64 @sha256sig0_i64(i64 %a) nounwind { ret i64 %val } -declare i64 @llvm.riscv.sha256sig1.i64(i64); - define i64 @sha256sig1_i64(i64 %a) nounwind { ; RV64ZKNH-LABEL: sha256sig1_i64: ; RV64ZKNH: # %bb.0: @@ -25,8 +20,6 @@ define i64 @sha256sig1_i64(i64 %a) nounwind { ret i64 %val } -declare i64 @llvm.riscv.sha256sum0.i64(i64); - define i64 @sha256sum0_i64(i64 %a) nounwind { ; RV64ZKNH-LABEL: sha256sum0_i64: ; RV64ZKNH: # %bb.0: @@ -36,8 +29,6 @@ define i64 @sha256sum0_i64(i64 %a) nounwind { ret i64 %val } -declare i64 @llvm.riscv.sha256sum1.i64(i64); - define i64 @sha256sum1_i64(i64 %a) nounwind { ; RV64ZKNH-LABEL: sha256sum1_i64: ; RV64ZKNH: # 
%bb.0: @@ -47,8 +38,6 @@ define i64 @sha256sum1_i64(i64 %a) nounwind { ret i64 %val } -declare i64 @llvm.riscv.sha512sig0(i64); - define i64 @sha512sig0(i64 %a) nounwind { ; RV64ZKNH-LABEL: sha512sig0: ; RV64ZKNH: # %bb.0: @@ -58,8 +47,6 @@ define i64 @sha512sig0(i64 %a) nounwind { ret i64 %val } -declare i64 @llvm.riscv.sha512sig1(i64); - define i64 @sha512sig1(i64 %a) nounwind { ; RV64ZKNH-LABEL: sha512sig1: ; RV64ZKNH: # %bb.0: @@ -69,8 +56,6 @@ define i64 @sha512sig1(i64 %a) nounwind { ret i64 %val } -declare i64 @llvm.riscv.sha512sum0(i64); - define i64 @sha512sum0(i64 %a) nounwind { ; RV64ZKNH-LABEL: sha512sum0: ; RV64ZKNH: # %bb.0: @@ -80,8 +65,6 @@ define i64 @sha512sum0(i64 %a) nounwind { ret i64 %val } -declare i64 @llvm.riscv.sha512sum1(i64); - define i64 @sha512sum1(i64 %a) nounwind { ; RV64ZKNH-LABEL: sha512sum1: ; RV64ZKNH: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv64zknh-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zknh-intrinsic.ll index 866995edbfa47..b563600724392 100644 --- a/llvm/test/CodeGen/RISCV/rv64zknh-intrinsic.ll +++ b/llvm/test/CodeGen/RISCV/rv64zknh-intrinsic.ll @@ -2,9 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zknh -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV64ZKNH - -declare i32 @llvm.riscv.sha256sig0(i32); - define signext i32 @sha256sig0_i32(i32 signext %a) nounwind { ; RV64ZKNH-LABEL: sha256sig0_i32: ; RV64ZKNH: # %bb.0: @@ -14,8 +11,6 @@ define signext i32 @sha256sig0_i32(i32 signext %a) nounwind { ret i32 %val } -declare i32 @llvm.riscv.sha256sig1(i32); - define signext i32 @sha256sig1_i32(i32 signext %a) nounwind { ; RV64ZKNH-LABEL: sha256sig1_i32: ; RV64ZKNH: # %bb.0: @@ -25,8 +20,6 @@ define signext i32 @sha256sig1_i32(i32 signext %a) nounwind { ret i32 %val } -declare i32 @llvm.riscv.sha256sum0(i32); - define signext i32 @sha256sum0_i32(i32 signext %a) nounwind { ; RV64ZKNH-LABEL: sha256sum0_i32: ; RV64ZKNH: # %bb.0: @@ -36,8 +29,6 @@ define signext i32 @sha256sum0_i32(i32 signext %a) nounwind { 
ret i32 %val } -declare i32 @llvm.riscv.sha256sum1(i32); - define signext i32 @sha256sum1_i32(i32 signext %a) nounwind { ; RV64ZKNH-LABEL: sha256sum1_i32: ; RV64ZKNH: # %bb.0: @@ -47,8 +38,6 @@ define signext i32 @sha256sum1_i32(i32 signext %a) nounwind { ret i32 %val } -declare i64 @llvm.riscv.sha512sig0(i64); - define i64 @sha512sig0(i64 %a) nounwind { ; RV64ZKNH-LABEL: sha512sig0: ; RV64ZKNH: # %bb.0: @@ -58,8 +47,6 @@ define i64 @sha512sig0(i64 %a) nounwind { ret i64 %val } -declare i64 @llvm.riscv.sha512sig1(i64); - define i64 @sha512sig1(i64 %a) nounwind { ; RV64ZKNH-LABEL: sha512sig1: ; RV64ZKNH: # %bb.0: @@ -69,8 +56,6 @@ define i64 @sha512sig1(i64 %a) nounwind { ret i64 %val } -declare i64 @llvm.riscv.sha512sum0(i64); - define i64 @sha512sum0(i64 %a) nounwind { ; RV64ZKNH-LABEL: sha512sum0: ; RV64ZKNH: # %bb.0: @@ -80,8 +65,6 @@ define i64 @sha512sum0(i64 %a) nounwind { ret i64 %val } -declare i64 @llvm.riscv.sha512sum1(i64); - define i64 @sha512sum1(i64 %a) nounwind { ; RV64ZKNH-LABEL: sha512sum1: ; RV64ZKNH: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv64zksed-intrinsic-autoupgrade.ll b/llvm/test/CodeGen/RISCV/rv64zksed-intrinsic-autoupgrade.ll index 2fa7601906067..ad6ac276f0d5b 100644 --- a/llvm/test/CodeGen/RISCV/rv64zksed-intrinsic-autoupgrade.ll +++ b/llvm/test/CodeGen/RISCV/rv64zksed-intrinsic-autoupgrade.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zksed -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV64ZKSED -declare i64 @llvm.riscv.sm4ks.i64(i64, i64, i8); - define i64 @sm4ks_i64(i64 %a, i64 %b) nounwind { ; RV64ZKSED-LABEL: sm4ks_i64: ; RV64ZKSED: # %bb.0: @@ -13,8 +11,6 @@ define i64 @sm4ks_i64(i64 %a, i64 %b) nounwind { ret i64 %val } -declare i64 @llvm.riscv.sm4ed.i64(i64, i64, i8); - define i64 @sm4ed_i64(i64 %a, i64 %b) nounwind { ; RV64ZKSED-LABEL: sm4ed_i64: ; RV64ZKSED: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv64zksed-intrinsic-autoupgrade2.ll 
b/llvm/test/CodeGen/RISCV/rv64zksed-intrinsic-autoupgrade2.ll index c942ff884450b..0defffd2e899d 100644 --- a/llvm/test/CodeGen/RISCV/rv64zksed-intrinsic-autoupgrade2.ll +++ b/llvm/test/CodeGen/RISCV/rv64zksed-intrinsic-autoupgrade2.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zksed -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV64ZKSED -declare i64 @llvm.riscv.sm4ks.i64(i64, i64, i32); - define i64 @sm4ks_i64(i64 %a, i64 %b) nounwind { ; RV64ZKSED-LABEL: sm4ks_i64: ; RV64ZKSED: # %bb.0: @@ -13,8 +11,6 @@ define i64 @sm4ks_i64(i64 %a, i64 %b) nounwind { ret i64 %val } -declare i64 @llvm.riscv.sm4ed.i64(i64, i64, i32); - define i64 @sm4ed_i64(i64 %a, i64 %b) nounwind { ; RV64ZKSED-LABEL: sm4ed_i64: ; RV64ZKSED: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv64zksed-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zksed-intrinsic.ll index bc7807350fcb2..90864600599ba 100644 --- a/llvm/test/CodeGen/RISCV/rv64zksed-intrinsic.ll +++ b/llvm/test/CodeGen/RISCV/rv64zksed-intrinsic.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zksed -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV64ZKSED -declare i32 @llvm.riscv.sm4ks(i32, i32, i32); - define signext i32 @sm4ks_i32(i32 signext %a, i32 signext %b) nounwind { ; RV64ZKSED-LABEL: sm4ks_i32: ; RV64ZKSED: # %bb.0: @@ -13,8 +11,6 @@ define signext i32 @sm4ks_i32(i32 signext %a, i32 signext %b) nounwind { ret i32 %val } -declare i32 @llvm.riscv.sm4ed(i32, i32, i32); - define signext i32 @sm4ed_i32(i32 signext %a, i32 signext %b) nounwind { ; RV64ZKSED-LABEL: sm4ed_i32: ; RV64ZKSED: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv64zksh-intrinsic-autoupgrade.ll b/llvm/test/CodeGen/RISCV/rv64zksh-intrinsic-autoupgrade.ll index 8790ec1af24dd..6767bdc9ba6d5 100644 --- a/llvm/test/CodeGen/RISCV/rv64zksh-intrinsic-autoupgrade.ll +++ b/llvm/test/CodeGen/RISCV/rv64zksh-intrinsic-autoupgrade.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zksh -verify-machineinstrs < %s \ ; RUN: | 
FileCheck %s -check-prefix=RV64ZKSH -declare i64 @llvm.riscv.sm3p0.i64(i64); - define i64 @sm3p0_i64(i64 %a) nounwind { ; RV64ZKSH-LABEL: sm3p0_i64: ; RV64ZKSH: # %bb.0: @@ -13,8 +11,6 @@ define i64 @sm3p0_i64(i64 %a) nounwind { ret i64 %val } -declare i64 @llvm.riscv.sm3p1.i64(i64); - define i64 @sm3p1_i64(i64 %a) nounwind { ; RV64ZKSH-LABEL: sm3p1_i64: ; RV64ZKSH: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv64zksh-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zksh-intrinsic.ll index 3436236d46359..8d3e96fa0ee57 100644 --- a/llvm/test/CodeGen/RISCV/rv64zksh-intrinsic.ll +++ b/llvm/test/CodeGen/RISCV/rv64zksh-intrinsic.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zksh -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV64ZKSH -declare i32 @llvm.riscv.sm3p0(i32); - define signext i32 @sm3p0_i32(i32 signext %a) nounwind { ; RV64ZKSH-LABEL: sm3p0_i32: ; RV64ZKSH: # %bb.0: @@ -13,8 +11,6 @@ define signext i32 @sm3p0_i32(i32 signext %a) nounwind { ret i32 %val } -declare i32 @llvm.riscv.sm3p1(i32); - define signext i32 @sm3p1_i32(i32 signext %a) nounwind { ; RV64ZKSH-LABEL: sm3p1_i32: ; RV64ZKSH: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvp-ext-rv32.ll b/llvm/test/CodeGen/RISCV/rvp-ext-rv32.ll index acbb2401fd92e..d4ea9e6c3def0 100644 --- a/llvm/test/CodeGen/RISCV/rvp-ext-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvp-ext-rv32.ll @@ -605,21 +605,3 @@ define void @test_psslai_h(ptr %ret_ptr, ptr %a_ptr) { store <2 x i16> %res, ptr %ret_ptr ret void } - -; Intrinsic declarations -declare <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16>, <2 x i16>) -declare <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16>, <2 x i16>) -declare <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16>, <2 x i16>) -declare <2 x i16> @llvm.usub.sat.v2i16(<2 x i16>, <2 x i16>) -declare <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8>, <4 x i8>) -declare <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8>, <4 x i8>) -declare <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8>, <4 x i8>) -declare <4 x i8> @llvm.usub.sat.v4i8(<4 x i8>, <4 x 
i8>) -declare <2 x i16> @llvm.smin.v2i16(<2 x i16>, <2 x i16>) -declare <2 x i16> @llvm.smax.v2i16(<2 x i16>, <2 x i16>) -declare <2 x i16> @llvm.umin.v2i16(<2 x i16>, <2 x i16>) -declare <2 x i16> @llvm.umax.v2i16(<2 x i16>, <2 x i16>) -declare <4 x i8> @llvm.smin.v4i8(<4 x i8>, <4 x i8>) -declare <4 x i8> @llvm.smax.v4i8(<4 x i8>, <4 x i8>) -declare <4 x i8> @llvm.umin.v4i8(<4 x i8>, <4 x i8>) -declare <4 x i8> @llvm.umax.v4i8(<4 x i8>, <4 x i8>) diff --git a/llvm/test/CodeGen/RISCV/rvp-ext-rv64.ll b/llvm/test/CodeGen/RISCV/rvp-ext-rv64.ll index f05c60c9146a5..b39b807d43154 100644 --- a/llvm/test/CodeGen/RISCV/rvp-ext-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvp-ext-rv64.ll @@ -793,20 +793,3 @@ define void @test_psslai_w(ptr %ret_ptr, ptr %a_ptr) { store <2 x i32> %res, ptr %ret_ptr ret void } -; Intrinsic declarations -declare <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16>, <4 x i16>) -declare <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16>, <4 x i16>) -declare <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16>, <4 x i16>) -declare <4 x i16> @llvm.usub.sat.v4i16(<4 x i16>, <4 x i16>) -declare <8 x i8> @llvm.sadd.sat.v8i8(<8 x i8>, <8 x i8>) -declare <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8>, <8 x i8>) -declare <8 x i8> @llvm.ssub.sat.v8i8(<8 x i8>, <8 x i8>) -declare <8 x i8> @llvm.usub.sat.v8i8(<8 x i8>, <8 x i8>) -declare <4 x i16> @llvm.smin.v4i16(<4 x i16>, <4 x i16>) -declare <4 x i16> @llvm.smax.v4i16(<4 x i16>, <4 x i16>) -declare <4 x i16> @llvm.umin.v4i16(<4 x i16>, <4 x i16>) -declare <4 x i16> @llvm.umax.v4i16(<4 x i16>, <4 x i16>) -declare <8 x i8> @llvm.smin.v8i8(<8 x i8>, <8 x i8>) -declare <8 x i8> @llvm.smax.v8i8(<8 x i8>, <8 x i8>) -declare <8 x i8> @llvm.umin.v8i8(<8 x i8>, <8 x i8>) -declare <8 x i8> @llvm.umax.v8i8(<8 x i8>, <8 x i8>) diff --git a/llvm/test/CodeGen/RISCV/rvv/65704-illegal-instruction.ll b/llvm/test/CodeGen/RISCV/rvv/65704-illegal-instruction.ll index 44fd9046fa0e0..ee1d889c3cd15 100644 --- a/llvm/test/CodeGen/RISCV/rvv/65704-illegal-instruction.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/65704-illegal-instruction.ll @@ -2,11 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+f,+m,+zvfh \ ; RUN: < %s | FileCheck %s -declare <16 x i8> @llvm.vector.extract.v16i8.nxv8i8(, i64 immarg) -declare @llvm.vector.insert.nxv8i8.v16i8(, <16 x i8>, i64 immarg) -declare @llvm.riscv.vslideup.nxv8i8.i64(, , i64, i64, i64 immarg) -declare @llvm.vector.insert.nxv2i32.v4i32(, <4 x i32>, i64 immarg) - define void @foo( %0) { ; CHECK-LABEL: foo: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/abd.ll b/llvm/test/CodeGen/RISCV/rvv/abd.ll index be4292c9902eb..949a9a3dfc470 100644 --- a/llvm/test/CodeGen/RISCV/rvv/abd.ll +++ b/llvm/test/CodeGen/RISCV/rvv/abd.ll @@ -328,18 +328,6 @@ define @uabd_non_matching_promotion( %a, %abs } -declare @llvm.abs.nxv16i8(, i1) - -declare @llvm.abs.nxv8i16(, i1) -declare @llvm.abs.nxv16i16(, i1) - -declare @llvm.abs.nxv4i32(, i1) -declare @llvm.abs.nxv8i32(, i1) - -declare @llvm.abs.nxv2i64(, i1) -declare @llvm.abs.nxv4i64(, i1) - -declare @llvm.abs.nxv2i128(, i1) ;; NOTE: These prefixes are unused and the list is autogenerated. 
Do not add tests below this line: ; RV32: {{.*}} ; RV64: {{.*}} diff --git a/llvm/test/CodeGen/RISCV/rvv/abs-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/abs-sdnode.ll index 589b9994651d2..7260cca64a476 100644 --- a/llvm/test/CodeGen/RISCV/rvv/abs-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/abs-sdnode.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s -declare @llvm.abs.nxv1i16(, i1) - define @vabs_nxv1i16( %v) { ; CHECK-LABEL: vabs_nxv1i16: ; CHECK: # %bb.0: @@ -15,8 +13,6 @@ define @vabs_nxv1i16( %v) { ret %r } -declare @llvm.abs.nxv2i16(, i1) - define @vabs_nxv2i16( %v) { ; CHECK-LABEL: vabs_nxv2i16: ; CHECK: # %bb.0: @@ -28,8 +24,6 @@ define @vabs_nxv2i16( %v) { ret %r } -declare @llvm.abs.nxv4i16(, i1) - define @vabs_nxv4i16( %v) { ; CHECK-LABEL: vabs_nxv4i16: ; CHECK: # %bb.0: @@ -41,8 +35,6 @@ define @vabs_nxv4i16( %v) { ret %r } -declare @llvm.abs.nxv8i16(, i1) - define @vabs_nxv8i16( %v) { ; CHECK-LABEL: vabs_nxv8i16: ; CHECK: # %bb.0: @@ -54,8 +46,6 @@ define @vabs_nxv8i16( %v) { ret %r } -declare @llvm.abs.nxv16i16(, i1) - define @vabs_nxv16i16( %v) { ; CHECK-LABEL: vabs_nxv16i16: ; CHECK: # %bb.0: @@ -67,8 +57,6 @@ define @vabs_nxv16i16( %v) { ret %r } -declare @llvm.abs.nxv32i16(, i1) - define @vabs_nxv32i16( %v) { ; CHECK-LABEL: vabs_nxv32i16: ; CHECK: # %bb.0: @@ -80,8 +68,6 @@ define @vabs_nxv32i16( %v) { ret %r } -declare @llvm.abs.nxv1i32(, i1) - define @vabs_nxv1i32( %v) { ; CHECK-LABEL: vabs_nxv1i32: ; CHECK: # %bb.0: @@ -93,8 +79,6 @@ define @vabs_nxv1i32( %v) { ret %r } -declare @llvm.abs.nxv2i32(, i1) - define @vabs_nxv2i32( %v) { ; CHECK-LABEL: vabs_nxv2i32: ; CHECK: # %bb.0: @@ -106,8 +90,6 @@ define @vabs_nxv2i32( %v) { ret %r } -declare @llvm.abs.nxv4i32(, i1) - define @vabs_nxv4i32( %v) { ; CHECK-LABEL: vabs_nxv4i32: ; CHECK: # %bb.0: @@ -119,8 +101,6 @@ define @vabs_nxv4i32( %v) { ret %r } -declare 
@llvm.abs.nxv8i32(, i1) - define @vabs_nxv8i32( %v) { ; CHECK-LABEL: vabs_nxv8i32: ; CHECK: # %bb.0: @@ -132,8 +112,6 @@ define @vabs_nxv8i32( %v) { ret %r } -declare @llvm.abs.nxv16i32(, i1) - define @vabs_nxv16i32( %v) { ; CHECK-LABEL: vabs_nxv16i32: ; CHECK: # %bb.0: @@ -145,8 +123,6 @@ define @vabs_nxv16i32( %v) { ret %r } -declare @llvm.abs.nxv1i64(, i1) - define @vabs_nxv1i64( %v) { ; CHECK-LABEL: vabs_nxv1i64: ; CHECK: # %bb.0: @@ -158,8 +134,6 @@ define @vabs_nxv1i64( %v) { ret %r } -declare @llvm.abs.nxv2i64(, i1) - define @vabs_nxv2i64( %v) { ; CHECK-LABEL: vabs_nxv2i64: ; CHECK: # %bb.0: @@ -171,8 +145,6 @@ define @vabs_nxv2i64( %v) { ret %r } -declare @llvm.abs.nxv4i64(, i1) - define @vabs_nxv4i64( %v) { ; CHECK-LABEL: vabs_nxv4i64: ; CHECK: # %bb.0: @@ -184,8 +156,6 @@ define @vabs_nxv4i64( %v) { ret %r } -declare @llvm.abs.nxv8i64(, i1) - define @vabs_nxv8i64( %v) { ; CHECK-LABEL: vabs_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll index 2bee8de168d7d..5b215c5173211 100644 --- a/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK -declare @llvm.vp.abs.nxv1i8(, i1 immarg, , i32) - define @vp_abs_nxv1i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_nxv1i8: ; CHECK: # %bb.0: @@ -28,8 +26,6 @@ define @vp_abs_nxv1i8_unmasked( %va, i32 zero ret %v } -declare @llvm.vp.abs.nxv2i8(, i1 immarg, , i32) - define @vp_abs_nxv2i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_nxv2i8: ; CHECK: # %bb.0: @@ -52,8 +48,6 @@ define @vp_abs_nxv2i8_unmasked( %va, i32 zero ret %v } -declare @llvm.vp.abs.nxv4i8(, i1 immarg, , i32) - define @vp_abs_nxv4i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_nxv4i8: ; CHECK: # %bb.0: @@ -76,8 +70,6 @@ define @vp_abs_nxv4i8_unmasked( %va, i32 zero ret %v } -declare 
@llvm.vp.abs.nxv8i8(, i1 immarg, , i32) - define @vp_abs_nxv8i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_nxv8i8: ; CHECK: # %bb.0: @@ -100,8 +92,6 @@ define @vp_abs_nxv8i8_unmasked( %va, i32 zero ret %v } -declare @llvm.vp.abs.nxv16i8(, i1 immarg, , i32) - define @vp_abs_nxv16i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_nxv16i8: ; CHECK: # %bb.0: @@ -124,8 +114,6 @@ define @vp_abs_nxv16i8_unmasked( %va, i32 z ret %v } -declare @llvm.vp.abs.nxv32i8(, i1 immarg, , i32) - define @vp_abs_nxv32i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_nxv32i8: ; CHECK: # %bb.0: @@ -148,8 +136,6 @@ define @vp_abs_nxv32i8_unmasked( %va, i32 z ret %v } -declare @llvm.vp.abs.nxv64i8(, i1 immarg, , i32) - define @vp_abs_nxv64i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_nxv64i8: ; CHECK: # %bb.0: @@ -172,8 +158,6 @@ define @vp_abs_nxv64i8_unmasked( %va, i32 z ret %v } -declare @llvm.vp.abs.nxv1i16(, i1 immarg, , i32) - define @vp_abs_nxv1i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_nxv1i16: ; CHECK: # %bb.0: @@ -196,8 +180,6 @@ define @vp_abs_nxv1i16_unmasked( %va, i32 z ret %v } -declare @llvm.vp.abs.nxv2i16(, i1 immarg, , i32) - define @vp_abs_nxv2i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_nxv2i16: ; CHECK: # %bb.0: @@ -220,8 +202,6 @@ define @vp_abs_nxv2i16_unmasked( %va, i32 z ret %v } -declare @llvm.vp.abs.nxv4i16(, i1 immarg, , i32) - define @vp_abs_nxv4i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_nxv4i16: ; CHECK: # %bb.0: @@ -244,8 +224,6 @@ define @vp_abs_nxv4i16_unmasked( %va, i32 z ret %v } -declare @llvm.vp.abs.nxv8i16(, i1 immarg, , i32) - define @vp_abs_nxv8i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_nxv8i16: ; CHECK: # %bb.0: @@ -268,8 +246,6 @@ define @vp_abs_nxv8i16_unmasked( %va, i32 z ret %v } -declare @llvm.vp.abs.nxv16i16(, i1 immarg, , i32) - define @vp_abs_nxv16i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_nxv16i16: ; CHECK: # %bb.0: @@ -292,8 +268,6 @@ define 
@vp_abs_nxv16i16_unmasked( %va, i3 ret %v } -declare @llvm.vp.abs.nxv32i16(, i1 immarg, , i32) - define @vp_abs_nxv32i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_nxv32i16: ; CHECK: # %bb.0: @@ -316,8 +290,6 @@ define @vp_abs_nxv32i16_unmasked( %va, i3 ret %v } -declare @llvm.vp.abs.nxv1i32(, i1 immarg, , i32) - define @vp_abs_nxv1i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_nxv1i32: ; CHECK: # %bb.0: @@ -340,8 +312,6 @@ define @vp_abs_nxv1i32_unmasked( %va, i32 z ret %v } -declare @llvm.vp.abs.nxv2i32(, i1 immarg, , i32) - define @vp_abs_nxv2i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_nxv2i32: ; CHECK: # %bb.0: @@ -364,8 +334,6 @@ define @vp_abs_nxv2i32_unmasked( %va, i32 z ret %v } -declare @llvm.vp.abs.nxv4i32(, i1 immarg, , i32) - define @vp_abs_nxv4i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_nxv4i32: ; CHECK: # %bb.0: @@ -388,8 +356,6 @@ define @vp_abs_nxv4i32_unmasked( %va, i32 z ret %v } -declare @llvm.vp.abs.nxv8i32(, i1 immarg, , i32) - define @vp_abs_nxv8i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_nxv8i32: ; CHECK: # %bb.0: @@ -412,8 +378,6 @@ define @vp_abs_nxv8i32_unmasked( %va, i32 z ret %v } -declare @llvm.vp.abs.nxv16i32(, i1 immarg, , i32) - define @vp_abs_nxv16i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_nxv16i32: ; CHECK: # %bb.0: @@ -436,8 +400,6 @@ define @vp_abs_nxv16i32_unmasked( %va, i3 ret %v } -declare @llvm.vp.abs.nxv1i64(, i1 immarg, , i32) - define @vp_abs_nxv1i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_nxv1i64: ; CHECK: # %bb.0: @@ -460,8 +422,6 @@ define @vp_abs_nxv1i64_unmasked( %va, i32 z ret %v } -declare @llvm.vp.abs.nxv2i64(, i1 immarg, , i32) - define @vp_abs_nxv2i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_nxv2i64: ; CHECK: # %bb.0: @@ -484,8 +444,6 @@ define @vp_abs_nxv2i64_unmasked( %va, i32 z ret %v } -declare @llvm.vp.abs.nxv4i64(, i1 immarg, , i32) - define @vp_abs_nxv4i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_nxv4i64: ; 
CHECK: # %bb.0: @@ -508,8 +466,6 @@ define @vp_abs_nxv4i64_unmasked( %va, i32 z ret %v } -declare @llvm.vp.abs.nxv7i64(, i1 immarg, , i32) - define @vp_abs_nxv7i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_nxv7i64: ; CHECK: # %bb.0: @@ -532,8 +488,6 @@ define @vp_abs_nxv7i64_unmasked( %va, i32 z ret %v } -declare @llvm.vp.abs.nxv8i64(, i1 immarg, , i32) - define @vp_abs_nxv8i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_nxv8i64: ; CHECK: # %bb.0: @@ -556,8 +510,6 @@ define @vp_abs_nxv8i64_unmasked( %va, i32 z ret %v } -declare @llvm.vp.abs.nxv16i64(, i1 immarg, , i32) - define @vp_abs_nxv16i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_nxv16i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll b/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll index 10156f14252a4..4aaaa88db8b6e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll @@ -22,12 +22,6 @@ define @access_fixed_object(ptr %val) { ret %v } -declare @llvm.riscv.vadd.nxv1i64.nxv1i64( - , - , - , - i64); - define @access_fixed_and_vector_objects(ptr %val) { ; RV64IV-LABEL: access_fixed_and_vector_objects: ; RV64IV: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/active_lane_mask.ll b/llvm/test/CodeGen/RISCV/rvv/active_lane_mask.ll index 1acc830347de4..a2bf9b2906a87 100644 --- a/llvm/test/CodeGen/RISCV/rvv/active_lane_mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/active_lane_mask.ll @@ -200,10 +200,3 @@ define <128 x i1> @fv128(ptr %p, i64 %index, i64 %tc) { ret <128 x i1> %mask } - -declare @llvm.get.active.lane.mask.nxv1i1.i64(i64, i64) -declare <2 x i1> @llvm.get.active.lane.mask.v2i1.i64(i64, i64) -declare <8 x i1> @llvm.get.active.lane.mask.v8i1.i64(i64, i64) -declare <32 x i1> @llvm.get.active.lane.mask.v32i1.i64(i64, i64) -declare <64 x i1> @llvm.get.active.lane.mask.v64i1.i64(i64, i64) -declare <128 x i1> 
@llvm.get.active.lane.mask.v128i1.i64(i64, i64) diff --git a/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll b/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll index cd896c9fa0f08..9b7d9736d9835 100644 --- a/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll +++ b/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll @@ -48,8 +48,3 @@ entry: ret %3 } -declare @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( - , - , - , - i64, i64) diff --git a/llvm/test/CodeGen/RISCV/rvv/allone-masked-to-unmasked.ll b/llvm/test/CodeGen/RISCV/rvv/allone-masked-to-unmasked.ll index 58cfbc331609f..42048b80f98d9 100644 --- a/llvm/test/CodeGen/RISCV/rvv/allone-masked-to-unmasked.ll +++ b/llvm/test/CodeGen/RISCV/rvv/allone-masked-to-unmasked.ll @@ -2,15 +2,6 @@ ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs | FileCheck %s ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vmset.nxv1i1(iXLen); - -declare @llvm.riscv.vadd.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen); - ; Use unmasked instruction because the mask operand is allone mask define @test0( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: test0: diff --git a/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll index a75c159339bed..8e33d634a61d1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll @@ -33,7 +33,6 @@ define @bitreverse_nxv1i8( %va) { %a = call @llvm.bitreverse.nxv1i8( %va) ret %a } -declare @llvm.bitreverse.nxv1i8() define @bitreverse_nxv2i8( %va) { ; CHECK-LABEL: bitreverse_nxv2i8: @@ -64,7 +63,6 @@ define @bitreverse_nxv2i8( %va) { %a = call @llvm.bitreverse.nxv2i8( %va) ret %a } -declare @llvm.bitreverse.nxv2i8() define @bitreverse_nxv4i8( %va) { ; CHECK-LABEL: bitreverse_nxv4i8: @@ -95,7 +93,6 @@ define @bitreverse_nxv4i8( %va) { %a = call 
@llvm.bitreverse.nxv4i8( %va) ret %a } -declare @llvm.bitreverse.nxv4i8() define @bitreverse_nxv8i8( %va) { ; CHECK-LABEL: bitreverse_nxv8i8: @@ -126,7 +123,6 @@ define @bitreverse_nxv8i8( %va) { %a = call @llvm.bitreverse.nxv8i8( %va) ret %a } -declare @llvm.bitreverse.nxv8i8() define @bitreverse_nxv16i8( %va) { ; CHECK-LABEL: bitreverse_nxv16i8: @@ -157,7 +153,6 @@ define @bitreverse_nxv16i8( %va) { %a = call @llvm.bitreverse.nxv16i8( %va) ret %a } -declare @llvm.bitreverse.nxv16i8() define @bitreverse_nxv32i8( %va) { ; CHECK-LABEL: bitreverse_nxv32i8: @@ -188,7 +183,6 @@ define @bitreverse_nxv32i8( %va) { %a = call @llvm.bitreverse.nxv32i8( %va) ret %a } -declare @llvm.bitreverse.nxv32i8() define @bitreverse_nxv64i8( %va) { ; CHECK-LABEL: bitreverse_nxv64i8: @@ -219,7 +213,6 @@ define @bitreverse_nxv64i8( %va) { %a = call @llvm.bitreverse.nxv64i8( %va) ret %a } -declare @llvm.bitreverse.nxv64i8() define @bitreverse_nxv1i16( %va) { ; CHECK-LABEL: bitreverse_nxv1i16: @@ -259,7 +252,6 @@ define @bitreverse_nxv1i16( %va) { %a = call @llvm.bitreverse.nxv1i16( %va) ret %a } -declare @llvm.bitreverse.nxv1i16() define @bitreverse_nxv2i16( %va) { ; CHECK-LABEL: bitreverse_nxv2i16: @@ -299,7 +291,6 @@ define @bitreverse_nxv2i16( %va) { %a = call @llvm.bitreverse.nxv2i16( %va) ret %a } -declare @llvm.bitreverse.nxv2i16() define @bitreverse_nxv4i16( %va) { ; CHECK-LABEL: bitreverse_nxv4i16: @@ -339,7 +330,6 @@ define @bitreverse_nxv4i16( %va) { %a = call @llvm.bitreverse.nxv4i16( %va) ret %a } -declare @llvm.bitreverse.nxv4i16() define @bitreverse_nxv8i16( %va) { ; CHECK-LABEL: bitreverse_nxv8i16: @@ -379,7 +369,6 @@ define @bitreverse_nxv8i16( %va) { %a = call @llvm.bitreverse.nxv8i16( %va) ret %a } -declare @llvm.bitreverse.nxv8i16() define @bitreverse_nxv16i16( %va) { ; CHECK-LABEL: bitreverse_nxv16i16: @@ -419,7 +408,6 @@ define @bitreverse_nxv16i16( %va) { %a = call @llvm.bitreverse.nxv16i16( %va) ret %a } -declare @llvm.bitreverse.nxv16i16() define 
@bitreverse_nxv32i16( %va) { ; CHECK-LABEL: bitreverse_nxv32i16: @@ -459,7 +447,6 @@ define @bitreverse_nxv32i16( %va) { %a = call @llvm.bitreverse.nxv32i16( %va) ret %a } -declare @llvm.bitreverse.nxv32i16() define @bitreverse_nxv1i32( %va) { ; CHECK-LABEL: bitreverse_nxv1i32: @@ -507,7 +494,6 @@ define @bitreverse_nxv1i32( %va) { %a = call @llvm.bitreverse.nxv1i32( %va) ret %a } -declare @llvm.bitreverse.nxv1i32() define @bitreverse_nxv2i32( %va) { ; CHECK-LABEL: bitreverse_nxv2i32: @@ -555,7 +541,6 @@ define @bitreverse_nxv2i32( %va) { %a = call @llvm.bitreverse.nxv2i32( %va) ret %a } -declare @llvm.bitreverse.nxv2i32() define @bitreverse_nxv4i32( %va) { ; CHECK-LABEL: bitreverse_nxv4i32: @@ -603,7 +588,6 @@ define @bitreverse_nxv4i32( %va) { %a = call @llvm.bitreverse.nxv4i32( %va) ret %a } -declare @llvm.bitreverse.nxv4i32() define @bitreverse_nxv8i32( %va) { ; CHECK-LABEL: bitreverse_nxv8i32: @@ -651,7 +635,6 @@ define @bitreverse_nxv8i32( %va) { %a = call @llvm.bitreverse.nxv8i32( %va) ret %a } -declare @llvm.bitreverse.nxv8i32() define @bitreverse_nxv16i32( %va) { ; CHECK-LABEL: bitreverse_nxv16i32: @@ -699,7 +682,6 @@ define @bitreverse_nxv16i32( %va) { %a = call @llvm.bitreverse.nxv16i32( %va) ret %a } -declare @llvm.bitreverse.nxv16i32() define @bitreverse_nxv1i64( %va) { ; RV32-LABEL: bitreverse_nxv1i64: @@ -840,7 +822,6 @@ define @bitreverse_nxv1i64( %va) { %a = call @llvm.bitreverse.nxv1i64( %va) ret %a } -declare @llvm.bitreverse.nxv1i64() define @bitreverse_nxv2i64( %va) { ; RV32-LABEL: bitreverse_nxv2i64: @@ -981,7 +962,6 @@ define @bitreverse_nxv2i64( %va) { %a = call @llvm.bitreverse.nxv2i64( %va) ret %a } -declare @llvm.bitreverse.nxv2i64() define @bitreverse_nxv4i64( %va) { ; RV32-LABEL: bitreverse_nxv4i64: @@ -1122,7 +1102,6 @@ define @bitreverse_nxv4i64( %va) { %a = call @llvm.bitreverse.nxv4i64( %va) ret %a } -declare @llvm.bitreverse.nxv4i64() define @bitreverse_nxv8i64( %va) { ; RV32-LABEL: bitreverse_nxv8i64: @@ -1285,4 +1264,3 @@ define 
@bitreverse_nxv8i64( %va) { %a = call @llvm.bitreverse.nxv8i64( %va) ret %a } -declare @llvm.bitreverse.nxv8i64() diff --git a/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll b/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll index f704a8ca875ba..09b8fdbf11d26 100644 --- a/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+zvbb,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB -declare @llvm.vp.bitreverse.nxv1i8(, , i32) - define @vp_bitreverse_nxv1i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_nxv1i8: ; CHECK: # %bb.0: @@ -74,8 +72,6 @@ define @vp_bitreverse_nxv1i8_unmasked( %va, i ret %v } -declare @llvm.vp.bitreverse.nxv2i8(, , i32) - define @vp_bitreverse_nxv2i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_nxv2i8: ; CHECK: # %bb.0: @@ -140,8 +136,6 @@ define @vp_bitreverse_nxv2i8_unmasked( %va, i ret %v } -declare @llvm.vp.bitreverse.nxv4i8(, , i32) - define @vp_bitreverse_nxv4i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_nxv4i8: ; CHECK: # %bb.0: @@ -206,8 +200,6 @@ define @vp_bitreverse_nxv4i8_unmasked( %va, i ret %v } -declare @llvm.vp.bitreverse.nxv8i8(, , i32) - define @vp_bitreverse_nxv8i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_nxv8i8: ; CHECK: # %bb.0: @@ -272,8 +264,6 @@ define @vp_bitreverse_nxv8i8_unmasked( %va, i ret %v } -declare @llvm.vp.bitreverse.nxv16i8(, , i32) - define @vp_bitreverse_nxv16i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_nxv16i8: ; CHECK: # %bb.0: @@ -338,8 +328,6 @@ define @vp_bitreverse_nxv16i8_unmasked( %va ret %v } -declare @llvm.vp.bitreverse.nxv32i8(, , i32) - define @vp_bitreverse_nxv32i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_nxv32i8: ; CHECK: # %bb.0: @@ -404,8 +392,6 @@ define @vp_bitreverse_nxv32i8_unmasked( %va ret %v } -declare @llvm.vp.bitreverse.nxv64i8(, , i32) - define 
@vp_bitreverse_nxv64i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_nxv64i8: ; CHECK: # %bb.0: @@ -470,8 +456,6 @@ define @vp_bitreverse_nxv64i8_unmasked( %va ret %v } -declare @llvm.vp.bitreverse.nxv1i16(, , i32) - define @vp_bitreverse_nxv1i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_nxv1i16: ; CHECK: # %bb.0: @@ -550,8 +534,6 @@ define @vp_bitreverse_nxv1i16_unmasked( %va ret %v } -declare @llvm.vp.bitreverse.nxv2i16(, , i32) - define @vp_bitreverse_nxv2i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_nxv2i16: ; CHECK: # %bb.0: @@ -630,8 +612,6 @@ define @vp_bitreverse_nxv2i16_unmasked( %va ret %v } -declare @llvm.vp.bitreverse.nxv4i16(, , i32) - define @vp_bitreverse_nxv4i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_nxv4i16: ; CHECK: # %bb.0: @@ -710,8 +690,6 @@ define @vp_bitreverse_nxv4i16_unmasked( %va ret %v } -declare @llvm.vp.bitreverse.nxv8i16(, , i32) - define @vp_bitreverse_nxv8i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_nxv8i16: ; CHECK: # %bb.0: @@ -790,8 +768,6 @@ define @vp_bitreverse_nxv8i16_unmasked( %va ret %v } -declare @llvm.vp.bitreverse.nxv16i16(, , i32) - define @vp_bitreverse_nxv16i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_nxv16i16: ; CHECK: # %bb.0: @@ -870,8 +846,6 @@ define @vp_bitreverse_nxv16i16_unmasked( ret %v } -declare @llvm.vp.bitreverse.nxv32i16(, , i32) - define @vp_bitreverse_nxv32i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_nxv32i16: ; CHECK: # %bb.0: @@ -950,8 +924,6 @@ define @vp_bitreverse_nxv32i16_unmasked( ret %v } -declare @llvm.vp.bitreverse.nxv1i32(, , i32) - define @vp_bitreverse_nxv1i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_nxv1i32: ; CHECK: # %bb.0: @@ -1046,8 +1018,6 @@ define @vp_bitreverse_nxv1i32_unmasked( %va ret %v } -declare @llvm.vp.bitreverse.nxv2i32(, , i32) - define @vp_bitreverse_nxv2i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_nxv2i32: ; CHECK: # 
%bb.0: @@ -1142,8 +1112,6 @@ define @vp_bitreverse_nxv2i32_unmasked( %va ret %v } -declare @llvm.vp.bitreverse.nxv4i32(, , i32) - define @vp_bitreverse_nxv4i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_nxv4i32: ; CHECK: # %bb.0: @@ -1238,8 +1206,6 @@ define @vp_bitreverse_nxv4i32_unmasked( %va ret %v } -declare @llvm.vp.bitreverse.nxv8i32(, , i32) - define @vp_bitreverse_nxv8i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_nxv8i32: ; CHECK: # %bb.0: @@ -1334,8 +1300,6 @@ define @vp_bitreverse_nxv8i32_unmasked( %va ret %v } -declare @llvm.vp.bitreverse.nxv16i32(, , i32) - define @vp_bitreverse_nxv16i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_nxv16i32: ; CHECK: # %bb.0: @@ -1430,8 +1394,6 @@ define @vp_bitreverse_nxv16i32_unmasked( ret %v } -declare @llvm.vp.bitreverse.nxv1i64(, , i32) - define @vp_bitreverse_nxv1i64( %va, %m, i32 zeroext %evl) { ; RV32-LABEL: vp_bitreverse_nxv1i64: ; RV32: # %bb.0: @@ -1712,8 +1674,6 @@ define @vp_bitreverse_nxv1i64_unmasked( %va ret %v } -declare @llvm.vp.bitreverse.nxv2i64(, , i32) - define @vp_bitreverse_nxv2i64( %va, %m, i32 zeroext %evl) { ; RV32-LABEL: vp_bitreverse_nxv2i64: ; RV32: # %bb.0: @@ -1994,8 +1954,6 @@ define @vp_bitreverse_nxv2i64_unmasked( %va ret %v } -declare @llvm.vp.bitreverse.nxv4i64(, , i32) - define @vp_bitreverse_nxv4i64( %va, %m, i32 zeroext %evl) { ; RV32-LABEL: vp_bitreverse_nxv4i64: ; RV32: # %bb.0: @@ -2276,8 +2234,6 @@ define @vp_bitreverse_nxv4i64_unmasked( %va ret %v } -declare @llvm.vp.bitreverse.nxv7i64(, , i32) - define @vp_bitreverse_nxv7i64( %va, %m, i32 zeroext %evl) { ; RV32-LABEL: vp_bitreverse_nxv7i64: ; RV32: # %bb.0: @@ -2663,8 +2619,6 @@ define @vp_bitreverse_nxv7i64_unmasked( %va ret %v } -declare @llvm.vp.bitreverse.nxv8i64(, , i32) - define @vp_bitreverse_nxv8i64( %va, %m, i32 zeroext %evl) { ; RV32-LABEL: vp_bitreverse_nxv8i64: ; RV32: # %bb.0: @@ -3051,7 +3005,6 @@ define @vp_bitreverse_nxv8i64_unmasked( %va } ; Test splitting. 
Use i16 version for easier check. -declare @llvm.vp.bitreverse.nxv64i16(, , i32) define @vp_bitreverse_nxv64i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_nxv64i16: @@ -3224,7 +3177,6 @@ define @vp_bitreverse_nxv64i16_unmasked( } ; Test promotion. -declare @llvm.vp.bitreverse.nxv1i9(, , i32) define @vp_bitreverse_nxv1i9( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_nxv1i9: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll index b8521c37e4906..51a72d3f435b5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll @@ -21,7 +21,6 @@ define @bswap_nxv1i16( %va) { %a = call @llvm.bswap.nxv1i16( %va) ret %a } -declare @llvm.bswap.nxv1i16() define @bswap_nxv2i16( %va) { ; CHECK-LABEL: bswap_nxv2i16: @@ -40,7 +39,6 @@ define @bswap_nxv2i16( %va) { %a = call @llvm.bswap.nxv2i16( %va) ret %a } -declare @llvm.bswap.nxv2i16() define @bswap_nxv4i16( %va) { ; CHECK-LABEL: bswap_nxv4i16: @@ -59,7 +57,6 @@ define @bswap_nxv4i16( %va) { %a = call @llvm.bswap.nxv4i16( %va) ret %a } -declare @llvm.bswap.nxv4i16() define @bswap_nxv8i16( %va) { ; CHECK-LABEL: bswap_nxv8i16: @@ -78,7 +75,6 @@ define @bswap_nxv8i16( %va) { %a = call @llvm.bswap.nxv8i16( %va) ret %a } -declare @llvm.bswap.nxv8i16() define @bswap_nxv16i16( %va) { ; CHECK-LABEL: bswap_nxv16i16: @@ -97,7 +93,6 @@ define @bswap_nxv16i16( %va) { %a = call @llvm.bswap.nxv16i16( %va) ret %a } -declare @llvm.bswap.nxv16i16() define @bswap_nxv32i16( %va) { ; CHECK-LABEL: bswap_nxv32i16: @@ -116,7 +111,6 @@ define @bswap_nxv32i16( %va) { %a = call @llvm.bswap.nxv32i16( %va) ret %a } -declare @llvm.bswap.nxv32i16() define @bswap_nxv1i32( %va) { ; CHECK-LABEL: bswap_nxv1i32: @@ -143,7 +137,6 @@ define @bswap_nxv1i32( %va) { %a = call @llvm.bswap.nxv1i32( %va) ret %a } -declare @llvm.bswap.nxv1i32() define @bswap_nxv2i32( %va) { ; CHECK-LABEL: bswap_nxv2i32: @@ -170,7 +163,6 @@ define 
@bswap_nxv2i32( %va) { %a = call @llvm.bswap.nxv2i32( %va) ret %a } -declare @llvm.bswap.nxv2i32() define @bswap_nxv4i32( %va) { ; CHECK-LABEL: bswap_nxv4i32: @@ -197,7 +189,6 @@ define @bswap_nxv4i32( %va) { %a = call @llvm.bswap.nxv4i32( %va) ret %a } -declare @llvm.bswap.nxv4i32() define @bswap_nxv8i32( %va) { ; CHECK-LABEL: bswap_nxv8i32: @@ -224,7 +215,6 @@ define @bswap_nxv8i32( %va) { %a = call @llvm.bswap.nxv8i32( %va) ret %a } -declare @llvm.bswap.nxv8i32() define @bswap_nxv16i32( %va) { ; CHECK-LABEL: bswap_nxv16i32: @@ -251,7 +241,6 @@ define @bswap_nxv16i32( %va) { %a = call @llvm.bswap.nxv16i32( %va) ret %a } -declare @llvm.bswap.nxv16i32() define @bswap_nxv1i64( %va) { ; RV32-LABEL: bswap_nxv1i64: @@ -335,7 +324,6 @@ define @bswap_nxv1i64( %va) { %a = call @llvm.bswap.nxv1i64( %va) ret %a } -declare @llvm.bswap.nxv1i64() define @bswap_nxv2i64( %va) { ; RV32-LABEL: bswap_nxv2i64: @@ -419,7 +407,6 @@ define @bswap_nxv2i64( %va) { %a = call @llvm.bswap.nxv2i64( %va) ret %a } -declare @llvm.bswap.nxv2i64() define @bswap_nxv4i64( %va) { ; RV32-LABEL: bswap_nxv4i64: @@ -503,7 +490,6 @@ define @bswap_nxv4i64( %va) { %a = call @llvm.bswap.nxv4i64( %va) ret %a } -declare @llvm.bswap.nxv4i64() define @bswap_nxv8i64( %va) { ; RV32-LABEL: bswap_nxv8i64: @@ -609,4 +595,3 @@ define @bswap_nxv8i64( %va) { %a = call @llvm.bswap.nxv8i64( %va) ret %a } -declare @llvm.bswap.nxv8i64() diff --git a/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll b/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll index 3d31cf80cdd3a..0177b8cfd4393 100644 --- a/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+zvkb,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVKB -declare @llvm.vp.bswap.nxv1i16(, , i32) - define @vp_bswap_nxv1i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bswap_nxv1i16: ; CHECK: # %bb.0: @@ -46,8 +44,6 @@ define @vp_bswap_nxv1i16_unmasked( %va, 
i32 ret %v } -declare @llvm.vp.bswap.nxv2i16(, , i32) - define @vp_bswap_nxv2i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bswap_nxv2i16: ; CHECK: # %bb.0: @@ -84,8 +80,6 @@ define @vp_bswap_nxv2i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.bswap.nxv4i16(, , i32) - define @vp_bswap_nxv4i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bswap_nxv4i16: ; CHECK: # %bb.0: @@ -122,8 +116,6 @@ define @vp_bswap_nxv4i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.bswap.nxv8i16(, , i32) - define @vp_bswap_nxv8i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bswap_nxv8i16: ; CHECK: # %bb.0: @@ -160,8 +152,6 @@ define @vp_bswap_nxv8i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.bswap.nxv16i16(, , i32) - define @vp_bswap_nxv16i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bswap_nxv16i16: ; CHECK: # %bb.0: @@ -198,8 +188,6 @@ define @vp_bswap_nxv16i16_unmasked( %va, ret %v } -declare @llvm.vp.bswap.nxv32i16(, , i32) - define @vp_bswap_nxv32i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bswap_nxv32i16: ; CHECK: # %bb.0: @@ -236,8 +224,6 @@ define @vp_bswap_nxv32i16_unmasked( %va, ret %v } -declare @llvm.vp.bswap.nxv1i32(, , i32) - define @vp_bswap_nxv1i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bswap_nxv1i32: ; CHECK: # %bb.0: @@ -290,8 +276,6 @@ define @vp_bswap_nxv1i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.bswap.nxv2i32(, , i32) - define @vp_bswap_nxv2i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bswap_nxv2i32: ; CHECK: # %bb.0: @@ -344,8 +328,6 @@ define @vp_bswap_nxv2i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.bswap.nxv4i32(, , i32) - define @vp_bswap_nxv4i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bswap_nxv4i32: ; CHECK: # %bb.0: @@ -398,8 +380,6 @@ define @vp_bswap_nxv4i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.bswap.nxv8i32(, , i32) - define @vp_bswap_nxv8i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bswap_nxv8i32: ; CHECK: # %bb.0: @@ -452,8 +432,6 @@ define @vp_bswap_nxv8i32_unmasked( %va, i32 
ret %v } -declare @llvm.vp.bswap.nxv16i32(, , i32) - define @vp_bswap_nxv16i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bswap_nxv16i32: ; CHECK: # %bb.0: @@ -506,8 +484,6 @@ define @vp_bswap_nxv16i32_unmasked( %va, ret %v } -declare @llvm.vp.bswap.nxv1i64(, , i32) - define @vp_bswap_nxv1i64( %va, %m, i32 zeroext %evl) { ; RV32-LABEL: vp_bswap_nxv1i64: ; RV32: # %bb.0: @@ -674,8 +650,6 @@ define @vp_bswap_nxv1i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.bswap.nxv2i64(, , i32) - define @vp_bswap_nxv2i64( %va, %m, i32 zeroext %evl) { ; RV32-LABEL: vp_bswap_nxv2i64: ; RV32: # %bb.0: @@ -842,8 +816,6 @@ define @vp_bswap_nxv2i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.bswap.nxv4i64(, , i32) - define @vp_bswap_nxv4i64( %va, %m, i32 zeroext %evl) { ; RV32-LABEL: vp_bswap_nxv4i64: ; RV32: # %bb.0: @@ -1010,8 +982,6 @@ define @vp_bswap_nxv4i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.bswap.nxv7i64(, , i32) - define @vp_bswap_nxv7i64( %va, %m, i32 zeroext %evl) { ; RV32-LABEL: vp_bswap_nxv7i64: ; RV32: # %bb.0: @@ -1282,8 +1252,6 @@ define @vp_bswap_nxv7i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.bswap.nxv8i64(, , i32) - define @vp_bswap_nxv8i64( %va, %m, i32 zeroext %evl) { ; RV32-LABEL: vp_bswap_nxv8i64: ; RV32: # %bb.0: @@ -1555,7 +1523,6 @@ define @vp_bswap_nxv8i64_unmasked( %va, i32 } ; Test splitting. Use i16 version for easier check. -declare @llvm.vp.bswap.nxv64i16(, , i32) define @vp_bswap_nxv64i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bswap_nxv64i16: @@ -1656,7 +1623,6 @@ define @vp_bswap_nxv64i16_unmasked( %va, } ; Test promotion. 
-declare @llvm.vp.bswap.nxv1i48(, , i32) define @vp_bswap_nxv1i48( %va, %m, i32 zeroext %evl) { ; RV32-LABEL: vp_bswap_nxv1i48: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll index 6507349f45a2f..6c7709f52e30b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll @@ -12,8 +12,6 @@ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ ; RUN: --check-prefixes=CHECK,ZVFHMIN,RV64ZFH -declare @llvm.vp.ceil.nxv1bf16(, , i32) - define @vp_ceil_vv_nxv1bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_vv_nxv1bf16: ; CHECK: # %bb.0: @@ -66,8 +64,6 @@ define @vp_ceil_vv_nxv1bf16_unmasked( ret %v } -declare @llvm.vp.ceil.nxv2bf16(, , i32) - define @vp_ceil_vv_nxv2bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_vv_nxv2bf16: ; CHECK: # %bb.0: @@ -120,8 +116,6 @@ define @vp_ceil_vv_nxv2bf16_unmasked( ret %v } -declare @llvm.vp.ceil.nxv4bf16(, , i32) - define @vp_ceil_vv_nxv4bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_vv_nxv4bf16: ; CHECK: # %bb.0: @@ -174,8 +168,6 @@ define @vp_ceil_vv_nxv4bf16_unmasked( ret %v } -declare @llvm.vp.ceil.nxv8bf16(, , i32) - define @vp_ceil_vv_nxv8bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_vv_nxv8bf16: ; CHECK: # %bb.0: @@ -228,8 +220,6 @@ define @vp_ceil_vv_nxv8bf16_unmasked( ret %v } -declare @llvm.vp.ceil.nxv16bf16(, , i32) - define @vp_ceil_vv_nxv16bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_vv_nxv16bf16: ; CHECK: # %bb.0: @@ -282,8 +272,6 @@ define @vp_ceil_vv_nxv16bf16_unmasked( %v } -declare @llvm.vp.ceil.nxv32bf16(, , i32) - define @vp_ceil_vv_nxv32bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_vv_nxv32bf16: ; CHECK: # %bb.0: @@ -402,7 +390,6 @@ define @vp_ceil_vv_nxv32bf16_unmasked( @llvm.vp.ceil.nxv32bf16( %va, splat (i1 true), i32 %evl) ret %v } -declare @llvm.vp.ceil.nxv1f16(, , i32) define @vp_ceil_vv_nxv1f16( %va, %m, i32 zeroext %evl) { ; 
ZVFH-LABEL: vp_ceil_vv_nxv1f16: @@ -490,8 +477,6 @@ define @vp_ceil_vv_nxv1f16_unmasked( %va, ret %v } -declare @llvm.vp.ceil.nxv2f16(, , i32) - define @vp_ceil_vv_nxv2f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_ceil_vv_nxv2f16: ; ZVFH: # %bb.0: @@ -578,8 +563,6 @@ define @vp_ceil_vv_nxv2f16_unmasked( %va, ret %v } -declare @llvm.vp.ceil.nxv4f16(, , i32) - define @vp_ceil_vv_nxv4f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_ceil_vv_nxv4f16: ; ZVFH: # %bb.0: @@ -666,8 +649,6 @@ define @vp_ceil_vv_nxv4f16_unmasked( %va, ret %v } -declare @llvm.vp.ceil.nxv8f16(, , i32) - define @vp_ceil_vv_nxv8f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_ceil_vv_nxv8f16: ; ZVFH: # %bb.0: @@ -756,8 +737,6 @@ define @vp_ceil_vv_nxv8f16_unmasked( %va, ret %v } -declare @llvm.vp.ceil.nxv16f16(, , i32) - define @vp_ceil_vv_nxv16f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_ceil_vv_nxv16f16: ; ZVFH: # %bb.0: @@ -846,8 +825,6 @@ define @vp_ceil_vv_nxv16f16_unmasked( % ret %v } -declare @llvm.vp.ceil.nxv32f16(, , i32) - define @vp_ceil_vv_nxv32f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_ceil_vv_nxv32f16: ; ZVFH: # %bb.0: @@ -1003,8 +980,6 @@ define @vp_ceil_vv_nxv32f16_unmasked( % ret %v } -declare @llvm.vp.ceil.nxv1f32(, , i32) - define @vp_ceil_vv_nxv1f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_vv_nxv1f32: ; CHECK: # %bb.0: @@ -1045,8 +1020,6 @@ define @vp_ceil_vv_nxv1f32_unmasked( %v ret %v } -declare @llvm.vp.ceil.nxv2f32(, , i32) - define @vp_ceil_vv_nxv2f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_vv_nxv2f32: ; CHECK: # %bb.0: @@ -1087,8 +1060,6 @@ define @vp_ceil_vv_nxv2f32_unmasked( %v ret %v } -declare @llvm.vp.ceil.nxv4f32(, , i32) - define @vp_ceil_vv_nxv4f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_vv_nxv4f32: ; CHECK: # %bb.0: @@ -1131,8 +1102,6 @@ define @vp_ceil_vv_nxv4f32_unmasked( %v ret %v } -declare @llvm.vp.ceil.nxv8f32(, , i32) - define @vp_ceil_vv_nxv8f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vp_ceil_vv_nxv8f32: ; CHECK: # %bb.0: @@ -1175,8 +1144,6 @@ define @vp_ceil_vv_nxv8f32_unmasked( %v ret %v } -declare @llvm.vp.ceil.nxv16f32(, , i32) - define @vp_ceil_vv_nxv16f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_vv_nxv16f32: ; CHECK: # %bb.0: @@ -1219,8 +1186,6 @@ define @vp_ceil_vv_nxv16f32_unmasked( ret %v } -declare @llvm.vp.ceil.nxv1f64(, , i32) - define @vp_ceil_vv_nxv1f64( %va, %m, i32 zeroext %evl) { ; RV32ZFH-LABEL: vp_ceil_vv_nxv1f64: ; RV32ZFH: # %bb.0: @@ -1295,8 +1260,6 @@ define @vp_ceil_vv_nxv1f64_unmasked( ret %v } -declare @llvm.vp.ceil.nxv2f64(, , i32) - define @vp_ceil_vv_nxv2f64( %va, %m, i32 zeroext %evl) { ; RV32ZFH-LABEL: vp_ceil_vv_nxv2f64: ; RV32ZFH: # %bb.0: @@ -1375,8 +1338,6 @@ define @vp_ceil_vv_nxv2f64_unmasked( ret %v } -declare @llvm.vp.ceil.nxv4f64(, , i32) - define @vp_ceil_vv_nxv4f64( %va, %m, i32 zeroext %evl) { ; RV32ZFH-LABEL: vp_ceil_vv_nxv4f64: ; RV32ZFH: # %bb.0: @@ -1455,8 +1416,6 @@ define @vp_ceil_vv_nxv4f64_unmasked( ret %v } -declare @llvm.vp.ceil.nxv7f64(, , i32) - define @vp_ceil_vv_nxv7f64( %va, %m, i32 zeroext %evl) { ; RV32ZFH-LABEL: vp_ceil_vv_nxv7f64: ; RV32ZFH: # %bb.0: @@ -1535,8 +1494,6 @@ define @vp_ceil_vv_nxv7f64_unmasked( ret %v } -declare @llvm.vp.ceil.nxv8f64(, , i32) - define @vp_ceil_vv_nxv8f64( %va, %m, i32 zeroext %evl) { ; RV32ZFH-LABEL: vp_ceil_vv_nxv8f64: ; RV32ZFH: # %bb.0: @@ -1616,7 +1573,6 @@ define @vp_ceil_vv_nxv8f64_unmasked( } ; Test splitting. 
-declare @llvm.vp.ceil.nxv16f64(, , i32) define @vp_ceil_vv_nxv16f64( %va, %m, i32 zeroext %evl) { ; RV32ZFH-LABEL: vp_ceil_vv_nxv16f64: diff --git a/llvm/test/CodeGen/RISCV/rvv/combine-sats.ll b/llvm/test/CodeGen/RISCV/rvv/combine-sats.ll index 8f917becafec0..4237a6bcc2ee5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/combine-sats.ll +++ b/llvm/test/CodeGen/RISCV/rvv/combine-sats.ll @@ -276,10 +276,6 @@ define @vselect_add_const_2_nxv2i64( %a0) { ret %v2 } -declare <2 x i64> @llvm.umin.v2i64(<2 x i64>, <2 x i64>) -declare <2 x i64> @llvm.umax.v2i64(<2 x i64>, <2 x i64>) -declare @llvm.umin.nxv2i64(, ) -declare @llvm.umax.nxv2i64(, ) ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: ; RV32: {{.*}} ; RV64: {{.*}} diff --git a/llvm/test/CodeGen/RISCV/rvv/combine-store-extract-crash.ll b/llvm/test/CodeGen/RISCV/rvv/combine-store-extract-crash.ll index 496755738e6fa..88411e49ab5ed 100644 --- a/llvm/test/CodeGen/RISCV/rvv/combine-store-extract-crash.ll +++ b/llvm/test/CodeGen/RISCV/rvv/combine-store-extract-crash.ll @@ -68,8 +68,6 @@ entry: ret void } -declare i32 @llvm.vector.reduce.add.v16i32(<16 x i32>) - !0 = !{!1, !1, i64 0} !1 = !{!"int", !2, i64 0} !2 = !{!"omnipotent char", !3, i64 0} diff --git a/llvm/test/CodeGen/RISCV/rvv/commutable.ll b/llvm/test/CodeGen/RISCV/rvv/commutable.ll index 05713bc2bb083..a59fde05866d5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/commutable.ll +++ b/llvm/test/CodeGen/RISCV/rvv/commutable.ll @@ -5,7 +5,6 @@ ; RUN: -verify-machineinstrs | FileCheck %s ; vadd.vv -declare @llvm.riscv.vadd.nxv1i64.nxv1i64(, , , iXLen); define @commutable_vadd_vv( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vadd_vv: ; CHECK: # %bb.0: # %entry @@ -21,7 +20,6 @@ entry: ret %ret } -declare @llvm.riscv.vadd.mask.nxv1i64.nxv1i64(, , , , iXLen, iXLen); define @commutable_vadd_vv_masked( %0, %1, %mask, iXLen %2) { ; CHECK-LABEL: commutable_vadd_vv_masked: ; CHECK: # %bb.0: @@ -37,7 +35,6 @@ define 
@commutable_vadd_vv_masked( %0, @llvm.riscv.vand.nxv1i64.nxv1i64(, , , iXLen); define @commutable_vand_vv( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vand_vv: ; CHECK: # %bb.0: # %entry @@ -53,7 +50,6 @@ entry: ret %ret } -declare @llvm.riscv.vand.mask.nxv1i64.nxv1i64(, , , , iXLen, iXLen); define @commutable_vand_vv_masked( %0, %1, %mask, iXLen %2) { ; CHECK-LABEL: commutable_vand_vv_masked: ; CHECK: # %bb.0: @@ -69,7 +65,6 @@ define @commutable_vand_vv_masked( %0, @llvm.riscv.vor.nxv1i64.nxv1i64(, , , iXLen); define @commutable_vor_vv( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vor_vv: ; CHECK: # %bb.0: # %entry @@ -85,7 +80,6 @@ entry: ret %ret } -declare @llvm.riscv.vor.mask.nxv1i64.nxv1i64(, , , , iXLen, iXLen); define @commutable_vor_vv_masked( %0, %1, %mask, iXLen %2) { ; CHECK-LABEL: commutable_vor_vv_masked: ; CHECK: # %bb.0: @@ -101,7 +95,6 @@ define @commutable_vor_vv_masked( %0, @llvm.riscv.vxor.nxv1i64.nxv1i64(, , , iXLen); define @commutable_vxor_vv( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vxor_vv: ; CHECK: # %bb.0: # %entry @@ -117,7 +110,6 @@ entry: ret %ret } -declare @llvm.riscv.vxor.mask.nxv1i64.nxv1i64(, , , , iXLen, iXLen); define @commutable_vxor_vv_masked( %0, %1, %mask, iXLen %2) { ; CHECK-LABEL: commutable_vxor_vv_masked: ; CHECK: # %bb.0: @@ -133,7 +125,6 @@ define @commutable_vxor_vv_masked( %0, @llvm.riscv.vmseq.nxv1i64(, , iXLen); define @commutable_vmseq_vv( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vmseq_vv: ; CHECK: # %bb.0: # %entry @@ -149,7 +140,6 @@ entry: ret %ret } -declare @llvm.riscv.vmseq.mask.nxv1i64(, , , , iXLen); define @commutable_vmseq_vv_masked( %0, %1, %mask, iXLen %2) { ; CHECK-LABEL: commutable_vmseq_vv_masked: ; CHECK: # %bb.0: @@ -165,7 +155,6 @@ define @commutable_vmseq_vv_masked( %0, @llvm.riscv.vmsne.nxv1i64(, , iXLen); define @commutable_vmsne_vv( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vmsne_vv: ; CHECK: # %bb.0: # %entry @@ -181,7 +170,6 @@ 
entry: ret %ret } -declare @llvm.riscv.vmsne.mask.nxv1i64(, , , , iXLen); define @commutable_vmsne_vv_masked( %0, %1, %mask, iXLen %2) { ; CHECK-LABEL: commutable_vmsne_vv_masked: ; CHECK: # %bb.0: @@ -197,7 +185,6 @@ define @commutable_vmsne_vv_masked( %0, @llvm.riscv.vmin.nxv1i64.nxv1i64(, , , iXLen); define @commutable_vmin_vv( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vmin_vv: ; CHECK: # %bb.0: # %entry @@ -213,7 +200,6 @@ entry: ret %ret } -declare @llvm.riscv.vmin.mask.nxv1i64.nxv1i64(, , , , iXLen, iXLen); define @commutable_vmin_vv_masked( %0, %1, %mask, iXLen %2) { ; CHECK-LABEL: commutable_vmin_vv_masked: ; CHECK: # %bb.0: @@ -229,7 +215,6 @@ define @commutable_vmin_vv_masked( %0, @llvm.riscv.vminu.nxv1i64.nxv1i64(, , , iXLen); define @commutable_vminu_vv( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vminu_vv: ; CHECK: # %bb.0: # %entry @@ -245,7 +230,6 @@ entry: ret %ret } -declare @llvm.riscv.vminu.mask.nxv1i64.nxv1i64(, , , , iXLen, iXLen); define @commutable_vminu_vv_masked( %0, %1, %mask, iXLen %2) { ; CHECK-LABEL: commutable_vminu_vv_masked: ; CHECK: # %bb.0: @@ -261,7 +245,6 @@ define @commutable_vminu_vv_masked( %0, @llvm.riscv.vmax.nxv1i64.nxv1i64(, , , iXLen); define @commutable_vmax_vv( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vmax_vv: ; CHECK: # %bb.0: # %entry @@ -277,7 +260,6 @@ entry: ret %ret } -declare @llvm.riscv.vmax.mask.nxv1i64.nxv1i64(, , , , iXLen, iXLen); define @commutable_vmax_vv_masked( %0, %1, %mask, iXLen %2) { ; CHECK-LABEL: commutable_vmax_vv_masked: ; CHECK: # %bb.0: @@ -293,7 +275,6 @@ define @commutable_vmax_vv_masked( %0, @llvm.riscv.vmaxu.nxv1i64.nxv1i64(, , , iXLen); define @commutable_vmaxu_vv( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vmaxu_vv: ; CHECK: # %bb.0: # %entry @@ -309,7 +290,6 @@ entry: ret %ret } -declare @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64(, , , , iXLen, iXLen); define @commutable_vmaxu_vv_masked( %0, %1, %mask, iXLen %2) { ; CHECK-LABEL: 
commutable_vmaxu_vv_masked: ; CHECK: # %bb.0: @@ -325,7 +305,6 @@ define @commutable_vmaxu_vv_masked( %0, @llvm.riscv.vmul.nxv1i64.nxv1i64(, , , iXLen); define @commutable_vmul_vv( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vmul_vv: ; CHECK: # %bb.0: # %entry @@ -341,7 +320,6 @@ entry: ret %ret } -declare @llvm.riscv.vmul.mask.nxv1i64.nxv1i64(, , , , iXLen, iXLen); define @commutable_vmul_vv_masked( %0, %1, %mask, iXLen %2) { ; CHECK-LABEL: commutable_vmul_vv_masked: ; CHECK: # %bb.0: @@ -357,7 +335,6 @@ define @commutable_vmul_vv_masked( %0, @llvm.riscv.vmulh.nxv1i64.nxv1i64(, , , iXLen); define @commutable_vmulh_vv( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vmulh_vv: ; CHECK: # %bb.0: # %entry @@ -373,7 +350,6 @@ entry: ret %ret } -declare @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64(, , , , iXLen, iXLen); define @commutable_vmulh_vv_masked( %0, %1, %mask, iXLen %2) { ; CHECK-LABEL: commutable_vmulh_vv_masked: ; CHECK: # %bb.0: @@ -389,7 +365,6 @@ define @commutable_vmulh_vv_masked( %0, @llvm.riscv.vmulhu.nxv1i64.nxv1i64(, , , iXLen); define @commutable_vmulhu_vv( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vmulhu_vv: ; CHECK: # %bb.0: # %entry @@ -405,7 +380,6 @@ entry: ret %ret } -declare @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64(, , , , iXLen, iXLen); define @commutable_vmulhu_vv_masked( %0, %1, %mask, iXLen %2) { ; CHECK-LABEL: commutable_vmulhu_vv_masked: ; CHECK: # %bb.0: @@ -421,7 +395,6 @@ define @commutable_vmulhu_vv_masked( %0, @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32(, , , iXLen); define @commutable_vwadd_vv( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vwadd_vv: ; CHECK: # %bb.0: # %entry @@ -437,7 +410,6 @@ entry: ret %ret } -declare @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32(, , , , iXLen, iXLen); define @commutable_vwadd_vv_masked( %0, %1, %mask, iXLen %2) { ; CHECK-LABEL: commutable_vwadd_vv_masked: ; CHECK: # %bb.0: @@ -453,7 +425,6 @@ define @commutable_vwadd_vv_masked( %0, 
@llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32(, , , iXLen); define @commutable_vwaddu_vv( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vwaddu_vv: ; CHECK: # %bb.0: # %entry @@ -469,7 +440,6 @@ entry: ret %ret } -declare @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32(, , , , iXLen, iXLen); define @commutable_vwaddu_vv_masked( %0, %1, %mask, iXLen %2) { ; CHECK-LABEL: commutable_vwaddu_vv_masked: ; CHECK: # %bb.0: @@ -485,7 +455,6 @@ define @commutable_vwaddu_vv_masked( %0, @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32(, , , iXLen); define @commutable_vwmul_vv( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vwmul_vv: ; CHECK: # %bb.0: # %entry @@ -501,7 +470,6 @@ entry: ret %ret } -declare @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32(, , , , iXLen, iXLen); define @commutable_vwmul_vv_masked( %0, %1, %mask, iXLen %2) { ; CHECK-LABEL: commutable_vwmul_vv_masked: ; CHECK: # %bb.0: @@ -517,7 +485,6 @@ define @commutable_vwmul_vv_masked( %0, @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32(, , , iXLen); define @commutable_vwmulu_vv( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vwmulu_vv: ; CHECK: # %bb.0: # %entry @@ -533,7 +500,6 @@ entry: ret %ret } -declare @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32(, , , , iXLen, iXLen); define @commutable_vwmulu_vv_masked( %0, %1, %mask, iXLen %2) { ; CHECK-LABEL: commutable_vwmulu_vv_masked: ; CHECK: # %bb.0: @@ -549,7 +515,6 @@ define @commutable_vwmulu_vv_masked( %0, @llvm.riscv.vwmacc.nxv1i64.nxv1i32(, , , iXLen, iXLen); define @commutable_vwmacc_vv( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vwmacc_vv: ; CHECK: # %bb.0: # %entry @@ -565,7 +530,6 @@ entry: ret %ret } -declare @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32(, , , , iXLen, iXLen); define @commutable_vwmacc_vv_masked( %0, %1, %mask, iXLen %2) { ; CHECK-LABEL: commutable_vwmacc_vv_masked: ; CHECK: # %bb.0: @@ -582,7 +546,6 @@ define @commutable_vwmacc_vv_masked( %0, @llvm.riscv.vwmaccu.nxv1i64.nxv1i32(, , , iXLen, iXLen); define 
@commutable_vwmaccu_vv( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vwmaccu_vv: ; CHECK: # %bb.0: # %entry @@ -598,7 +561,6 @@ entry: ret %ret } -declare @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32(, , , , iXLen, iXLen); define @commutable_vwmaccu_vv_masked( %0, %1, %mask, iXLen %2) { ; CHECK-LABEL: commutable_vwmaccu_vv_masked: ; CHECK: # %bb.0: @@ -615,7 +577,6 @@ define @commutable_vwmaccu_vv_masked( %0, < } ; vadc.vvm -declare @llvm.riscv.vadc.nxv1i64.nxv1i64(, , , , iXLen); define @commutable_vadc_vv( %0, %1, %mask, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vadc_vv: ; CHECK: # %bb.0: # %entry @@ -632,7 +593,6 @@ entry: } ; vsadd.vv -declare @llvm.riscv.vsadd.nxv1i64.nxv1i64(, , , iXLen); define @commutable_vsadd_vv( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vsadd_vv: ; CHECK: # %bb.0: # %entry @@ -648,7 +608,6 @@ entry: ret %ret } -declare @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64(, , , , iXLen, iXLen); define @commutable_vsadd_vv_masked( %0, %1, %mask, iXLen %2) { ; CHECK-LABEL: commutable_vsadd_vv_masked: ; CHECK: # %bb.0: @@ -664,7 +623,6 @@ define @commutable_vsadd_vv_masked( %0, @llvm.riscv.vsaddu.nxv1i64.nxv1i64(, , , iXLen); define @commutable_vsaddu_vv( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vsaddu_vv: ; CHECK: # %bb.0: # %entry @@ -680,7 +638,6 @@ entry: ret %ret } -declare @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64(, , , , iXLen, iXLen); define @commutable_vsaddu_vv_masked( %0, %1, %mask, iXLen %2) { ; CHECK-LABEL: commutable_vsaddu_vv_masked: ; CHECK: # %bb.0: @@ -696,7 +653,6 @@ define @commutable_vsaddu_vv_masked( %0, @llvm.riscv.vaadd.nxv1i64.nxv1i64(, , , iXLen, iXLen); define @commutable_vaadd_vv( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vaadd_vv: ; CHECK: # %bb.0: # %entry @@ -713,7 +669,6 @@ entry: ret %ret } -declare @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64(, , , , iXLen, iXLen, iXLen); define @commutable_vaadd_vv_masked( %0, %1, %mask, iXLen %2) { ; CHECK-LABEL: commutable_vaadd_vv_masked: ; 
CHECK: # %bb.0: @@ -730,7 +685,6 @@ define @commutable_vaadd_vv_masked( %0, @llvm.riscv.vaaddu.nxv1i64.nxv1i64(, , , iXLen, iXLen); define @commutable_vaaddu_vv( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vaaddu_vv: ; CHECK: # %bb.0: # %entry @@ -747,7 +701,6 @@ entry: ret %ret } -declare @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64(, , , , iXLen, iXLen, iXLen); define @commutable_vaaddu_vv_masked( %0, %1, %mask, iXLen %2) { ; CHECK-LABEL: commutable_vaaddu_vv_masked: ; CHECK: # %bb.0: @@ -764,7 +717,6 @@ define @commutable_vaaddu_vv_masked( %0, @llvm.riscv.vsmul.nxv1i64.nxv1i64(, , , iXLen, iXLen); define @commutable_vsmul_vv( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: commutable_vsmul_vv: ; CHECK: # %bb.0: # %entry @@ -781,7 +733,6 @@ entry: ret %ret } -declare @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64(, , , , iXLen, iXLen, iXLen); define @commutable_vsmul_vv_masked( %0, %1, %mask, iXLen %2) { ; CHECK-LABEL: commutable_vsmul_vv_masked: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/compressstore.ll b/llvm/test/CodeGen/RISCV/rvv/compressstore.ll index 69822e9d9d2e3..4b6a115ade642 100644 --- a/llvm/test/CodeGen/RISCV/rvv/compressstore.ll +++ b/llvm/test/CodeGen/RISCV/rvv/compressstore.ll @@ -836,36 +836,3 @@ entry: ret void } -declare void @llvm.masked.compressstore.v1i8(<1 x i8>, ptr, <1 x i1>) -declare void @llvm.masked.compressstore.v2i8(<2 x i8>, ptr, <2 x i1>) -declare void @llvm.masked.compressstore.v4i8(<4 x i8>, ptr, <4 x i1>) -declare void @llvm.masked.compressstore.v8i8(<8 x i8>, ptr, <8 x i1>) -declare void @llvm.masked.compressstore.v16i8(<16 x i8>, ptr, <16 x i1>) -declare void @llvm.masked.compressstore.v32i8(<32 x i8>, ptr, <32 x i1>) -declare void @llvm.masked.compressstore.v64i8(<64 x i8>, ptr, <64 x i1>) -declare void @llvm.masked.compressstore.v128i8(<128 x i8>, ptr, <128 x i1>) -declare void @llvm.masked.compressstore.v256i8(<256 x i8>, ptr, <256 x i1>) - -declare void @llvm.masked.compressstore.v1i16(<1 x i16>, ptr, <1 x i1>) 
-declare void @llvm.masked.compressstore.v2i16(<2 x i16>, ptr, <2 x i1>) -declare void @llvm.masked.compressstore.v4i16(<4 x i16>, ptr, <4 x i1>) -declare void @llvm.masked.compressstore.v8i16(<8 x i16>, ptr, <8 x i1>) -declare void @llvm.masked.compressstore.v16i16(<16 x i16>, ptr, <16 x i1>) -declare void @llvm.masked.compressstore.v32i16(<32 x i16>, ptr, <32 x i1>) -declare void @llvm.masked.compressstore.v64i16(<64 x i16>, ptr, <64 x i1>) -declare void @llvm.masked.compressstore.v128i16(<128 x i16>, ptr, <128 x i1>) - -declare void @llvm.masked.compressstore.v1i32(<1 x i32>, ptr, <1 x i1>) -declare void @llvm.masked.compressstore.v2i32(<2 x i32>, ptr, <2 x i1>) -declare void @llvm.masked.compressstore.v4i32(<4 x i32>, ptr, <4 x i1>) -declare void @llvm.masked.compressstore.v8i32(<8 x i32>, ptr, <8 x i1>) -declare void @llvm.masked.compressstore.v16i32(<16 x i32>, ptr, <16 x i1>) -declare void @llvm.masked.compressstore.v32i32(<32 x i32>, ptr, <32 x i1>) -declare void @llvm.masked.compressstore.v64i32(<64 x i32>, ptr, <64 x i1>) - -declare void @llvm.masked.compressstore.v1i64(<1 x i64>, ptr, <1 x i1>) -declare void @llvm.masked.compressstore.v2i64(<2 x i64>, ptr, <2 x i1>) -declare void @llvm.masked.compressstore.v4i64(<4 x i64>, ptr, <4 x i1>) -declare void @llvm.masked.compressstore.v8i64(<8 x i64>, ptr, <8 x i1>) -declare void @llvm.masked.compressstore.v16i64(<16 x i64>, ptr, <16 x i1>) -declare void @llvm.masked.compressstore.v32i64(<32 x i64>, ptr, <32 x i1>) diff --git a/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll b/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll index 593f8e2612fec..3248f3f34eedf 100644 --- a/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll +++ b/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll @@ -77,4 +77,3 @@ entry: ret void } -declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32, <4 x i1>) diff --git a/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll index 
319d82f724ca7..1f45e45f23164 100644 --- a/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll @@ -69,7 +69,6 @@ define @ctlz_nxv1i8( %va) { %a = call @llvm.ctlz.nxv1i8( %va, i1 false) ret %a } -declare @llvm.ctlz.nxv1i8(, i1) define @ctlz_nxv2i8( %va) { ; CHECK-ZVE64X-LABEL: ctlz_nxv2i8: @@ -132,7 +131,6 @@ define @ctlz_nxv2i8( %va) { %a = call @llvm.ctlz.nxv2i8( %va, i1 false) ret %a } -declare @llvm.ctlz.nxv2i8(, i1) define @ctlz_nxv4i8( %va) { ; CHECK-ZVE64X-LABEL: ctlz_nxv4i8: @@ -195,7 +193,6 @@ define @ctlz_nxv4i8( %va) { %a = call @llvm.ctlz.nxv4i8( %va, i1 false) ret %a } -declare @llvm.ctlz.nxv4i8(, i1) define @ctlz_nxv8i8( %va) { ; CHECK-ZVE64X-LABEL: ctlz_nxv8i8: @@ -258,7 +255,6 @@ define @ctlz_nxv8i8( %va) { %a = call @llvm.ctlz.nxv8i8( %va, i1 false) ret %a } -declare @llvm.ctlz.nxv8i8(, i1) define @ctlz_nxv16i8( %va) { ; CHECK-ZVE64X-LABEL: ctlz_nxv16i8: @@ -321,7 +317,6 @@ define @ctlz_nxv16i8( %va) { %a = call @llvm.ctlz.nxv16i8( %va, i1 false) ret %a } -declare @llvm.ctlz.nxv16i8(, i1) define @ctlz_nxv32i8( %va) { ; CHECK-LABEL: ctlz_nxv32i8: @@ -356,7 +351,6 @@ define @ctlz_nxv32i8( %va) { %a = call @llvm.ctlz.nxv32i8( %va, i1 false) ret %a } -declare @llvm.ctlz.nxv32i8(, i1) define @ctlz_nxv64i8( %va) { ; CHECK-LABEL: ctlz_nxv64i8: @@ -391,7 +385,6 @@ define @ctlz_nxv64i8( %va) { %a = call @llvm.ctlz.nxv64i8( %va, i1 false) ret %a } -declare @llvm.ctlz.nxv64i8(, i1) define @ctlz_nxv1i16( %va) { ; CHECK-ZVE64X-LABEL: ctlz_nxv1i16: @@ -457,7 +450,6 @@ define @ctlz_nxv1i16( %va) { %a = call @llvm.ctlz.nxv1i16( %va, i1 false) ret %a } -declare @llvm.ctlz.nxv1i16(, i1) define @ctlz_nxv2i16( %va) { ; CHECK-ZVE64X-LABEL: ctlz_nxv2i16: @@ -523,7 +515,6 @@ define @ctlz_nxv2i16( %va) { %a = call @llvm.ctlz.nxv2i16( %va, i1 false) ret %a } -declare @llvm.ctlz.nxv2i16(, i1) define @ctlz_nxv4i16( %va) { ; CHECK-ZVE64X-LABEL: ctlz_nxv4i16: @@ -589,7 +580,6 @@ define @ctlz_nxv4i16( %va) { %a = call @llvm.ctlz.nxv4i16( 
%va, i1 false) ret %a } -declare @llvm.ctlz.nxv4i16(, i1) define @ctlz_nxv8i16( %va) { ; CHECK-ZVE64X-LABEL: ctlz_nxv8i16: @@ -655,7 +645,6 @@ define @ctlz_nxv8i16( %va) { %a = call @llvm.ctlz.nxv8i16( %va, i1 false) ret %a } -declare @llvm.ctlz.nxv8i16(, i1) define @ctlz_nxv16i16( %va) { ; CHECK-ZVE64X-LABEL: ctlz_nxv16i16: @@ -721,7 +710,6 @@ define @ctlz_nxv16i16( %va) { %a = call @llvm.ctlz.nxv16i16( %va, i1 false) ret %a } -declare @llvm.ctlz.nxv16i16(, i1) define @ctlz_nxv32i16( %va) { ; CHECK-LABEL: ctlz_nxv32i16: @@ -765,7 +753,6 @@ define @ctlz_nxv32i16( %va) { %a = call @llvm.ctlz.nxv32i16( %va, i1 false) ret %a } -declare @llvm.ctlz.nxv32i16(, i1) define @ctlz_nxv1i32( %va) { ; CHECK-ZVE64X-LABEL: ctlz_nxv1i32: @@ -837,7 +824,6 @@ define @ctlz_nxv1i32( %va) { %a = call @llvm.ctlz.nxv1i32( %va, i1 false) ret %a } -declare @llvm.ctlz.nxv1i32(, i1) define @ctlz_nxv2i32( %va) { ; CHECK-ZVE64X-LABEL: ctlz_nxv2i32: @@ -909,7 +895,6 @@ define @ctlz_nxv2i32( %va) { %a = call @llvm.ctlz.nxv2i32( %va, i1 false) ret %a } -declare @llvm.ctlz.nxv2i32(, i1) define @ctlz_nxv4i32( %va) { ; CHECK-ZVE64X-LABEL: ctlz_nxv4i32: @@ -981,7 +966,6 @@ define @ctlz_nxv4i32( %va) { %a = call @llvm.ctlz.nxv4i32( %va, i1 false) ret %a } -declare @llvm.ctlz.nxv4i32(, i1) define @ctlz_nxv8i32( %va) { ; CHECK-ZVE64X-LABEL: ctlz_nxv8i32: @@ -1053,7 +1037,6 @@ define @ctlz_nxv8i32( %va) { %a = call @llvm.ctlz.nxv8i32( %va, i1 false) ret %a } -declare @llvm.ctlz.nxv8i32(, i1) define @ctlz_nxv16i32( %va) { ; CHECK-ZVE64X-LABEL: ctlz_nxv16i32: @@ -1126,7 +1109,6 @@ define @ctlz_nxv16i32( %va) { %a = call @llvm.ctlz.nxv16i32( %va, i1 false) ret %a } -declare @llvm.ctlz.nxv16i32(, i1) define @ctlz_nxv1i64( %va) { ; RV32I-LABEL: ctlz_nxv1i64: @@ -1266,7 +1248,6 @@ define @ctlz_nxv1i64( %va) { %a = call @llvm.ctlz.nxv1i64( %va, i1 false) ret %a } -declare @llvm.ctlz.nxv1i64(, i1) define @ctlz_nxv2i64( %va) { ; RV32I-LABEL: ctlz_nxv2i64: @@ -1406,7 +1387,6 @@ define @ctlz_nxv2i64( %va) { %a = 
call @llvm.ctlz.nxv2i64( %va, i1 false) ret %a } -declare @llvm.ctlz.nxv2i64(, i1) define @ctlz_nxv4i64( %va) { ; RV32I-LABEL: ctlz_nxv4i64: @@ -1546,7 +1526,6 @@ define @ctlz_nxv4i64( %va) { %a = call @llvm.ctlz.nxv4i64( %va, i1 false) ret %a } -declare @llvm.ctlz.nxv4i64(, i1) define @ctlz_nxv8i64( %va) { ; RV32I-LABEL: ctlz_nxv8i64: @@ -1686,7 +1665,6 @@ define @ctlz_nxv8i64( %va) { %a = call @llvm.ctlz.nxv8i64( %va, i1 false) ret %a } -declare @llvm.ctlz.nxv8i64(, i1) define @ctlz_zero_undef_nxv1i8( %va) { ; CHECK-ZVE64X-LABEL: ctlz_zero_undef_nxv1i8: diff --git a/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll index 570ff34b0f23a..20f397b694180 100644 --- a/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+zvbb,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB -declare @llvm.vp.ctlz.nxv1i8(, i1 immarg, , i32) - define @vp_ctlz_nxv1i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv1i8: ; CHECK: # %bb.0: @@ -61,8 +59,6 @@ define @vp_ctlz_nxv1i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.ctlz.nxv2i8(, i1 immarg, , i32) - define @vp_ctlz_nxv2i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv2i8: ; CHECK: # %bb.0: @@ -114,8 +110,6 @@ define @vp_ctlz_nxv2i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.ctlz.nxv4i8(, i1 immarg, , i32) - define @vp_ctlz_nxv4i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv4i8: ; CHECK: # %bb.0: @@ -167,8 +161,6 @@ define @vp_ctlz_nxv4i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.ctlz.nxv8i8(, i1 immarg, , i32) - define @vp_ctlz_nxv8i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv8i8: ; CHECK: # %bb.0: @@ -220,8 +212,6 @@ define @vp_ctlz_nxv8i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.ctlz.nxv16i8(, i1 immarg, , i32) - define @vp_ctlz_nxv16i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv16i8: 
; CHECK: # %bb.0: @@ -273,8 +263,6 @@ define @vp_ctlz_nxv16i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctlz.nxv32i8(, i1 immarg, , i32) - define @vp_ctlz_nxv32i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv32i8: ; CHECK: # %bb.0: @@ -343,8 +331,6 @@ define @vp_ctlz_nxv32i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctlz.nxv64i8(, i1 immarg, , i32) - define @vp_ctlz_nxv64i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv64i8: ; CHECK: # %bb.0: @@ -413,8 +399,6 @@ define @vp_ctlz_nxv64i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctlz.nxv1i16(, i1 immarg, , i32) - define @vp_ctlz_nxv1i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv1i16: ; CHECK: # %bb.0: @@ -460,8 +444,6 @@ define @vp_ctlz_nxv1i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctlz.nxv2i16(, i1 immarg, , i32) - define @vp_ctlz_nxv2i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv2i16: ; CHECK: # %bb.0: @@ -507,8 +489,6 @@ define @vp_ctlz_nxv2i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctlz.nxv4i16(, i1 immarg, , i32) - define @vp_ctlz_nxv4i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv4i16: ; CHECK: # %bb.0: @@ -554,8 +534,6 @@ define @vp_ctlz_nxv4i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctlz.nxv8i16(, i1 immarg, , i32) - define @vp_ctlz_nxv8i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv8i16: ; CHECK: # %bb.0: @@ -601,8 +579,6 @@ define @vp_ctlz_nxv8i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctlz.nxv16i16(, i1 immarg, , i32) - define @vp_ctlz_nxv16i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv16i16: ; CHECK: # %bb.0: @@ -648,8 +624,6 @@ define @vp_ctlz_nxv16i16_unmasked( %va, i ret %v } -declare @llvm.vp.ctlz.nxv32i16(, i1 immarg, , i32) - define @vp_ctlz_nxv32i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv32i16: ; CHECK: # %bb.0: @@ -736,8 +710,6 @@ define @vp_ctlz_nxv32i16_unmasked( %va, i ret %v } -declare @llvm.vp.ctlz.nxv1i32(, i1 immarg, , i32) - define 
@vp_ctlz_nxv1i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv1i32: ; CHECK: # %bb.0: @@ -785,8 +757,6 @@ define @vp_ctlz_nxv1i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctlz.nxv2i32(, i1 immarg, , i32) - define @vp_ctlz_nxv2i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv2i32: ; CHECK: # %bb.0: @@ -834,8 +804,6 @@ define @vp_ctlz_nxv2i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctlz.nxv4i32(, i1 immarg, , i32) - define @vp_ctlz_nxv4i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv4i32: ; CHECK: # %bb.0: @@ -883,8 +851,6 @@ define @vp_ctlz_nxv4i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctlz.nxv8i32(, i1 immarg, , i32) - define @vp_ctlz_nxv8i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv8i32: ; CHECK: # %bb.0: @@ -932,8 +898,6 @@ define @vp_ctlz_nxv8i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctlz.nxv16i32(, i1 immarg, , i32) - define @vp_ctlz_nxv16i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv16i32: ; CHECK: # %bb.0: @@ -980,8 +944,6 @@ define @vp_ctlz_nxv16i32_unmasked( %va, i ret %v } -declare @llvm.vp.ctlz.nxv1i64(, i1 immarg, , i32) - define @vp_ctlz_nxv1i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv1i64: ; CHECK: # %bb.0: @@ -1030,8 +992,6 @@ define @vp_ctlz_nxv1i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctlz.nxv2i64(, i1 immarg, , i32) - define @vp_ctlz_nxv2i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv2i64: ; CHECK: # %bb.0: @@ -1080,8 +1040,6 @@ define @vp_ctlz_nxv2i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctlz.nxv4i64(, i1 immarg, , i32) - define @vp_ctlz_nxv4i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv4i64: ; CHECK: # %bb.0: @@ -1130,8 +1088,6 @@ define @vp_ctlz_nxv4i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctlz.nxv7i64(, i1 immarg, , i32) - define @vp_ctlz_nxv7i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv7i64: ; CHECK: # %bb.0: @@ -1180,8 +1136,6 @@ define @vp_ctlz_nxv7i64_unmasked( %va, 
i32 ret %v } -declare @llvm.vp.ctlz.nxv8i64(, i1 immarg, , i32) - define @vp_ctlz_nxv8i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv8i64: ; CHECK: # %bb.0: @@ -1230,8 +1184,6 @@ define @vp_ctlz_nxv8i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctlz.nxv16i64(, i1 immarg, , i32) - define @vp_ctlz_nxv16i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv16i64: ; CHECK: # %bb.0: @@ -1391,7 +1343,6 @@ define @vp_ctlz_zero_undef_nxv1i8_unmasked( % ret %v } - define @vp_ctlz_zero_undef_nxv2i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_zero_undef_nxv2i8: ; CHECK: # %bb.0: @@ -1439,7 +1390,6 @@ define @vp_ctlz_zero_undef_nxv2i8_unmasked( % ret %v } - define @vp_ctlz_zero_undef_nxv4i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_zero_undef_nxv4i8: ; CHECK: # %bb.0: @@ -1487,7 +1437,6 @@ define @vp_ctlz_zero_undef_nxv4i8_unmasked( % ret %v } - define @vp_ctlz_zero_undef_nxv8i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_zero_undef_nxv8i8: ; CHECK: # %bb.0: @@ -1535,7 +1484,6 @@ define @vp_ctlz_zero_undef_nxv8i8_unmasked( % ret %v } - define @vp_ctlz_zero_undef_nxv16i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_zero_undef_nxv16i8: ; CHECK: # %bb.0: @@ -1583,7 +1531,6 @@ define @vp_ctlz_zero_undef_nxv16i8_unmasked( %v } - define @vp_ctlz_zero_undef_nxv32i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_zero_undef_nxv32i8: ; CHECK: # %bb.0: @@ -1652,7 +1599,6 @@ define @vp_ctlz_zero_undef_nxv32i8_unmasked( %v } - define @vp_ctlz_zero_undef_nxv64i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_zero_undef_nxv64i8: ; CHECK: # %bb.0: @@ -1721,7 +1667,6 @@ define @vp_ctlz_zero_undef_nxv64i8_unmasked( %v } - define @vp_ctlz_zero_undef_nxv1i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_zero_undef_nxv1i16: ; CHECK: # %bb.0: @@ -1763,7 +1708,6 @@ define @vp_ctlz_zero_undef_nxv1i16_unmasked( %v } - define @vp_ctlz_zero_undef_nxv2i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_zero_undef_nxv2i16: 
; CHECK: # %bb.0: @@ -1805,7 +1749,6 @@ define @vp_ctlz_zero_undef_nxv2i16_unmasked( %v } - define @vp_ctlz_zero_undef_nxv4i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_zero_undef_nxv4i16: ; CHECK: # %bb.0: @@ -1847,7 +1790,6 @@ define @vp_ctlz_zero_undef_nxv4i16_unmasked( %v } - define @vp_ctlz_zero_undef_nxv8i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_zero_undef_nxv8i16: ; CHECK: # %bb.0: @@ -1889,7 +1831,6 @@ define @vp_ctlz_zero_undef_nxv8i16_unmasked( %v } - define @vp_ctlz_zero_undef_nxv16i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_zero_undef_nxv16i16: ; CHECK: # %bb.0: @@ -1931,7 +1872,6 @@ define @vp_ctlz_zero_undef_nxv16i16_unmasked( %v } - define @vp_ctlz_zero_undef_nxv32i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_zero_undef_nxv32i16: ; CHECK: # %bb.0: @@ -2018,7 +1958,6 @@ define @vp_ctlz_zero_undef_nxv32i16_unmasked( %v } - define @vp_ctlz_zero_undef_nxv1i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_zero_undef_nxv1i32: ; CHECK: # %bb.0: @@ -2062,7 +2001,6 @@ define @vp_ctlz_zero_undef_nxv1i32_unmasked( %v } - define @vp_ctlz_zero_undef_nxv2i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_zero_undef_nxv2i32: ; CHECK: # %bb.0: @@ -2106,7 +2044,6 @@ define @vp_ctlz_zero_undef_nxv2i32_unmasked( %v } - define @vp_ctlz_zero_undef_nxv4i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_zero_undef_nxv4i32: ; CHECK: # %bb.0: @@ -2150,7 +2087,6 @@ define @vp_ctlz_zero_undef_nxv4i32_unmasked( %v } - define @vp_ctlz_zero_undef_nxv8i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_zero_undef_nxv8i32: ; CHECK: # %bb.0: @@ -2194,7 +2130,6 @@ define @vp_ctlz_zero_undef_nxv8i32_unmasked( %v } - define @vp_ctlz_zero_undef_nxv16i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_zero_undef_nxv16i32: ; CHECK: # %bb.0: @@ -2237,7 +2172,6 @@ define @vp_ctlz_zero_undef_nxv16i32_unmasked( %v } - define @vp_ctlz_zero_undef_nxv1i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vp_ctlz_zero_undef_nxv1i64: ; CHECK: # %bb.0: @@ -2282,7 +2216,6 @@ define @vp_ctlz_zero_undef_nxv1i64_unmasked( %v } - define @vp_ctlz_zero_undef_nxv2i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_zero_undef_nxv2i64: ; CHECK: # %bb.0: @@ -2327,7 +2260,6 @@ define @vp_ctlz_zero_undef_nxv2i64_unmasked( %v } - define @vp_ctlz_zero_undef_nxv4i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_zero_undef_nxv4i64: ; CHECK: # %bb.0: @@ -2372,7 +2304,6 @@ define @vp_ctlz_zero_undef_nxv4i64_unmasked( %v } - define @vp_ctlz_zero_undef_nxv7i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_zero_undef_nxv7i64: ; CHECK: # %bb.0: @@ -2417,7 +2348,6 @@ define @vp_ctlz_zero_undef_nxv7i64_unmasked( %v } - define @vp_ctlz_zero_undef_nxv8i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_zero_undef_nxv8i64: ; CHECK: # %bb.0: @@ -2569,7 +2499,6 @@ define @vp_ctlz_zero_undef_nxv16i64_unmasked( @llvm.vp.ctlz.nxv1i9(, i1 immarg, , i32) define @vp_ctlz_nxv1i9( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_nxv1i9: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/ctpop-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ctpop-sdnode.ll index 1018130b472d1..d137ad54193a5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/ctpop-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ctpop-sdnode.ll @@ -30,7 +30,6 @@ define @ctpop_nxv1i8( %va) { %a = call @llvm.ctpop.nxv1i8( %va) ret %a } -declare @llvm.ctpop.nxv1i8() define @ctpop_nxv2i8( %va) { ; CHECK-LABEL: ctpop_nxv2i8: @@ -58,7 +57,6 @@ define @ctpop_nxv2i8( %va) { %a = call @llvm.ctpop.nxv2i8( %va) ret %a } -declare @llvm.ctpop.nxv2i8() define @ctpop_nxv4i8( %va) { ; CHECK-LABEL: ctpop_nxv4i8: @@ -86,7 +84,6 @@ define @ctpop_nxv4i8( %va) { %a = call @llvm.ctpop.nxv4i8( %va) ret %a } -declare @llvm.ctpop.nxv4i8() define @ctpop_nxv8i8( %va) { ; CHECK-LABEL: ctpop_nxv8i8: @@ -114,7 +111,6 @@ define @ctpop_nxv8i8( %va) { %a = call @llvm.ctpop.nxv8i8( %va) ret %a } -declare @llvm.ctpop.nxv8i8() define @ctpop_nxv16i8( %va) { ; 
CHECK-LABEL: ctpop_nxv16i8: @@ -142,7 +138,6 @@ define @ctpop_nxv16i8( %va) { %a = call @llvm.ctpop.nxv16i8( %va) ret %a } -declare @llvm.ctpop.nxv16i8() define @ctpop_nxv32i8( %va) { ; CHECK-LABEL: ctpop_nxv32i8: @@ -170,7 +165,6 @@ define @ctpop_nxv32i8( %va) { %a = call @llvm.ctpop.nxv32i8( %va) ret %a } -declare @llvm.ctpop.nxv32i8() define @ctpop_nxv64i8( %va) { ; CHECK-LABEL: ctpop_nxv64i8: @@ -198,7 +192,6 @@ define @ctpop_nxv64i8( %va) { %a = call @llvm.ctpop.nxv64i8( %va) ret %a } -declare @llvm.ctpop.nxv64i8() define @ctpop_nxv1i16( %va) { ; CHECK-LABEL: ctpop_nxv1i16: @@ -233,7 +226,6 @@ define @ctpop_nxv1i16( %va) { %a = call @llvm.ctpop.nxv1i16( %va) ret %a } -declare @llvm.ctpop.nxv1i16() define @ctpop_nxv2i16( %va) { ; CHECK-LABEL: ctpop_nxv2i16: @@ -268,7 +260,6 @@ define @ctpop_nxv2i16( %va) { %a = call @llvm.ctpop.nxv2i16( %va) ret %a } -declare @llvm.ctpop.nxv2i16() define @ctpop_nxv4i16( %va) { ; CHECK-LABEL: ctpop_nxv4i16: @@ -303,7 +294,6 @@ define @ctpop_nxv4i16( %va) { %a = call @llvm.ctpop.nxv4i16( %va) ret %a } -declare @llvm.ctpop.nxv4i16() define @ctpop_nxv8i16( %va) { ; CHECK-LABEL: ctpop_nxv8i16: @@ -338,7 +328,6 @@ define @ctpop_nxv8i16( %va) { %a = call @llvm.ctpop.nxv8i16( %va) ret %a } -declare @llvm.ctpop.nxv8i16() define @ctpop_nxv16i16( %va) { ; CHECK-LABEL: ctpop_nxv16i16: @@ -373,7 +362,6 @@ define @ctpop_nxv16i16( %va) { %a = call @llvm.ctpop.nxv16i16( %va) ret %a } -declare @llvm.ctpop.nxv16i16() define @ctpop_nxv32i16( %va) { ; CHECK-LABEL: ctpop_nxv32i16: @@ -408,7 +396,6 @@ define @ctpop_nxv32i16( %va) { %a = call @llvm.ctpop.nxv32i16( %va) ret %a } -declare @llvm.ctpop.nxv32i16() define @ctpop_nxv1i32( %va) { ; CHECK-LABEL: ctpop_nxv1i32: @@ -444,7 +431,6 @@ define @ctpop_nxv1i32( %va) { %a = call @llvm.ctpop.nxv1i32( %va) ret %a } -declare @llvm.ctpop.nxv1i32() define @ctpop_nxv2i32( %va) { ; CHECK-LABEL: ctpop_nxv2i32: @@ -480,7 +466,6 @@ define @ctpop_nxv2i32( %va) { %a = call @llvm.ctpop.nxv2i32( %va) ret %a } 
-declare @llvm.ctpop.nxv2i32() define @ctpop_nxv4i32( %va) { ; CHECK-LABEL: ctpop_nxv4i32: @@ -516,7 +501,6 @@ define @ctpop_nxv4i32( %va) { %a = call @llvm.ctpop.nxv4i32( %va) ret %a } -declare @llvm.ctpop.nxv4i32() define @ctpop_nxv8i32( %va) { ; CHECK-LABEL: ctpop_nxv8i32: @@ -552,7 +536,6 @@ define @ctpop_nxv8i32( %va) { %a = call @llvm.ctpop.nxv8i32( %va) ret %a } -declare @llvm.ctpop.nxv8i32() define @ctpop_nxv16i32( %va) { ; CHECK-LABEL: ctpop_nxv16i32: @@ -670,8 +653,6 @@ define @ctpop_nxv16i32_ne_one( %va) { ret %cmp } -declare @llvm.ctpop.nxv16i32() - define @ctpop_nxv1i64( %va) { ; RV32-LABEL: ctpop_nxv1i64: ; RV32: # %bb.0: @@ -753,7 +734,6 @@ define @ctpop_nxv1i64( %va) { %a = call @llvm.ctpop.nxv1i64( %va) ret %a } -declare @llvm.ctpop.nxv1i64() define @ctpop_nxv2i64( %va) { ; RV32-LABEL: ctpop_nxv2i64: @@ -836,7 +816,6 @@ define @ctpop_nxv2i64( %va) { %a = call @llvm.ctpop.nxv2i64( %va) ret %a } -declare @llvm.ctpop.nxv2i64() define @ctpop_nxv4i64( %va) { ; RV32-LABEL: ctpop_nxv4i64: @@ -919,7 +898,6 @@ define @ctpop_nxv4i64( %va) { %a = call @llvm.ctpop.nxv4i64( %va) ret %a } -declare @llvm.ctpop.nxv4i64() define @ctpop_nxv8i64( %va) { ; RV32-LABEL: ctpop_nxv8i64: @@ -1084,4 +1062,3 @@ define @ctpop_nxv8i64_ne_one( %va) { ret %cmp } -declare @llvm.ctpop.nxv8i64() diff --git a/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll index ee18a426c1b12..1bbefc65d3e39 100644 --- a/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+zvbb,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB -declare @llvm.vp.ctpop.nxv1i8(, , i32) - define @vp_ctpop_nxv1i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_nxv1i8: ; CHECK: # %bb.0: @@ -64,8 +62,6 @@ define @vp_ctpop_nxv1i8_unmasked( %va, i32 ze ret %v } -declare @llvm.vp.ctpop.nxv2i8(, , i32) - define @vp_ctpop_nxv2i8( %va, %m, i32 zeroext 
%evl) { ; CHECK-LABEL: vp_ctpop_nxv2i8: ; CHECK: # %bb.0: @@ -120,8 +116,6 @@ define @vp_ctpop_nxv2i8_unmasked( %va, i32 ze ret %v } -declare @llvm.vp.ctpop.nxv4i8(, , i32) - define @vp_ctpop_nxv4i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_nxv4i8: ; CHECK: # %bb.0: @@ -176,8 +170,6 @@ define @vp_ctpop_nxv4i8_unmasked( %va, i32 ze ret %v } -declare @llvm.vp.ctpop.nxv8i8(, , i32) - define @vp_ctpop_nxv8i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_nxv8i8: ; CHECK: # %bb.0: @@ -232,8 +224,6 @@ define @vp_ctpop_nxv8i8_unmasked( %va, i32 ze ret %v } -declare @llvm.vp.ctpop.nxv16i8(, , i32) - define @vp_ctpop_nxv16i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_nxv16i8: ; CHECK: # %bb.0: @@ -288,8 +278,6 @@ define @vp_ctpop_nxv16i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctpop.nxv32i8(, , i32) - define @vp_ctpop_nxv32i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_nxv32i8: ; CHECK: # %bb.0: @@ -344,8 +332,6 @@ define @vp_ctpop_nxv32i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctpop.nxv64i8(, , i32) - define @vp_ctpop_nxv64i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_nxv64i8: ; CHECK: # %bb.0: @@ -400,8 +386,6 @@ define @vp_ctpop_nxv64i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctpop.nxv1i16(, , i32) - define @vp_ctpop_nxv1i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_nxv1i16: ; CHECK: # %bb.0: @@ -470,8 +454,6 @@ define @vp_ctpop_nxv1i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctpop.nxv2i16(, , i32) - define @vp_ctpop_nxv2i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_nxv2i16: ; CHECK: # %bb.0: @@ -540,8 +522,6 @@ define @vp_ctpop_nxv2i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctpop.nxv4i16(, , i32) - define @vp_ctpop_nxv4i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_nxv4i16: ; CHECK: # %bb.0: @@ -610,8 +590,6 @@ define @vp_ctpop_nxv4i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctpop.nxv8i16(, , i32) - define @vp_ctpop_nxv8i16( %va, %m, i32 zeroext 
%evl) { ; CHECK-LABEL: vp_ctpop_nxv8i16: ; CHECK: # %bb.0: @@ -680,8 +658,6 @@ define @vp_ctpop_nxv8i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctpop.nxv16i16(, , i32) - define @vp_ctpop_nxv16i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_nxv16i16: ; CHECK: # %bb.0: @@ -750,8 +726,6 @@ define @vp_ctpop_nxv16i16_unmasked( %va, ret %v } -declare @llvm.vp.ctpop.nxv32i16(, , i32) - define @vp_ctpop_nxv32i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_nxv32i16: ; CHECK: # %bb.0: @@ -820,8 +794,6 @@ define @vp_ctpop_nxv32i16_unmasked( %va, ret %v } -declare @llvm.vp.ctpop.nxv1i32(, , i32) - define @vp_ctpop_nxv1i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_nxv1i32: ; CHECK: # %bb.0: @@ -892,8 +864,6 @@ define @vp_ctpop_nxv1i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctpop.nxv2i32(, , i32) - define @vp_ctpop_nxv2i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_nxv2i32: ; CHECK: # %bb.0: @@ -964,8 +934,6 @@ define @vp_ctpop_nxv2i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctpop.nxv4i32(, , i32) - define @vp_ctpop_nxv4i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_nxv4i32: ; CHECK: # %bb.0: @@ -1036,8 +1004,6 @@ define @vp_ctpop_nxv4i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctpop.nxv8i32(, , i32) - define @vp_ctpop_nxv8i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_nxv8i32: ; CHECK: # %bb.0: @@ -1108,8 +1074,6 @@ define @vp_ctpop_nxv8i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctpop.nxv16i32(, , i32) - define @vp_ctpop_nxv16i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_nxv16i32: ; CHECK: # %bb.0: @@ -1180,8 +1144,6 @@ define @vp_ctpop_nxv16i32_unmasked( %va, ret %v } -declare @llvm.vp.ctpop.nxv1i64(, , i32) - define @vp_ctpop_nxv1i64( %va, %m, i32 zeroext %evl) { ; RV32-LABEL: vp_ctpop_nxv1i64: ; RV32: # %bb.0: @@ -1346,8 +1308,6 @@ define @vp_ctpop_nxv1i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctpop.nxv2i64(, , i32) - define @vp_ctpop_nxv2i64( %va, %m, i32 
zeroext %evl) { ; RV32-LABEL: vp_ctpop_nxv2i64: ; RV32: # %bb.0: @@ -1512,8 +1472,6 @@ define @vp_ctpop_nxv2i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctpop.nxv4i64(, , i32) - define @vp_ctpop_nxv4i64( %va, %m, i32 zeroext %evl) { ; RV32-LABEL: vp_ctpop_nxv4i64: ; RV32: # %bb.0: @@ -1678,8 +1636,6 @@ define @vp_ctpop_nxv4i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctpop.nxv7i64(, , i32) - define @vp_ctpop_nxv7i64( %va, %m, i32 zeroext %evl) { ; RV32-LABEL: vp_ctpop_nxv7i64: ; RV32: # %bb.0: @@ -1844,8 +1800,6 @@ define @vp_ctpop_nxv7i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctpop.nxv8i64(, , i32) - define @vp_ctpop_nxv8i64( %va, %m, i32 zeroext %evl) { ; RV32-LABEL: vp_ctpop_nxv8i64: ; RV32: # %bb.0: @@ -2010,8 +1964,6 @@ define @vp_ctpop_nxv8i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.ctpop.nxv16i64(, , i32) - define @vp_ctpop_nxv16i64( %va, %m, i32 zeroext %evl) { ; RV32-LABEL: vp_ctpop_nxv16i64: ; RV32: # %bb.0: @@ -2430,7 +2382,6 @@ define @vp_ctpop_nxv16i64_unmasked( %va, } ; Test promotion. 
-declare @llvm.vp.ctpop.nxv1i9(, , i32) define @vp_ctpop_nxv1i9( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_nxv1i9: diff --git a/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll index faa3c48c49e50..79af06db4171e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll @@ -71,7 +71,6 @@ define @cttz_nxv1i8( %va) { %a = call @llvm.cttz.nxv1i8( %va, i1 false) ret %a } -declare @llvm.cttz.nxv1i8(, i1) define @cttz_nxv2i8( %va) { ; CHECK-ZVE64X-LABEL: cttz_nxv2i8: @@ -136,7 +135,6 @@ define @cttz_nxv2i8( %va) { %a = call @llvm.cttz.nxv2i8( %va, i1 false) ret %a } -declare @llvm.cttz.nxv2i8(, i1) define @cttz_nxv4i8( %va) { ; CHECK-ZVE64X-LABEL: cttz_nxv4i8: @@ -201,7 +199,6 @@ define @cttz_nxv4i8( %va) { %a = call @llvm.cttz.nxv4i8( %va, i1 false) ret %a } -declare @llvm.cttz.nxv4i8(, i1) define @cttz_nxv8i8( %va) { ; CHECK-ZVE64X-LABEL: cttz_nxv8i8: @@ -266,7 +263,6 @@ define @cttz_nxv8i8( %va) { %a = call @llvm.cttz.nxv8i8( %va, i1 false) ret %a } -declare @llvm.cttz.nxv8i8(, i1) define @cttz_nxv16i8( %va) { ; CHECK-ZVE64X-LABEL: cttz_nxv16i8: @@ -331,7 +327,6 @@ define @cttz_nxv16i8( %va) { %a = call @llvm.cttz.nxv16i8( %va, i1 false) ret %a } -declare @llvm.cttz.nxv16i8(, i1) define @cttz_nxv32i8( %va) { ; CHECK-LABEL: cttz_nxv32i8: @@ -362,7 +357,6 @@ define @cttz_nxv32i8( %va) { %a = call @llvm.cttz.nxv32i8( %va, i1 false) ret %a } -declare @llvm.cttz.nxv32i8(, i1) define @cttz_nxv64i8( %va) { ; CHECK-LABEL: cttz_nxv64i8: @@ -393,7 +387,6 @@ define @cttz_nxv64i8( %va) { %a = call @llvm.cttz.nxv64i8( %va, i1 false) ret %a } -declare @llvm.cttz.nxv64i8(, i1) define @cttz_nxv1i16( %va) { ; CHECK-ZVE64X-LABEL: cttz_nxv1i16: @@ -459,7 +452,6 @@ define @cttz_nxv1i16( %va) { %a = call @llvm.cttz.nxv1i16( %va, i1 false) ret %a } -declare @llvm.cttz.nxv1i16(, i1) define @cttz_nxv2i16( %va) { ; CHECK-ZVE64X-LABEL: cttz_nxv2i16: @@ -525,7 +517,6 @@ define @cttz_nxv2i16( %va) { 
%a = call @llvm.cttz.nxv2i16( %va, i1 false) ret %a } -declare @llvm.cttz.nxv2i16(, i1) define @cttz_nxv4i16( %va) { ; CHECK-ZVE64X-LABEL: cttz_nxv4i16: @@ -591,7 +582,6 @@ define @cttz_nxv4i16( %va) { %a = call @llvm.cttz.nxv4i16( %va, i1 false) ret %a } -declare @llvm.cttz.nxv4i16(, i1) define @cttz_nxv8i16( %va) { ; CHECK-ZVE64X-LABEL: cttz_nxv8i16: @@ -657,7 +647,6 @@ define @cttz_nxv8i16( %va) { %a = call @llvm.cttz.nxv8i16( %va, i1 false) ret %a } -declare @llvm.cttz.nxv8i16(, i1) define @cttz_nxv16i16( %va) { ; CHECK-ZVE64X-LABEL: cttz_nxv16i16: @@ -723,7 +712,6 @@ define @cttz_nxv16i16( %va) { %a = call @llvm.cttz.nxv16i16( %va, i1 false) ret %a } -declare @llvm.cttz.nxv16i16(, i1) define @cttz_nxv32i16( %va) { ; CHECK-LABEL: cttz_nxv32i16: @@ -761,7 +749,6 @@ define @cttz_nxv32i16( %va) { %a = call @llvm.cttz.nxv32i16( %va, i1 false) ret %a } -declare @llvm.cttz.nxv32i16(, i1) define @cttz_nxv1i32( %va) { ; CHECK-ZVE64X-LABEL: cttz_nxv1i32: @@ -831,7 +818,6 @@ define @cttz_nxv1i32( %va) { %a = call @llvm.cttz.nxv1i32( %va, i1 false) ret %a } -declare @llvm.cttz.nxv1i32(, i1) define @cttz_nxv2i32( %va) { ; CHECK-ZVE64X-LABEL: cttz_nxv2i32: @@ -901,7 +887,6 @@ define @cttz_nxv2i32( %va) { %a = call @llvm.cttz.nxv2i32( %va, i1 false) ret %a } -declare @llvm.cttz.nxv2i32(, i1) define @cttz_nxv4i32( %va) { ; CHECK-ZVE64X-LABEL: cttz_nxv4i32: @@ -971,7 +956,6 @@ define @cttz_nxv4i32( %va) { %a = call @llvm.cttz.nxv4i32( %va, i1 false) ret %a } -declare @llvm.cttz.nxv4i32(, i1) define @cttz_nxv8i32( %va) { ; CHECK-ZVE64X-LABEL: cttz_nxv8i32: @@ -1041,7 +1025,6 @@ define @cttz_nxv8i32( %va) { %a = call @llvm.cttz.nxv8i32( %va, i1 false) ret %a } -declare @llvm.cttz.nxv8i32(, i1) define @cttz_nxv16i32( %va) { ; CHECK-ZVE64X-LABEL: cttz_nxv16i32: @@ -1112,7 +1095,6 @@ define @cttz_nxv16i32( %va) { %a = call @llvm.cttz.nxv16i32( %va, i1 false) ret %a } -declare @llvm.cttz.nxv16i32(, i1) define @cttz_nxv1i64( %va) { ; RV32I-LABEL: cttz_nxv1i64: @@ -1236,7 +1218,6 @@ 
define @cttz_nxv1i64( %va) { %a = call @llvm.cttz.nxv1i64( %va, i1 false) ret %a } -declare @llvm.cttz.nxv1i64(, i1) define @cttz_nxv2i64( %va) { ; RV32I-LABEL: cttz_nxv2i64: @@ -1360,7 +1341,6 @@ define @cttz_nxv2i64( %va) { %a = call @llvm.cttz.nxv2i64( %va, i1 false) ret %a } -declare @llvm.cttz.nxv2i64(, i1) define @cttz_nxv4i64( %va) { ; RV32I-LABEL: cttz_nxv4i64: @@ -1484,7 +1464,6 @@ define @cttz_nxv4i64( %va) { %a = call @llvm.cttz.nxv4i64( %va, i1 false) ret %a } -declare @llvm.cttz.nxv4i64(, i1) define @cttz_nxv8i64( %va) { ; RV32I-LABEL: cttz_nxv8i64: @@ -1608,7 +1587,6 @@ define @cttz_nxv8i64( %va) { %a = call @llvm.cttz.nxv8i64( %va, i1 false) ret %a } -declare @llvm.cttz.nxv8i64(, i1) define @cttz_zero_undef_nxv1i8( %va) { ; CHECK-ZVE64X-LABEL: cttz_zero_undef_nxv1i8: diff --git a/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll index 52eaa51051631..c82ad17545a6a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+zvbb,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB -declare @llvm.vp.cttz.nxv1i8(, i1 immarg, , i32) - define @vp_cttz_nxv1i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_nxv1i8: ; CHECK: # %bb.0: @@ -70,8 +68,6 @@ define @vp_cttz_nxv1i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.cttz.nxv2i8(, i1 immarg, , i32) - define @vp_cttz_nxv2i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_nxv2i8: ; CHECK: # %bb.0: @@ -132,8 +128,6 @@ define @vp_cttz_nxv2i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.cttz.nxv4i8(, i1 immarg, , i32) - define @vp_cttz_nxv4i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_nxv4i8: ; CHECK: # %bb.0: @@ -194,8 +188,6 @@ define @vp_cttz_nxv4i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.cttz.nxv8i8(, i1 immarg, , i32) - define @vp_cttz_nxv8i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_nxv8i8: 
; CHECK: # %bb.0: @@ -256,8 +248,6 @@ define @vp_cttz_nxv8i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.cttz.nxv16i8(, i1 immarg, , i32) - define @vp_cttz_nxv16i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_nxv16i8: ; CHECK: # %bb.0: @@ -318,8 +308,6 @@ define @vp_cttz_nxv16i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.cttz.nxv32i8(, i1 immarg, , i32) - define @vp_cttz_nxv32i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_nxv32i8: ; CHECK: # %bb.0: @@ -380,8 +368,6 @@ define @vp_cttz_nxv32i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.cttz.nxv64i8(, i1 immarg, , i32) - define @vp_cttz_nxv64i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_nxv64i8: ; CHECK: # %bb.0: @@ -442,8 +428,6 @@ define @vp_cttz_nxv64i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.cttz.nxv1i16(, i1 immarg, , i32) - define @vp_cttz_nxv1i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_nxv1i16: ; CHECK: # %bb.0: @@ -518,8 +502,6 @@ define @vp_cttz_nxv1i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.cttz.nxv2i16(, i1 immarg, , i32) - define @vp_cttz_nxv2i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_nxv2i16: ; CHECK: # %bb.0: @@ -594,8 +576,6 @@ define @vp_cttz_nxv2i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.cttz.nxv4i16(, i1 immarg, , i32) - define @vp_cttz_nxv4i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_nxv4i16: ; CHECK: # %bb.0: @@ -670,8 +650,6 @@ define @vp_cttz_nxv4i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.cttz.nxv8i16(, i1 immarg, , i32) - define @vp_cttz_nxv8i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_nxv8i16: ; CHECK: # %bb.0: @@ -746,8 +724,6 @@ define @vp_cttz_nxv8i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.cttz.nxv16i16(, i1 immarg, , i32) - define @vp_cttz_nxv16i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_nxv16i16: ; CHECK: # %bb.0: @@ -822,8 +798,6 @@ define @vp_cttz_nxv16i16_unmasked( %va, i ret %v } -declare @llvm.vp.cttz.nxv32i16(, i1 immarg, , i32) - define 
@vp_cttz_nxv32i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_nxv32i16: ; CHECK: # %bb.0: @@ -898,8 +872,6 @@ define @vp_cttz_nxv32i16_unmasked( %va, i ret %v } -declare @llvm.vp.cttz.nxv1i32(, i1 immarg, , i32) - define @vp_cttz_nxv1i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_nxv1i32: ; CHECK: # %bb.0: @@ -976,8 +948,6 @@ define @vp_cttz_nxv1i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.cttz.nxv2i32(, i1 immarg, , i32) - define @vp_cttz_nxv2i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_nxv2i32: ; CHECK: # %bb.0: @@ -1054,8 +1024,6 @@ define @vp_cttz_nxv2i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.cttz.nxv4i32(, i1 immarg, , i32) - define @vp_cttz_nxv4i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_nxv4i32: ; CHECK: # %bb.0: @@ -1132,8 +1100,6 @@ define @vp_cttz_nxv4i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.cttz.nxv8i32(, i1 immarg, , i32) - define @vp_cttz_nxv8i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_nxv8i32: ; CHECK: # %bb.0: @@ -1210,8 +1176,6 @@ define @vp_cttz_nxv8i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.cttz.nxv16i32(, i1 immarg, , i32) - define @vp_cttz_nxv16i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_nxv16i32: ; CHECK: # %bb.0: @@ -1288,8 +1252,6 @@ define @vp_cttz_nxv16i32_unmasked( %va, i ret %v } -declare @llvm.vp.cttz.nxv1i64(, i1 immarg, , i32) - define @vp_cttz_nxv1i64( %va, %m, i32 zeroext %evl) { ; RV32-LABEL: vp_cttz_nxv1i64: ; RV32: # %bb.0: @@ -1466,8 +1428,6 @@ define @vp_cttz_nxv1i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.cttz.nxv2i64(, i1 immarg, , i32) - define @vp_cttz_nxv2i64( %va, %m, i32 zeroext %evl) { ; RV32-LABEL: vp_cttz_nxv2i64: ; RV32: # %bb.0: @@ -1644,8 +1604,6 @@ define @vp_cttz_nxv2i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.cttz.nxv4i64(, i1 immarg, , i32) - define @vp_cttz_nxv4i64( %va, %m, i32 zeroext %evl) { ; RV32-LABEL: vp_cttz_nxv4i64: ; RV32: # %bb.0: @@ -1822,8 +1780,6 @@ define @vp_cttz_nxv4i64_unmasked( %va, 
i32 ret %v } -declare @llvm.vp.cttz.nxv7i64(, i1 immarg, , i32) - define @vp_cttz_nxv7i64( %va, %m, i32 zeroext %evl) { ; RV32-LABEL: vp_cttz_nxv7i64: ; RV32: # %bb.0: @@ -2000,8 +1956,6 @@ define @vp_cttz_nxv7i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.cttz.nxv8i64(, i1 immarg, , i32) - define @vp_cttz_nxv8i64( %va, %m, i32 zeroext %evl) { ; RV32-LABEL: vp_cttz_nxv8i64: ; RV32: # %bb.0: @@ -2178,8 +2132,6 @@ define @vp_cttz_nxv8i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.cttz.nxv16i64(, i1 immarg, , i32) - define @vp_cttz_nxv16i64( %va, %m, i32 zeroext %evl) { ; RV32-LABEL: vp_cttz_nxv16i64: ; RV32: # %bb.0: @@ -2664,7 +2616,6 @@ define @vp_cttz_zero_undef_nxv1i8_unmasked( % ret %v } - define @vp_cttz_zero_undef_nxv2i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_zero_undef_nxv2i8: ; CHECK: # %bb.0: @@ -2718,7 +2669,6 @@ define @vp_cttz_zero_undef_nxv2i8_unmasked( % ret %v } - define @vp_cttz_zero_undef_nxv4i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_zero_undef_nxv4i8: ; CHECK: # %bb.0: @@ -2772,7 +2722,6 @@ define @vp_cttz_zero_undef_nxv4i8_unmasked( % ret %v } - define @vp_cttz_zero_undef_nxv8i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_zero_undef_nxv8i8: ; CHECK: # %bb.0: @@ -2826,7 +2775,6 @@ define @vp_cttz_zero_undef_nxv8i8_unmasked( % ret %v } - define @vp_cttz_zero_undef_nxv16i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_zero_undef_nxv16i8: ; CHECK: # %bb.0: @@ -2880,7 +2828,6 @@ define @vp_cttz_zero_undef_nxv16i8_unmasked( %v } - define @vp_cttz_zero_undef_nxv32i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_zero_undef_nxv32i8: ; CHECK: # %bb.0: @@ -2941,7 +2888,6 @@ define @vp_cttz_zero_undef_nxv32i8_unmasked( %v } - define @vp_cttz_zero_undef_nxv64i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_zero_undef_nxv64i8: ; CHECK: # %bb.0: @@ -3002,7 +2948,6 @@ define @vp_cttz_zero_undef_nxv64i8_unmasked( %v } - define @vp_cttz_zero_undef_nxv1i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vp_cttz_zero_undef_nxv1i16: ; CHECK: # %bb.0: @@ -3048,7 +2993,6 @@ define @vp_cttz_zero_undef_nxv1i16_unmasked( %v } - define @vp_cttz_zero_undef_nxv2i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_zero_undef_nxv2i16: ; CHECK: # %bb.0: @@ -3094,7 +3038,6 @@ define @vp_cttz_zero_undef_nxv2i16_unmasked( %v } - define @vp_cttz_zero_undef_nxv4i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_zero_undef_nxv4i16: ; CHECK: # %bb.0: @@ -3140,7 +3083,6 @@ define @vp_cttz_zero_undef_nxv4i16_unmasked( %v } - define @vp_cttz_zero_undef_nxv8i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_zero_undef_nxv8i16: ; CHECK: # %bb.0: @@ -3186,7 +3128,6 @@ define @vp_cttz_zero_undef_nxv8i16_unmasked( %v } - define @vp_cttz_zero_undef_nxv16i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_zero_undef_nxv16i16: ; CHECK: # %bb.0: @@ -3232,7 +3173,6 @@ define @vp_cttz_zero_undef_nxv16i16_unmasked( %v } - define @vp_cttz_zero_undef_nxv32i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_zero_undef_nxv32i16: ; CHECK: # %bb.0: @@ -3307,7 +3247,6 @@ define @vp_cttz_zero_undef_nxv32i16_unmasked( %v } - define @vp_cttz_zero_undef_nxv1i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_zero_undef_nxv1i32: ; CHECK: # %bb.0: @@ -3355,7 +3294,6 @@ define @vp_cttz_zero_undef_nxv1i32_unmasked( %v } - define @vp_cttz_zero_undef_nxv2i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_zero_undef_nxv2i32: ; CHECK: # %bb.0: @@ -3403,7 +3341,6 @@ define @vp_cttz_zero_undef_nxv2i32_unmasked( %v } - define @vp_cttz_zero_undef_nxv4i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_zero_undef_nxv4i32: ; CHECK: # %bb.0: @@ -3451,7 +3388,6 @@ define @vp_cttz_zero_undef_nxv4i32_unmasked( %v } - define @vp_cttz_zero_undef_nxv8i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_zero_undef_nxv8i32: ; CHECK: # %bb.0: @@ -3499,7 +3435,6 @@ define @vp_cttz_zero_undef_nxv8i32_unmasked( %v } - define @vp_cttz_zero_undef_nxv16i32( %va, %m, i32 zeroext %evl) { ; 
CHECK-LABEL: vp_cttz_zero_undef_nxv16i32: ; CHECK: # %bb.0: @@ -3546,7 +3481,6 @@ define @vp_cttz_zero_undef_nxv16i32_unmasked( %v } - define @vp_cttz_zero_undef_nxv1i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_zero_undef_nxv1i64: ; CHECK: # %bb.0: @@ -3595,7 +3529,6 @@ define @vp_cttz_zero_undef_nxv1i64_unmasked( %v } - define @vp_cttz_zero_undef_nxv2i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_zero_undef_nxv2i64: ; CHECK: # %bb.0: @@ -3644,7 +3577,6 @@ define @vp_cttz_zero_undef_nxv2i64_unmasked( %v } - define @vp_cttz_zero_undef_nxv4i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_zero_undef_nxv4i64: ; CHECK: # %bb.0: @@ -3693,7 +3625,6 @@ define @vp_cttz_zero_undef_nxv4i64_unmasked( %v } - define @vp_cttz_zero_undef_nxv7i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_zero_undef_nxv7i64: ; CHECK: # %bb.0: @@ -3742,7 +3673,6 @@ define @vp_cttz_zero_undef_nxv7i64_unmasked( %v } - define @vp_cttz_zero_undef_nxv8i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_zero_undef_nxv8i64: ; CHECK: # %bb.0: @@ -3906,7 +3836,6 @@ define @vp_cttz_zero_undef_nxv16i64_unmasked( @llvm.vp.cttz.nxv1i9(, i1 immarg, , i32) define @vp_cttz_nxv1i9( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_nxv1i9: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/debug-info-rvv-dbg-value.mir b/llvm/test/CodeGen/RISCV/rvv/debug-info-rvv-dbg-value.mir index 5221fa73525cc..1e8aea92c9780 100644 --- a/llvm/test/CodeGen/RISCV/rvv/debug-info-rvv-dbg-value.mir +++ b/llvm/test/CodeGen/RISCV/rvv/debug-info-rvv-dbg-value.mir @@ -57,9 +57,6 @@ unreachable, !dbg !8 } - ; Function Attrs: nounwind readnone speculatable willreturn - declare void @llvm.dbg.value(metadata, metadata, metadata) - !llvm.dbg.cu = !{!0} !llvm.debugify = !{!3, !3} !llvm.module.flags = !{!4} @@ -92,7 +89,6 @@ !25 = !{!26} !26 = !DISubrange(lowerBound: 0, upperBound: !DIExpression(DW_OP_bregx, 7202, 0, DW_OP_constu, 1, DW_OP_div, DW_OP_constu, 1, DW_OP_mul)) - ... 
--- name: foo diff --git a/llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll index 31fa5d025156f..9baa2f71abb8f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll +++ b/llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll @@ -81,8 +81,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare i64 @llvm.vscale.i64() - define void @sink_splat_add_scalable(ptr nocapture %a, i32 signext %x) { ; NO-SINK-LABEL: sink_splat_add_scalable: ; NO-SINK: # %bb.0: # %entry @@ -261,8 +259,6 @@ for.body: ; preds = %for.body.preheader, br i1 %cmp.not, label %for.cond.cleanup, label %for.body } -declare <4 x i32> @llvm.vp.add.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define void @sink_splat_vp_add(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) { ; NO-SINK-LABEL: sink_splat_vp_add: ; NO-SINK: # %bb.0: # %entry @@ -578,8 +574,6 @@ for.body: ; preds = %for.body.preheader, br i1 %cmp.not, label %for.cond.cleanup, label %for.body } -declare <4 x float> @llvm.vp.fadd.v4i32(<4 x float>, <4 x float>, <4 x i1>, i32) - define void @sink_splat_vp_fadd(ptr nocapture %a, float %x, <4 x i1> %m, i32 zeroext %vl) { ; NO-SINK-LABEL: sink_splat_vp_fadd: ; NO-SINK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/double-round-conv.ll b/llvm/test/CodeGen/RISCV/rvv/double-round-conv.ll index 51dc7b0714d7f..fb4391a5e30c1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/double-round-conv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/double-round-conv.ll @@ -8,8 +8,6 @@ ; trunc ; ================================================================================ -declare @llvm.trunc.nxv1f64() - define @trunc_nxv1f64_to_si8( %x) { ; RV32-LABEL: trunc_nxv1f64_to_si8: ; RV32: # %bb.0: @@ -254,8 +252,6 @@ define @trunc_nxv1f64_to_ui64( %x) { ; trunc ; ================================================================================ -declare @llvm.trunc.nxv4f64() - define @trunc_nxv4f64_to_si8( %x) { ; 
RV32-LABEL: trunc_nxv4f64_to_si8: ; RV32: # %bb.0: @@ -500,8 +496,6 @@ define @trunc_nxv4f64_to_ui64( %x) { ; ceil ; ================================================================================ -declare @llvm.ceil.nxv1f64() - define @ceil_nxv1f64_to_si8( %x) { ; RV32-LABEL: ceil_nxv1f64_to_si8: ; RV32: # %bb.0: @@ -778,8 +772,6 @@ define @ceil_nxv1f64_to_ui64( %x) { ; ceil ; ================================================================================ -declare @llvm.ceil.nxv4f64() - define @ceil_nxv4f64_to_si8( %x) { ; RV32-LABEL: ceil_nxv4f64_to_si8: ; RV32: # %bb.0: @@ -1056,8 +1048,6 @@ define @ceil_nxv4f64_to_ui64( %x) { ; rint ; ================================================================================ -declare @llvm.rint.nxv1f64() - define @rint_nxv1f64_to_si8( %x) { ; RV32-LABEL: rint_nxv1f64_to_si8: ; RV32: # %bb.0: @@ -1302,8 +1292,6 @@ define @rint_nxv1f64_to_ui64( %x) { ; rint ; ================================================================================ -declare @llvm.rint.nxv4f64() - define @rint_nxv4f64_to_si8( %x) { ; RV32-LABEL: rint_nxv4f64_to_si8: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/expand-no-v.ll b/llvm/test/CodeGen/RISCV/rvv/expand-no-v.ll index 81b2b6594890e..fab8363dc7cde 100644 --- a/llvm/test/CodeGen/RISCV/rvv/expand-no-v.ll +++ b/llvm/test/CodeGen/RISCV/rvv/expand-no-v.ll @@ -3,8 +3,6 @@ ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV64 ; Should expand to scalar instructions and do not crash -declare i32 @llvm.vp.reduce.add.v4i32(i32, <4 x i32>, <4 x i1>, i32) - define i32 @vpreduce_add_v4i32(i32 %s, <4 x i32> %v, <4 x i1> %m, i32 %evl) { ; RV32-LABEL: vpreduce_add_v4i32: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll index 7c9a283dd54bc..2245ce8f5fa12 100644 --- a/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll @@ -548,27 
+548,3 @@ define @extract_nxv6bf16_nxv12bf16_6( %res } -declare @llvm.vector.extract.nxv6f16.nxv12f16(, i64) - -declare @llvm.vector.extract.nxv1i8.nxv4i8( %vec, i64 %idx) -declare @llvm.vector.extract.nxv1i8.nxv8i8( %vec, i64 %idx) - -declare @llvm.vector.extract.nxv2i8.nxv32i8( %vec, i64 %idx) - -declare @llvm.vector.extract.nxv1i32.nxv2i32( %vec, i64 %idx) - -declare @llvm.vector.extract.nxv2i32.nxv8i32( %vec, i64 %idx) -declare @llvm.vector.extract.nxv4i32.nxv8i32( %vec, i64 %idx) - -declare @llvm.vector.extract.nxv1i32.nxv16i32( %vec, i64 %idx) -declare @llvm.vector.extract.nxv2i32.nxv16i32( %vec, i64 %idx) -declare @llvm.vector.extract.nxv4i32.nxv16i32( %vec, i64 %idx) -declare @llvm.vector.extract.nxv8i32.nxv16i32( %vec, i64 %idx) - -declare @llvm.vector.extract.nxv2f16.nxv16f16( %vec, i64 %idx) - -declare @llvm.vector.extract.nxv4i1( %vec, i64 %idx) -declare @llvm.vector.extract.nxv16i1( %vec, i64 %idx) - -declare @llvm.vector.extract.nxv2i1( %vec, i64 %idx) -declare @llvm.vector.extract.nxv8i1( %vec, i64 %idx) diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll index 903c0dcaba2d8..241f619b1133f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll @@ -1472,8 +1472,6 @@ define void @store_vfmv_f_s_nxv8f64(ptr %x, ptr %p) { ret void } -declare double @llvm.riscv.vfmv.f.s.nxv8f64() - define float @extractelt_fadd_nxv4f32_splat( %x) { ; CHECK-LABEL: extractelt_fadd_nxv4f32_splat: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fceil-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fceil-constrained-sdnode.ll index 316a84f98be2b..1f4eaea90628b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fceil-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fceil-constrained-sdnode.ll @@ -26,7 +26,6 @@ define @ceil_nxv1f16( %x) strictfp { %a = call @llvm.experimental.constrained.ceil.nxv1f16( %x, metadata !"fpexcept.strict") ret %a } -declare 
@llvm.experimental.constrained.ceil.nxv1f16(, metadata) define @ceil_nxv2f16( %x) strictfp { ; CHECK-LABEL: ceil_nxv2f16: @@ -50,7 +49,6 @@ define @ceil_nxv2f16( %x) strictfp { %a = call @llvm.experimental.constrained.ceil.nxv2f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.ceil.nxv2f16(, metadata) define @ceil_nxv4f16( %x) strictfp { ; CHECK-LABEL: ceil_nxv4f16: @@ -74,7 +72,6 @@ define @ceil_nxv4f16( %x) strictfp { %a = call @llvm.experimental.constrained.ceil.nxv4f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.ceil.nxv4f16(, metadata) define @ceil_nxv8f16( %x) strictfp { ; CHECK-LABEL: ceil_nxv8f16: @@ -98,7 +95,6 @@ define @ceil_nxv8f16( %x) strictfp { %a = call @llvm.experimental.constrained.ceil.nxv8f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.ceil.nxv8f16(, metadata) define @ceil_nxv16f16( %x) strictfp { ; CHECK-LABEL: ceil_nxv16f16: @@ -122,7 +118,6 @@ define @ceil_nxv16f16( %x) strictfp { %a = call @llvm.experimental.constrained.ceil.nxv16f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.ceil.nxv16f16(, metadata) define @ceil_nxv32f16( %x) strictfp { ; CHECK-LABEL: ceil_nxv32f16: @@ -146,7 +141,6 @@ define @ceil_nxv32f16( %x) strictfp { %a = call @llvm.experimental.constrained.ceil.nxv32f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.ceil.nxv32f16(, metadata) define @ceil_nxv1f32( %x) strictfp { ; CHECK-LABEL: ceil_nxv1f32: @@ -169,7 +163,6 @@ define @ceil_nxv1f32( %x) strictfp { %a = call @llvm.experimental.constrained.ceil.nxv1f32( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.ceil.nxv1f32(, metadata) define @ceil_nxv2f32( %x) strictfp { ; CHECK-LABEL: ceil_nxv2f32: @@ -192,7 +185,6 @@ define @ceil_nxv2f32( %x) strictfp { %a = call @llvm.experimental.constrained.ceil.nxv2f32( %x, metadata !"fpexcept.strict") ret %a } -declare 
@llvm.experimental.constrained.ceil.nxv2f32(, metadata) define @ceil_nxv4f32( %x) strictfp { ; CHECK-LABEL: ceil_nxv4f32: @@ -215,7 +207,6 @@ define @ceil_nxv4f32( %x) strictfp { %a = call @llvm.experimental.constrained.ceil.nxv4f32( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.ceil.nxv4f32(, metadata) define @ceil_nxv8f32( %x) strictfp { ; CHECK-LABEL: ceil_nxv8f32: @@ -238,7 +229,6 @@ define @ceil_nxv8f32( %x) strictfp { %a = call @llvm.experimental.constrained.ceil.nxv8f32( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.ceil.nxv8f32(, metadata) define @ceil_nxv16f32( %x) strictfp { ; CHECK-LABEL: ceil_nxv16f32: @@ -261,7 +251,6 @@ define @ceil_nxv16f32( %x) strictfp { %a = call @llvm.experimental.constrained.ceil.nxv16f32( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.ceil.nxv16f32(, metadata) define @ceil_nxv1f64( %x) strictfp { ; RV32-LABEL: ceil_nxv1f64: @@ -303,7 +292,6 @@ define @ceil_nxv1f64( %x) strictfp { %a = call @llvm.experimental.constrained.ceil.nxv1f64( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.ceil.nxv1f64(, metadata) define @ceil_nxv2f64( %x) strictfp { ; RV32-LABEL: ceil_nxv2f64: @@ -345,7 +333,6 @@ define @ceil_nxv2f64( %x) strictfp { %a = call @llvm.experimental.constrained.ceil.nxv2f64( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.ceil.nxv2f64(, metadata) define @ceil_nxv4f64( %x) strictfp { ; RV32-LABEL: ceil_nxv4f64: @@ -387,7 +374,6 @@ define @ceil_nxv4f64( %x) strictfp { %a = call @llvm.experimental.constrained.ceil.nxv4f64( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.ceil.nxv4f64(, metadata) define @ceil_nxv8f64( %x) strictfp { ; RV32-LABEL: ceil_nxv8f64: @@ -429,4 +415,3 @@ define @ceil_nxv8f64( %x) strictfp { %a = call @llvm.experimental.constrained.ceil.nxv8f64( %x, metadata !"fpexcept.strict") ret %a } -declare 
@llvm.experimental.constrained.ceil.nxv8f64(, metadata) diff --git a/llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll index 56edec1cc7a68..504930f07bb13 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll @@ -202,7 +202,6 @@ define @ceil_nxv1f16( %x) { %a = call @llvm.ceil.nxv1f16( %x) ret %a } -declare @llvm.ceil.nxv1f16() define @ceil_nxv2f16( %x) { ; ZVFH-LABEL: ceil_nxv2f16: @@ -242,7 +241,6 @@ define @ceil_nxv2f16( %x) { %a = call @llvm.ceil.nxv2f16( %x) ret %a } -declare @llvm.ceil.nxv2f16() define @ceil_nxv4f16( %x) { ; ZVFH-LABEL: ceil_nxv4f16: @@ -282,7 +280,6 @@ define @ceil_nxv4f16( %x) { %a = call @llvm.ceil.nxv4f16( %x) ret %a } -declare @llvm.ceil.nxv4f16() define @ceil_nxv8f16( %x) { ; ZVFH-LABEL: ceil_nxv8f16: @@ -322,7 +319,6 @@ define @ceil_nxv8f16( %x) { %a = call @llvm.ceil.nxv8f16( %x) ret %a } -declare @llvm.ceil.nxv8f16() define @ceil_nxv16f16( %x) { ; ZVFH-LABEL: ceil_nxv16f16: @@ -362,7 +358,6 @@ define @ceil_nxv16f16( %x) { %a = call @llvm.ceil.nxv16f16( %x) ret %a } -declare @llvm.ceil.nxv16f16() define @ceil_nxv32f16( %x) { ; ZVFH-LABEL: ceil_nxv32f16: @@ -416,7 +411,6 @@ define @ceil_nxv32f16( %x) { %a = call @llvm.ceil.nxv32f16( %x) ret %a } -declare @llvm.ceil.nxv32f16() define @ceil_nxv1f32( %x) { ; CHECK-LABEL: ceil_nxv1f32: @@ -436,7 +430,6 @@ define @ceil_nxv1f32( %x) { %a = call @llvm.ceil.nxv1f32( %x) ret %a } -declare @llvm.ceil.nxv1f32() define @ceil_nxv2f32( %x) { ; CHECK-LABEL: ceil_nxv2f32: @@ -456,7 +449,6 @@ define @ceil_nxv2f32( %x) { %a = call @llvm.ceil.nxv2f32( %x) ret %a } -declare @llvm.ceil.nxv2f32() define @ceil_nxv4f32( %x) { ; CHECK-LABEL: ceil_nxv4f32: @@ -476,7 +468,6 @@ define @ceil_nxv4f32( %x) { %a = call @llvm.ceil.nxv4f32( %x) ret %a } -declare @llvm.ceil.nxv4f32() define @ceil_nxv8f32( %x) { ; CHECK-LABEL: ceil_nxv8f32: @@ -496,7 +487,6 @@ define @ceil_nxv8f32( %x) { %a = call @llvm.ceil.nxv8f32( %x) ret %a } 
-declare @llvm.ceil.nxv8f32() define @ceil_nxv16f32( %x) { ; CHECK-LABEL: ceil_nxv16f32: @@ -516,7 +506,6 @@ define @ceil_nxv16f32( %x) { %a = call @llvm.ceil.nxv16f32( %x) ret %a } -declare @llvm.ceil.nxv16f32() define @ceil_nxv1f64( %x) { ; RV32ZFH-LABEL: ceil_nxv1f64: @@ -583,7 +572,6 @@ define @ceil_nxv1f64( %x) { %a = call @llvm.ceil.nxv1f64( %x) ret %a } -declare @llvm.ceil.nxv1f64() define @ceil_nxv2f64( %x) { ; RV32ZFH-LABEL: ceil_nxv2f64: @@ -650,7 +638,6 @@ define @ceil_nxv2f64( %x) { %a = call @llvm.ceil.nxv2f64( %x) ret %a } -declare @llvm.ceil.nxv2f64() define @ceil_nxv4f64( %x) { ; RV32ZFH-LABEL: ceil_nxv4f64: @@ -717,7 +704,6 @@ define @ceil_nxv4f64( %x) { %a = call @llvm.ceil.nxv4f64( %x) ret %a } -declare @llvm.ceil.nxv4f64() define @ceil_nxv8f64( %x) { ; RV32ZFH-LABEL: ceil_nxv8f64: @@ -784,4 +770,3 @@ define @ceil_nxv8f64( %x) { %a = call @llvm.ceil.nxv8f64( %x) ret %a } -declare @llvm.ceil.nxv8f64() diff --git a/llvm/test/CodeGen/RISCV/rvv/ffloor-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ffloor-constrained-sdnode.ll index 7045fc7c50847..3a7de21c14390 100644 --- a/llvm/test/CodeGen/RISCV/rvv/ffloor-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ffloor-constrained-sdnode.ll @@ -26,7 +26,6 @@ define @floor_nxv1f16( %x) strictfp { %a = call @llvm.experimental.constrained.floor.nxv1f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.floor.nxv1f16(, metadata) define @floor_nxv2f16( %x) strictfp { ; CHECK-LABEL: floor_nxv2f16: @@ -50,7 +49,6 @@ define @floor_nxv2f16( %x) strictfp { %a = call @llvm.experimental.constrained.floor.nxv2f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.floor.nxv2f16(, metadata) define @floor_nxv4f16( %x) strictfp { ; CHECK-LABEL: floor_nxv4f16: @@ -74,7 +72,6 @@ define @floor_nxv4f16( %x) strictfp { %a = call @llvm.experimental.constrained.floor.nxv4f16( %x, metadata !"fpexcept.strict") ret %a } -declare 
@llvm.experimental.constrained.floor.nxv4f16(, metadata) define @floor_nxv8f16( %x) strictfp { ; CHECK-LABEL: floor_nxv8f16: @@ -98,7 +95,6 @@ define @floor_nxv8f16( %x) strictfp { %a = call @llvm.experimental.constrained.floor.nxv8f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.floor.nxv8f16(, metadata) define @floor_nxv16f16( %x) strictfp { ; CHECK-LABEL: floor_nxv16f16: @@ -122,7 +118,6 @@ define @floor_nxv16f16( %x) strictfp { %a = call @llvm.experimental.constrained.floor.nxv16f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.floor.nxv16f16(, metadata) define @floor_nxv32f16( %x) strictfp { ; CHECK-LABEL: floor_nxv32f16: @@ -146,7 +141,6 @@ define @floor_nxv32f16( %x) strictfp { %a = call @llvm.experimental.constrained.floor.nxv32f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.floor.nxv32f16(, metadata) define @floor_nxv1f32( %x) strictfp { ; CHECK-LABEL: floor_nxv1f32: @@ -169,7 +163,6 @@ define @floor_nxv1f32( %x) strictfp { %a = call @llvm.experimental.constrained.floor.nxv1f32( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.floor.nxv1f32(, metadata) define @floor_nxv2f32( %x) strictfp { ; CHECK-LABEL: floor_nxv2f32: @@ -192,7 +185,6 @@ define @floor_nxv2f32( %x) strictfp { %a = call @llvm.experimental.constrained.floor.nxv2f32( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.floor.nxv2f32(, metadata) define @floor_nxv4f32( %x) strictfp { ; CHECK-LABEL: floor_nxv4f32: @@ -215,7 +207,6 @@ define @floor_nxv4f32( %x) strictfp { %a = call @llvm.experimental.constrained.floor.nxv4f32( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.floor.nxv4f32(, metadata) define @floor_nxv8f32( %x) strictfp { ; CHECK-LABEL: floor_nxv8f32: @@ -238,7 +229,6 @@ define @floor_nxv8f32( %x) strictfp { %a = call @llvm.experimental.constrained.floor.nxv8f32( %x, metadata 
!"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.floor.nxv8f32(, metadata) define @floor_nxv16f32( %x) strictfp { ; CHECK-LABEL: floor_nxv16f32: @@ -261,7 +251,6 @@ define @floor_nxv16f32( %x) strictfp %a = call @llvm.experimental.constrained.floor.nxv16f32( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.floor.nxv16f32(, metadata) define @floor_nxv1f64( %x) strictfp { ; RV32-LABEL: floor_nxv1f64: @@ -303,7 +292,6 @@ define @floor_nxv1f64( %x) strictfp { %a = call @llvm.experimental.constrained.floor.nxv1f64( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.floor.nxv1f64(, metadata) define @floor_nxv2f64( %x) strictfp { ; RV32-LABEL: floor_nxv2f64: @@ -345,7 +333,6 @@ define @floor_nxv2f64( %x) strictfp { %a = call @llvm.experimental.constrained.floor.nxv2f64( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.floor.nxv2f64(, metadata) define @floor_nxv4f64( %x) strictfp { ; RV32-LABEL: floor_nxv4f64: @@ -387,7 +374,6 @@ define @floor_nxv4f64( %x) strictfp { %a = call @llvm.experimental.constrained.floor.nxv4f64( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.floor.nxv4f64(, metadata) define @floor_nxv8f64( %x) strictfp { ; RV32-LABEL: floor_nxv8f64: @@ -429,4 +415,3 @@ define @floor_nxv8f64( %x) strictfp { %a = call @llvm.experimental.constrained.floor.nxv8f64( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.floor.nxv8f64(, metadata) diff --git a/llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll index 9adbca55bcd01..326ac8c8c607d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll @@ -34,7 +34,6 @@ define @floor_nxv1bf16( %x) { %a = call @llvm.floor.nxv1bf16( %x) ret %a } -declare @llvm.floor.nxv1bf16() define @floor_nxv2bf16( %x) { ; CHECK-LABEL: floor_nxv2bf16: @@ -58,7 +57,6 @@ 
define @floor_nxv2bf16( %x) { %a = call @llvm.floor.nxv2bf16( %x) ret %a } -declare @llvm.floor.nxv2bf16() define @floor_nxv4bf16( %x) { ; CHECK-LABEL: floor_nxv4bf16: @@ -82,7 +80,6 @@ define @floor_nxv4bf16( %x) { %a = call @llvm.floor.nxv4bf16( %x) ret %a } -declare @llvm.floor.nxv4bf16() define @floor_nxv8bf16( %x) { ; CHECK-LABEL: floor_nxv8bf16: @@ -106,7 +103,6 @@ define @floor_nxv8bf16( %x) { %a = call @llvm.floor.nxv8bf16( %x) ret %a } -declare @llvm.floor.nxv8bf16() define @floor_nxv16bf16( %x) { ; CHECK-LABEL: floor_nxv16bf16: @@ -130,7 +126,6 @@ define @floor_nxv16bf16( %x) { %a = call @llvm.floor.nxv16bf16( %x) ret %a } -declare @llvm.floor.nxv16bf16() define @floor_nxv32bf16( %x) { ; CHECK-LABEL: floor_nxv32bf16: @@ -168,7 +163,6 @@ define @floor_nxv32bf16( %x) { %a = call @llvm.floor.nxv32bf16( %x) ret %a } -declare @llvm.floor.nxv32bf16() define @floor_nxv1f16( %x) { ; ZVFH-LABEL: floor_nxv1f16: @@ -208,7 +202,6 @@ define @floor_nxv1f16( %x) { %a = call @llvm.floor.nxv1f16( %x) ret %a } -declare @llvm.floor.nxv1f16() define @floor_nxv2f16( %x) { ; ZVFH-LABEL: floor_nxv2f16: @@ -248,7 +241,6 @@ define @floor_nxv2f16( %x) { %a = call @llvm.floor.nxv2f16( %x) ret %a } -declare @llvm.floor.nxv2f16() define @floor_nxv4f16( %x) { ; ZVFH-LABEL: floor_nxv4f16: @@ -288,7 +280,6 @@ define @floor_nxv4f16( %x) { %a = call @llvm.floor.nxv4f16( %x) ret %a } -declare @llvm.floor.nxv4f16() define @floor_nxv8f16( %x) { ; ZVFH-LABEL: floor_nxv8f16: @@ -328,7 +319,6 @@ define @floor_nxv8f16( %x) { %a = call @llvm.floor.nxv8f16( %x) ret %a } -declare @llvm.floor.nxv8f16() define @floor_nxv16f16( %x) { ; ZVFH-LABEL: floor_nxv16f16: @@ -368,7 +358,6 @@ define @floor_nxv16f16( %x) { %a = call @llvm.floor.nxv16f16( %x) ret %a } -declare @llvm.floor.nxv16f16() define @floor_nxv32f16( %x) { ; ZVFH-LABEL: floor_nxv32f16: @@ -422,7 +411,6 @@ define @floor_nxv32f16( %x) { %a = call @llvm.floor.nxv32f16( %x) ret %a } -declare @llvm.floor.nxv32f16() define @floor_nxv1f32( %x) { ; 
CHECK-LABEL: floor_nxv1f32: @@ -442,7 +430,6 @@ define @floor_nxv1f32( %x) { %a = call @llvm.floor.nxv1f32( %x) ret %a } -declare @llvm.floor.nxv1f32() define @floor_nxv2f32( %x) { ; CHECK-LABEL: floor_nxv2f32: @@ -462,7 +449,6 @@ define @floor_nxv2f32( %x) { %a = call @llvm.floor.nxv2f32( %x) ret %a } -declare @llvm.floor.nxv2f32() define @floor_nxv4f32( %x) { ; CHECK-LABEL: floor_nxv4f32: @@ -482,7 +468,6 @@ define @floor_nxv4f32( %x) { %a = call @llvm.floor.nxv4f32( %x) ret %a } -declare @llvm.floor.nxv4f32() define @floor_nxv8f32( %x) { ; CHECK-LABEL: floor_nxv8f32: @@ -502,7 +487,6 @@ define @floor_nxv8f32( %x) { %a = call @llvm.floor.nxv8f32( %x) ret %a } -declare @llvm.floor.nxv8f32() define @floor_nxv16f32( %x) { ; CHECK-LABEL: floor_nxv16f32: @@ -522,7 +506,6 @@ define @floor_nxv16f32( %x) { %a = call @llvm.floor.nxv16f32( %x) ret %a } -declare @llvm.floor.nxv16f32() define @floor_nxv1f64( %x) { ; RV32ZFH-LABEL: floor_nxv1f64: @@ -589,7 +572,6 @@ define @floor_nxv1f64( %x) { %a = call @llvm.floor.nxv1f64( %x) ret %a } -declare @llvm.floor.nxv1f64() define @floor_nxv2f64( %x) { ; RV32ZFH-LABEL: floor_nxv2f64: @@ -656,7 +638,6 @@ define @floor_nxv2f64( %x) { %a = call @llvm.floor.nxv2f64( %x) ret %a } -declare @llvm.floor.nxv2f64() define @floor_nxv4f64( %x) { ; RV32ZFH-LABEL: floor_nxv4f64: @@ -723,7 +704,6 @@ define @floor_nxv4f64( %x) { %a = call @llvm.floor.nxv4f64( %x) ret %a } -declare @llvm.floor.nxv4f64() define @floor_nxv8f64( %x) { ; RV32ZFH-LABEL: floor_nxv8f64: @@ -790,4 +770,3 @@ define @floor_nxv8f64( %x) { %a = call @llvm.floor.nxv8f64( %x) ret %a } -declare @llvm.floor.nxv8f64() diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll index bd1209a17b534..0f26832cffdc8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll @@ -689,39 +689,6 @@ define <16 x i8> @umaxmin_v16i8_com1(<16 x i8> %0, <16 x i8> %1) { ret <16 x i8> 
%sub } -declare <8 x i8> @llvm.abs.v8i8(<8 x i8>, i1) -declare <16 x i8> @llvm.abs.v16i8(<16 x i8>, i1) - -declare <4 x i16> @llvm.abs.v4i16(<4 x i16>, i1) -declare <8 x i16> @llvm.abs.v8i16(<8 x i16>, i1) -declare <16 x i16> @llvm.abs.v16i16(<16 x i16>, i1) - -declare <2 x i32> @llvm.abs.v2i32(<2 x i32>, i1) -declare <4 x i32> @llvm.abs.v4i32(<4 x i32>, i1) -declare <8 x i32> @llvm.abs.v8i32(<8 x i32>, i1) - -declare <2 x i64> @llvm.abs.v2i64(<2 x i64>, i1) -declare <4 x i64> @llvm.abs.v4i64(<4 x i64>, i1) - -declare <2 x i128> @llvm.abs.v2i128(<2 x i128>, i1) - -declare <16 x i8> @llvm.smax.v16i8(<16 x i8>, <16 x i8>) -declare <8 x i16> @llvm.smax.v8i16(<8 x i16>, <8 x i16>) -declare <4 x i32> @llvm.smax.v4i32(<4 x i32>, <4 x i32>) -declare <2 x i64> @llvm.smax.v2i64(<2 x i64>, <2 x i64>) -declare <16 x i8> @llvm.smin.v16i8(<16 x i8>, <16 x i8>) -declare <8 x i16> @llvm.smin.v8i16(<8 x i16>, <8 x i16>) -declare <4 x i32> @llvm.smin.v4i32(<4 x i32>, <4 x i32>) -declare <2 x i64> @llvm.smin.v2i64(<2 x i64>, <2 x i64>) -declare <16 x i8> @llvm.umax.v16i8(<16 x i8>, <16 x i8>) -declare <8 x i16> @llvm.umax.v8i16(<8 x i16>, <8 x i16>) -declare <4 x i32> @llvm.umax.v4i32(<4 x i32>, <4 x i32>) -declare <2 x i64> @llvm.umax.v2i64(<2 x i64>, <2 x i64>) -declare <16 x i8> @llvm.umin.v16i8(<16 x i8>, <16 x i8>) -declare <8 x i16> @llvm.umin.v8i16(<8 x i16>, <8 x i16>) -declare <4 x i32> @llvm.umin.v4i32(<4 x i32>, <4 x i32>) -declare <2 x i64> @llvm.umin.v2i64(<2 x i64>, <2 x i64>) - ;; NOTE: These prefixes are unused and the list is autogenerated. 
Do not add tests below this line: ; RV32: {{.*}} ; RV64: {{.*}} diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll index 84da351de76ba..fa81e1f6f3514 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK -declare <2 x i8> @llvm.vp.abs.v2i8(<2 x i8>, i1 immarg, <2 x i1>, i32) - define <2 x i8> @vp_abs_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_v2i8: ; CHECK: # %bb.0: @@ -28,8 +26,6 @@ define <2 x i8> @vp_abs_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.abs.v4i8(<4 x i8>, i1 immarg, <4 x i1>, i32) - define <4 x i8> @vp_abs_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_v4i8: ; CHECK: # %bb.0: @@ -52,8 +48,6 @@ define <4 x i8> @vp_abs_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ret <4 x i8> %v } -declare <8 x i8> @llvm.vp.abs.v8i8(<8 x i8>, i1 immarg, <8 x i1>, i32) - define <8 x i8> @vp_abs_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_v8i8: ; CHECK: # %bb.0: @@ -76,8 +70,6 @@ define <8 x i8> @vp_abs_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.abs.v16i8(<16 x i8>, i1 immarg, <16 x i1>, i32) - define <16 x i8> @vp_abs_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_v16i8: ; CHECK: # %bb.0: @@ -100,8 +92,6 @@ define <16 x i8> @vp_abs_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) { ret <16 x i8> %v } -declare <2 x i16> @llvm.vp.abs.v2i16(<2 x i16>, i1 immarg, <2 x i1>, i32) - define <2 x i16> @vp_abs_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_v2i16: ; CHECK: # %bb.0: @@ -124,8 +114,6 @@ define <2 x i16> @vp_abs_v2i16_unmasked(<2 x i16> %va, 
i32 zeroext %evl) { ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.abs.v4i16(<4 x i16>, i1 immarg, <4 x i1>, i32) - define <4 x i16> @vp_abs_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_v4i16: ; CHECK: # %bb.0: @@ -148,8 +136,6 @@ define <4 x i16> @vp_abs_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.abs.v8i16(<8 x i16>, i1 immarg, <8 x i1>, i32) - define <8 x i16> @vp_abs_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_v8i16: ; CHECK: # %bb.0: @@ -172,8 +158,6 @@ define <8 x i16> @vp_abs_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) { ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.abs.v16i16(<16 x i16>, i1 immarg, <16 x i1>, i32) - define <16 x i16> @vp_abs_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_v16i16: ; CHECK: # %bb.0: @@ -196,8 +180,6 @@ define <16 x i16> @vp_abs_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) { ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.abs.v2i32(<2 x i32>, i1 immarg, <2 x i1>, i32) - define <2 x i32> @vp_abs_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_v2i32: ; CHECK: # %bb.0: @@ -220,8 +202,6 @@ define <2 x i32> @vp_abs_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) { ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.abs.v4i32(<4 x i32>, i1 immarg, <4 x i1>, i32) - define <4 x i32> @vp_abs_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_v4i32: ; CHECK: # %bb.0: @@ -244,8 +224,6 @@ define <4 x i32> @vp_abs_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.abs.v8i32(<8 x i32>, i1 immarg, <8 x i1>, i32) - define <8 x i32> @vp_abs_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_v8i32: ; CHECK: # %bb.0: @@ -268,8 +246,6 @@ define <8 x i32> @vp_abs_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) { ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.abs.v16i32(<16 x 
i32>, i1 immarg, <16 x i1>, i32) - define <16 x i32> @vp_abs_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_v16i32: ; CHECK: # %bb.0: @@ -292,8 +268,6 @@ define <16 x i32> @vp_abs_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) { ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.abs.v2i64(<2 x i64>, i1 immarg, <2 x i1>, i32) - define <2 x i64> @vp_abs_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_v2i64: ; CHECK: # %bb.0: @@ -316,8 +290,6 @@ define <2 x i64> @vp_abs_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) { ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.abs.v4i64(<4 x i64>, i1 immarg, <4 x i1>, i32) - define <4 x i64> @vp_abs_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_v4i64: ; CHECK: # %bb.0: @@ -340,8 +312,6 @@ define <4 x i64> @vp_abs_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.abs.v8i64(<8 x i64>, i1 immarg, <8 x i1>, i32) - define <8 x i64> @vp_abs_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_v8i64: ; CHECK: # %bb.0: @@ -364,8 +334,6 @@ define <8 x i64> @vp_abs_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) { ret <8 x i64> %v } -declare <15 x i64> @llvm.vp.abs.v15i64(<15 x i64>, i1 immarg, <15 x i1>, i32) - define <15 x i64> @vp_abs_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_v15i64: ; CHECK: # %bb.0: @@ -388,8 +356,6 @@ define <15 x i64> @vp_abs_v15i64_unmasked(<15 x i64> %va, i32 zeroext %evl) { ret <15 x i64> %v } -declare <16 x i64> @llvm.vp.abs.v16i64(<16 x i64>, i1 immarg, <16 x i1>, i32) - define <16 x i64> @vp_abs_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_v16i64: ; CHECK: # %bb.0: @@ -412,8 +378,6 @@ define <16 x i64> @vp_abs_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) { ret <16 x i64> %v } -declare <32 x i64> @llvm.vp.abs.v32i64(<32 x i64>, i1 immarg, <32 x i1>, i32) - define <32 x i64> 
@vp_abs_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_abs_v32i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll index 2356237d790b6..847722ae6b8ab 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll @@ -16,7 +16,6 @@ define void @abs_v16i8(ptr %x) { store <16 x i8> %b, ptr %x ret void } -declare <16 x i8> @llvm.abs.v16i8(<16 x i8>, i1) define void @abs_v8i16(ptr %x) { ; CHECK-LABEL: abs_v8i16: @@ -32,7 +31,6 @@ define void @abs_v8i16(ptr %x) { store <8 x i16> %b, ptr %x ret void } -declare <8 x i16> @llvm.abs.v8i16(<8 x i16>, i1) define void @abs_v6i16(ptr %x) { ; CHECK-LABEL: abs_v6i16: @@ -48,7 +46,6 @@ define void @abs_v6i16(ptr %x) { store <6 x i16> %b, ptr %x ret void } -declare <6 x i16> @llvm.abs.v6i16(<6 x i16>, i1) define void @abs_v4i32(ptr %x) { ; CHECK-LABEL: abs_v4i32: @@ -64,7 +61,6 @@ define void @abs_v4i32(ptr %x) { store <4 x i32> %b, ptr %x ret void } -declare <4 x i32> @llvm.abs.v4i32(<4 x i32>, i1) define void @abs_v2i64(ptr %x) { ; CHECK-LABEL: abs_v2i64: @@ -80,7 +76,6 @@ define void @abs_v2i64(ptr %x) { store <2 x i64> %b, ptr %x ret void } -declare <2 x i64> @llvm.abs.v2i64(<2 x i64>, i1) define void @abs_v32i8(ptr %x) { ; CHECK-LABEL: abs_v32i8: @@ -97,7 +92,6 @@ define void @abs_v32i8(ptr %x) { store <32 x i8> %b, ptr %x ret void } -declare <32 x i8> @llvm.abs.v32i8(<32 x i8>, i1) define void @abs_v16i16(ptr %x) { ; CHECK-LABEL: abs_v16i16: @@ -113,7 +107,6 @@ define void @abs_v16i16(ptr %x) { store <16 x i16> %b, ptr %x ret void } -declare <16 x i16> @llvm.abs.v16i16(<16 x i16>, i1) define void @abs_v8i32(ptr %x) { ; CHECK-LABEL: abs_v8i32: @@ -129,7 +122,6 @@ define void @abs_v8i32(ptr %x) { store <8 x i32> %b, ptr %x ret void } -declare <8 x i32> @llvm.abs.v8i32(<8 x i32>, i1) define void @abs_v4i64(ptr %x) { ; CHECK-LABEL: abs_v4i64: @@ -145,7 +137,6 @@ define 
void @abs_v4i64(ptr %x) { store <4 x i64> %b, ptr %x ret void } -declare <4 x i64> @llvm.abs.v4i64(<4 x i64>, i1) define void @abs_v4i64_of_sext_v4i8(ptr %x) { ; CHECK-LABEL: abs_v4i64_of_sext_v4i8: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll index 3d83065009f28..f436bbb9a66ca 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 -declare <2 x i8> @llvm.vp.bitreverse.v2i8(<2 x i8>, <2 x i1>, i32) - define <2 x i8> @vp_bitreverse_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_v2i8: ; CHECK: # %bb.0: @@ -58,8 +56,6 @@ define <2 x i8> @vp_bitreverse_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.bitreverse.v4i8(<4 x i8>, <4 x i1>, i32) - define <4 x i8> @vp_bitreverse_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_v4i8: ; CHECK: # %bb.0: @@ -112,8 +108,6 @@ define <4 x i8> @vp_bitreverse_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ret <4 x i8> %v } -declare <8 x i8> @llvm.vp.bitreverse.v8i8(<8 x i8>, <8 x i1>, i32) - define <8 x i8> @vp_bitreverse_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_v8i8: ; CHECK: # %bb.0: @@ -166,8 +160,6 @@ define <8 x i8> @vp_bitreverse_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.bitreverse.v16i8(<16 x i8>, <16 x i1>, i32) - define <16 x i8> @vp_bitreverse_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_v16i8: ; CHECK: # %bb.0: @@ -220,8 +212,6 @@ define <16 x i8> @vp_bitreverse_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) ret <16 x i8> %v } -declare <2 x i16> 
@llvm.vp.bitreverse.v2i16(<2 x i16>, <2 x i1>, i32) - define <2 x i16> @vp_bitreverse_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_v2i16: ; CHECK: # %bb.0: @@ -288,8 +278,6 @@ define <2 x i16> @vp_bitreverse_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.bitreverse.v4i16(<4 x i16>, <4 x i1>, i32) - define <4 x i16> @vp_bitreverse_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_v4i16: ; CHECK: # %bb.0: @@ -356,8 +344,6 @@ define <4 x i16> @vp_bitreverse_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.bitreverse.v8i16(<8 x i16>, <8 x i1>, i32) - define <8 x i16> @vp_bitreverse_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_v8i16: ; CHECK: # %bb.0: @@ -424,8 +410,6 @@ define <8 x i16> @vp_bitreverse_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.bitreverse.v16i16(<16 x i16>, <16 x i1>, i32) - define <16 x i16> @vp_bitreverse_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_v16i16: ; CHECK: # %bb.0: @@ -492,8 +476,6 @@ define <16 x i16> @vp_bitreverse_v16i16_unmasked(<16 x i16> %va, i32 zeroext %ev ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.bitreverse.v2i32(<2 x i32>, <2 x i1>, i32) - define <2 x i32> @vp_bitreverse_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_v2i32: ; CHECK: # %bb.0: @@ -576,8 +558,6 @@ define <2 x i32> @vp_bitreverse_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.bitreverse.v4i32(<4 x i32>, <4 x i1>, i32) - define <4 x i32> @vp_bitreverse_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_v4i32: ; CHECK: # %bb.0: @@ -660,8 +640,6 @@ define <4 x i32> @vp_bitreverse_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) ret <4 x i32> %v } -declare <8 x 
i32> @llvm.vp.bitreverse.v8i32(<8 x i32>, <8 x i1>, i32) - define <8 x i32> @vp_bitreverse_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_v8i32: ; CHECK: # %bb.0: @@ -744,8 +722,6 @@ define <8 x i32> @vp_bitreverse_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.bitreverse.v16i32(<16 x i32>, <16 x i1>, i32) - define <16 x i32> @vp_bitreverse_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_v16i32: ; CHECK: # %bb.0: @@ -828,8 +804,6 @@ define <16 x i32> @vp_bitreverse_v16i32_unmasked(<16 x i32> %va, i32 zeroext %ev ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.bitreverse.v2i64(<2 x i64>, <2 x i1>, i32) - define <2 x i64> @vp_bitreverse_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_bitreverse_v2i64: ; RV32: # %bb.0: @@ -1101,8 +1075,6 @@ define <2 x i64> @vp_bitreverse_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.bitreverse.v4i64(<4 x i64>, <4 x i1>, i32) - define <4 x i64> @vp_bitreverse_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_bitreverse_v4i64: ; RV32: # %bb.0: @@ -1374,8 +1346,6 @@ define <4 x i64> @vp_bitreverse_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.bitreverse.v8i64(<8 x i64>, <8 x i1>, i32) - define <8 x i64> @vp_bitreverse_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_bitreverse_v8i64: ; RV32: # %bb.0: @@ -1647,8 +1617,6 @@ define <8 x i64> @vp_bitreverse_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) ret <8 x i64> %v } -declare <15 x i64> @llvm.vp.bitreverse.v15i64(<15 x i64>, <15 x i1>, i32) - define <15 x i64> @vp_bitreverse_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_bitreverse_v15i64: ; RV32: # %bb.0: @@ -2026,8 +1994,6 @@ define <15 x i64> @vp_bitreverse_v15i64_unmasked(<15 x i64> %va, i32 zeroext %ev ret <15 x i64> %v } 
-declare <16 x i64> @llvm.vp.bitreverse.v16i64(<16 x i64>, <16 x i1>, i32) - define <16 x i64> @vp_bitreverse_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_bitreverse_v16i64: ; RV32: # %bb.0: @@ -2405,8 +2371,6 @@ define <16 x i64> @vp_bitreverse_v16i64_unmasked(<16 x i64> %va, i32 zeroext %ev ret <16 x i64> %v } -declare <128 x i16> @llvm.vp.bitreverse.v128i16(<128 x i16>, <128 x i1>, i32) - define <128 x i16> @vp_bitreverse_v128i16(<128 x i16> %va, <128 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_v128i16: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse.ll index 6d9793c12153e..10b48d2ebe1f5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse.ll @@ -49,7 +49,6 @@ define void @bitreverse_v8i16(ptr %x, ptr %y) { store <8 x i16> %c, ptr %x ret void } -declare <8 x i16> @llvm.bitreverse.v8i16(<8 x i16>) define void @bitreverse_v4i32(ptr %x, ptr %y) { ; CHECK-LABEL: bitreverse_v4i32: @@ -104,7 +103,6 @@ define void @bitreverse_v4i32(ptr %x, ptr %y) { store <4 x i32> %c, ptr %x ret void } -declare <4 x i32> @llvm.bitreverse.v4i32(<4 x i32>) define void @bitreverse_v2i64(ptr %x, ptr %y) { ; RV32-LABEL: bitreverse_v2i64: @@ -254,7 +252,6 @@ define void @bitreverse_v2i64(ptr %x, ptr %y) { store <2 x i64> %c, ptr %x ret void } -declare <2 x i64> @llvm.bitreverse.v2i64(<2 x i64>) define void @bitreverse_v16i16(ptr %x, ptr %y) { ; CHECK-LABEL: bitreverse_v16i16: @@ -301,7 +298,6 @@ define void @bitreverse_v16i16(ptr %x, ptr %y) { store <16 x i16> %c, ptr %x ret void } -declare <16 x i16> @llvm.bitreverse.v16i16(<16 x i16>) define void @bitreverse_v8i32(ptr %x, ptr %y) { ; CHECK-LABEL: bitreverse_v8i32: @@ -356,7 +352,6 @@ define void @bitreverse_v8i32(ptr %x, ptr %y) { store <8 x i32> %c, ptr %x ret void } -declare <8 x i32> @llvm.bitreverse.v8i32(<8 x i32>) define 
void @bitreverse_v4i64(ptr %x, ptr %y) { ; RV32-LABEL: bitreverse_v4i64: @@ -506,4 +501,3 @@ define void @bitreverse_v4i64(ptr %x, ptr %y) { store <4 x i64> %c, ptr %x ret void } -declare <4 x i64> @llvm.bitreverse.v4i64(<4 x i64>) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll index b7ca932bb1c45..eca94ccb9bf7f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 -declare <2 x i16> @llvm.vp.bswap.v2i16(<2 x i16>, <2 x i1>, i32) - define <2 x i16> @vp_bswap_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bswap_v2i16: ; CHECK: # %bb.0: @@ -30,8 +28,6 @@ define <2 x i16> @vp_bswap_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) { ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.bswap.v4i16(<4 x i16>, <4 x i1>, i32) - define <4 x i16> @vp_bswap_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bswap_v4i16: ; CHECK: # %bb.0: @@ -56,8 +52,6 @@ define <4 x i16> @vp_bswap_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.bswap.v8i16(<8 x i16>, <8 x i1>, i32) - define <8 x i16> @vp_bswap_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bswap_v8i16: ; CHECK: # %bb.0: @@ -82,8 +76,6 @@ define <8 x i16> @vp_bswap_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) { ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.bswap.v16i16(<16 x i16>, <16 x i1>, i32) - define <16 x i16> @vp_bswap_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bswap_v16i16: ; CHECK: # %bb.0: @@ -108,8 +100,6 @@ define <16 x i16> @vp_bswap_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) { ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.bswap.v2i32(<2 x i32>, <2 x i1>, i32) 
- define <2 x i32> @vp_bswap_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bswap_v2i32: ; CHECK: # %bb.0: @@ -150,8 +140,6 @@ define <2 x i32> @vp_bswap_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) { ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.bswap.v4i32(<4 x i32>, <4 x i1>, i32) - define <4 x i32> @vp_bswap_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bswap_v4i32: ; CHECK: # %bb.0: @@ -192,8 +180,6 @@ define <4 x i32> @vp_bswap_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.bswap.v8i32(<8 x i32>, <8 x i1>, i32) - define <8 x i32> @vp_bswap_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bswap_v8i32: ; CHECK: # %bb.0: @@ -234,8 +220,6 @@ define <8 x i32> @vp_bswap_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) { ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.bswap.v16i32(<16 x i32>, <16 x i1>, i32) - define <16 x i32> @vp_bswap_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bswap_v16i32: ; CHECK: # %bb.0: @@ -276,8 +260,6 @@ define <16 x i32> @vp_bswap_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) { ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.bswap.v2i64(<2 x i64>, <2 x i1>, i32) - define <2 x i64> @vp_bswap_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_bswap_v2i64: ; RV32: # %bb.0: @@ -436,8 +418,6 @@ define <2 x i64> @vp_bswap_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) { ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.bswap.v4i64(<4 x i64>, <4 x i1>, i32) - define <4 x i64> @vp_bswap_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_bswap_v4i64: ; RV32: # %bb.0: @@ -596,8 +576,6 @@ define <4 x i64> @vp_bswap_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.bswap.v8i64(<8 x i64>, <8 x i1>, i32) - define <8 x i64> @vp_bswap_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: 
vp_bswap_v8i64: ; RV32: # %bb.0: @@ -756,8 +734,6 @@ define <8 x i64> @vp_bswap_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) { ret <8 x i64> %v } -declare <15 x i64> @llvm.vp.bswap.v15i64(<15 x i64>, <15 x i1>, i32) - define <15 x i64> @vp_bswap_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_bswap_v15i64: ; RV32: # %bb.0: @@ -1021,8 +997,6 @@ define <15 x i64> @vp_bswap_v15i64_unmasked(<15 x i64> %va, i32 zeroext %evl) { ret <15 x i64> %v } -declare <16 x i64> @llvm.vp.bswap.v16i64(<16 x i64>, <16 x i1>, i32) - define <16 x i64> @vp_bswap_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_bswap_v16i64: ; RV32: # %bb.0: @@ -1286,8 +1260,6 @@ define <16 x i64> @vp_bswap_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) { ret <16 x i64> %v } -declare <128 x i16> @llvm.vp.bswap.v128i16(<128 x i16>, <128 x i1>, i32) - define <128 x i16> @vp_bswap_v128i16(<128 x i16> %va, <128 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bswap_v128i16: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll index 5b823442c8b04..4479c30151956 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll @@ -28,7 +28,6 @@ define void @bswap_v8i16(ptr %x, ptr %y) { store <8 x i16> %c, ptr %x ret void } -declare <8 x i16> @llvm.bswap.v8i16(<8 x i16>) define void @bswap_v4i32(ptr %x, ptr %y) { ; CHECK-LABEL: bswap_v4i32: @@ -62,7 +61,6 @@ define void @bswap_v4i32(ptr %x, ptr %y) { store <4 x i32> %c, ptr %x ret void } -declare <4 x i32> @llvm.bswap.v4i32(<4 x i32>) define void @bswap_v2i64(ptr %x, ptr %y) { ; RV32-LABEL: bswap_v2i64: @@ -155,7 +153,6 @@ define void @bswap_v2i64(ptr %x, ptr %y) { store <2 x i64> %c, ptr %x ret void } -declare <2 x i64> @llvm.bswap.v2i64(<2 x i64>) define void @bswap_v16i16(ptr %x, ptr %y) { ; CHECK-LABEL: bswap_v16i16: @@ -181,7 +178,6 @@ define void @bswap_v16i16(ptr 
%x, ptr %y) { store <16 x i16> %c, ptr %x ret void } -declare <16 x i16> @llvm.bswap.v16i16(<16 x i16>) define void @bswap_v8i32(ptr %x, ptr %y) { ; CHECK-LABEL: bswap_v8i32: @@ -215,7 +211,6 @@ define void @bswap_v8i32(ptr %x, ptr %y) { store <8 x i32> %c, ptr %x ret void } -declare <8 x i32> @llvm.bswap.v8i32(<8 x i32>) define void @bswap_v4i64(ptr %x, ptr %y) { ; RV32-LABEL: bswap_v4i64: @@ -308,4 +303,3 @@ define void @bswap_v4i64(ptr %x, ptr %y) { store <4 x i64> %c, ptr %x ret void } -declare <4 x i64> @llvm.bswap.v4i64(<4 x i64>) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll index 4b42c517379ad..466d5d4b8e80a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN -declare <2 x half> @llvm.vp.ceil.v2f16(<2 x half>, <2 x i1>, i32) - define <2 x half> @vp_ceil_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_ceil_v2f16: ; ZVFH: # %bb.0: @@ -96,8 +94,6 @@ define <2 x half> @vp_ceil_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) { ret <2 x half> %v } -declare <4 x half> @llvm.vp.ceil.v4f16(<4 x half>, <4 x i1>, i32) - define <4 x half> @vp_ceil_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_ceil_v4f16: ; ZVFH: # %bb.0: @@ -184,8 +180,6 @@ define <4 x half> @vp_ceil_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) { ret <4 x half> %v } -declare <8 x half> @llvm.vp.ceil.v8f16(<8 x half>, <8 x i1>, i32) - define <8 x half> @vp_ceil_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_ceil_v8f16: ; ZVFH: # %bb.0: @@ -272,8 +266,6 @@ define <8 x half> @vp_ceil_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) { ret <8 x half> %v } -declare <16 x half> @llvm.vp.ceil.v16f16(<16 x half>, 
<16 x i1>, i32) - define <16 x half> @vp_ceil_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_ceil_v16f16: ; ZVFH: # %bb.0: @@ -362,8 +354,6 @@ define <16 x half> @vp_ceil_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) { ret <16 x half> %v } -declare <2 x float> @llvm.vp.ceil.v2f32(<2 x float>, <2 x i1>, i32) - define <2 x float> @vp_ceil_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_v2f32: ; CHECK: # %bb.0: @@ -404,8 +394,6 @@ define <2 x float> @vp_ceil_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) { ret <2 x float> %v } -declare <4 x float> @llvm.vp.ceil.v4f32(<4 x float>, <4 x i1>, i32) - define <4 x float> @vp_ceil_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_v4f32: ; CHECK: # %bb.0: @@ -446,8 +434,6 @@ define <4 x float> @vp_ceil_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) { ret <4 x float> %v } -declare <8 x float> @llvm.vp.ceil.v8f32(<8 x float>, <8 x i1>, i32) - define <8 x float> @vp_ceil_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_v8f32: ; CHECK: # %bb.0: @@ -490,8 +476,6 @@ define <8 x float> @vp_ceil_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) { ret <8 x float> %v } -declare <16 x float> @llvm.vp.ceil.v16f32(<16 x float>, <16 x i1>, i32) - define <16 x float> @vp_ceil_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ceil_v16f32: ; CHECK: # %bb.0: @@ -534,8 +518,6 @@ define <16 x float> @vp_ceil_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl) ret <16 x float> %v } -declare <2 x double> @llvm.vp.ceil.v2f64(<2 x double>, <2 x i1>, i32) - define <2 x double> @vp_ceil_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_ceil_v2f64: ; RV32ZVFH: # %bb.0: @@ -676,8 +658,6 @@ define <2 x double> @vp_ceil_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl) ret <2 x double> %v } -declare <4 x double> @llvm.vp.ceil.v4f64(<4 x double>, <4 x i1>, i32) - define <4 x 
double> @vp_ceil_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_ceil_v4f64: ; RV32ZVFH: # %bb.0: @@ -826,8 +806,6 @@ define <4 x double> @vp_ceil_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) ret <4 x double> %v } -declare <8 x double> @llvm.vp.ceil.v8f64(<8 x double>, <8 x i1>, i32) - define <8 x double> @vp_ceil_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_ceil_v8f64: ; RV32ZVFH: # %bb.0: @@ -976,8 +954,6 @@ define <8 x double> @vp_ceil_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl) ret <8 x double> %v } -declare <15 x double> @llvm.vp.ceil.v15f64(<15 x double>, <15 x i1>, i32) - define <15 x double> @vp_ceil_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_ceil_v15f64: ; RV32ZVFH: # %bb.0: @@ -1126,8 +1102,6 @@ define <15 x double> @vp_ceil_v15f64_unmasked(<15 x double> %va, i32 zeroext %ev ret <15 x double> %v } -declare <16 x double> @llvm.vp.ceil.v16f64(<16 x double>, <16 x i1>, i32) - define <16 x double> @vp_ceil_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_ceil_v16f64: ; RV32ZVFH: # %bb.0: @@ -1276,8 +1250,6 @@ define <16 x double> @vp_ceil_v16f64_unmasked(<16 x double> %va, i32 zeroext %ev ret <16 x double> %v } -declare <32 x double> @llvm.vp.ceil.v32f64(<32 x double>, <32 x i1>, i32) - define <32 x double> @vp_ceil_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_ceil_v32f64: ; RV32ZVFH: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-fp.ll index 9d42f2b6adeed..6c5a0d44a71e0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-fp.ll @@ -2,7 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV32 ; RUN: llc -mtriple=riscv64 
-mattr=+m,+v,+f,+d,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV64 -declare void @llvm.masked.compressstore.v1f16(<1 x half>, ptr, <1 x i1>) define void @compressstore_v1f16(ptr %base, <1 x half> %v, <1 x i1> %mask) { ; RV32-LABEL: compressstore_v1f16: ; RV32: # %bb.0: @@ -25,7 +24,6 @@ define void @compressstore_v1f16(ptr %base, <1 x half> %v, <1 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v2f16(<2 x half>, ptr, <2 x i1>) define void @compressstore_v2f16(ptr %base, <2 x half> %v, <2 x i1> %mask) { ; RV32-LABEL: compressstore_v2f16: ; RV32: # %bb.0: @@ -48,7 +46,6 @@ define void @compressstore_v2f16(ptr %base, <2 x half> %v, <2 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v4f16(<4 x half>, ptr, <4 x i1>) define void @compressstore_v4f16(ptr %base, <4 x half> %v, <4 x i1> %mask) { ; RV32-LABEL: compressstore_v4f16: ; RV32: # %bb.0: @@ -71,7 +68,6 @@ define void @compressstore_v4f16(ptr %base, <4 x half> %v, <4 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v8f16(<8 x half>, ptr, <8 x i1>) define void @compressstore_v8f16(ptr %base, <8 x half> %v, <8 x i1> %mask) { ; RV32-LABEL: compressstore_v8f16: ; RV32: # %bb.0: @@ -94,7 +90,6 @@ define void @compressstore_v8f16(ptr %base, <8 x half> %v, <8 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v1f32(<1 x float>, ptr, <1 x i1>) define void @compressstore_v1f32(ptr %base, <1 x float> %v, <1 x i1> %mask) { ; RV32-LABEL: compressstore_v1f32: ; RV32: # %bb.0: @@ -117,7 +112,6 @@ define void @compressstore_v1f32(ptr %base, <1 x float> %v, <1 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v2f32(<2 x float>, ptr, <2 x i1>) define void @compressstore_v2f32(ptr %base, <2 x float> %v, <2 x i1> %mask) { ; RV32-LABEL: compressstore_v2f32: ; RV32: # %bb.0: @@ -140,7 +134,6 @@ define void @compressstore_v2f32(ptr %base, <2 x float> %v, <2 x i1> %mask) { ret void } -declare void 
@llvm.masked.compressstore.v4f32(<4 x float>, ptr, <4 x i1>) define void @compressstore_v4f32(ptr %base, <4 x float> %v, <4 x i1> %mask) { ; RV32-LABEL: compressstore_v4f32: ; RV32: # %bb.0: @@ -163,7 +156,6 @@ define void @compressstore_v4f32(ptr %base, <4 x float> %v, <4 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v8f32(<8 x float>, ptr, <8 x i1>) define void @compressstore_v8f32(ptr %base, <8 x float> %v, <8 x i1> %mask) { ; RV32-LABEL: compressstore_v8f32: ; RV32: # %bb.0: @@ -186,7 +178,6 @@ define void @compressstore_v8f32(ptr %base, <8 x float> %v, <8 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v1f64(<1 x double>, ptr, <1 x i1>) define void @compressstore_v1f64(ptr %base, <1 x double> %v, <1 x i1> %mask) { ; RV32-LABEL: compressstore_v1f64: ; RV32: # %bb.0: @@ -209,7 +200,6 @@ define void @compressstore_v1f64(ptr %base, <1 x double> %v, <1 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v2f64(<2 x double>, ptr, <2 x i1>) define void @compressstore_v2f64(ptr %base, <2 x double> %v, <2 x i1> %mask) { ; RV32-LABEL: compressstore_v2f64: ; RV32: # %bb.0: @@ -232,7 +222,6 @@ define void @compressstore_v2f64(ptr %base, <2 x double> %v, <2 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v4f64(<4 x double>, ptr, <4 x i1>) define void @compressstore_v4f64(ptr %base, <4 x double> %v, <4 x i1> %mask) { ; RV32-LABEL: compressstore_v4f64: ; RV32: # %bb.0: @@ -255,7 +244,6 @@ define void @compressstore_v4f64(ptr %base, <4 x double> %v, <4 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v8f64(<8 x double>, ptr, <8 x i1>) define void @compressstore_v8f64(ptr %base, <8 x double> %v, <8 x i1> %mask) { ; RV32-LABEL: compressstore_v8f64: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-int.ll index a388ba92f302b..002cf3440dd2c 100644 --- 
a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compressstore-int.ll @@ -2,7 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32 ; RUN: llc -mtriple=riscv64 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 -declare void @llvm.masked.compressstore.v1i8(<1 x i8>, ptr, <1 x i1>) define void @compressstore_v1i8(ptr %base, <1 x i8> %v, <1 x i1> %mask) { ; CHECK-LABEL: compressstore_v1i8: ; CHECK: # %bb.0: @@ -16,7 +15,6 @@ define void @compressstore_v1i8(ptr %base, <1 x i8> %v, <1 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v2i8(<2 x i8>, ptr, <2 x i1>) define void @compressstore_v2i8(ptr %base, <2 x i8> %v, <2 x i1> %mask) { ; CHECK-LABEL: compressstore_v2i8: ; CHECK: # %bb.0: @@ -30,7 +28,6 @@ define void @compressstore_v2i8(ptr %base, <2 x i8> %v, <2 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v4i8(<4 x i8>, ptr, <4 x i1>) define void @compressstore_v4i8(ptr %base, <4 x i8> %v, <4 x i1> %mask) { ; CHECK-LABEL: compressstore_v4i8: ; CHECK: # %bb.0: @@ -44,7 +41,6 @@ define void @compressstore_v4i8(ptr %base, <4 x i8> %v, <4 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v8i8(<8 x i8>, ptr, <8 x i1>) define void @compressstore_v8i8(ptr %base, <8 x i8> %v, <8 x i1> %mask) { ; CHECK-LABEL: compressstore_v8i8: ; CHECK: # %bb.0: @@ -58,7 +54,6 @@ define void @compressstore_v8i8(ptr %base, <8 x i8> %v, <8 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v1i16(<1 x i16>, ptr, <1 x i1>) define void @compressstore_v1i16(ptr %base, <1 x i16> %v, <1 x i1> %mask) { ; CHECK-LABEL: compressstore_v1i16: ; CHECK: # %bb.0: @@ -72,7 +67,6 @@ define void @compressstore_v1i16(ptr %base, <1 x i16> %v, <1 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v2i16(<2 x i16>, ptr, <2 x i1>) define void @compressstore_v2i16(ptr 
%base, <2 x i16> %v, <2 x i1> %mask) { ; CHECK-LABEL: compressstore_v2i16: ; CHECK: # %bb.0: @@ -86,7 +80,6 @@ define void @compressstore_v2i16(ptr %base, <2 x i16> %v, <2 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v4i16(<4 x i16>, ptr, <4 x i1>) define void @compressstore_v4i16(ptr %base, <4 x i16> %v, <4 x i1> %mask) { ; CHECK-LABEL: compressstore_v4i16: ; CHECK: # %bb.0: @@ -100,7 +93,6 @@ define void @compressstore_v4i16(ptr %base, <4 x i16> %v, <4 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v8i16(<8 x i16>, ptr, <8 x i1>) define void @compressstore_v8i16(ptr %base, <8 x i16> %v, <8 x i1> %mask) { ; CHECK-LABEL: compressstore_v8i16: ; CHECK: # %bb.0: @@ -114,7 +106,6 @@ define void @compressstore_v8i16(ptr %base, <8 x i16> %v, <8 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v1i32(<1 x i32>, ptr, <1 x i1>) define void @compressstore_v1i32(ptr %base, <1 x i32> %v, <1 x i1> %mask) { ; CHECK-LABEL: compressstore_v1i32: ; CHECK: # %bb.0: @@ -128,7 +119,6 @@ define void @compressstore_v1i32(ptr %base, <1 x i32> %v, <1 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v2i32(<2 x i32>, ptr, <2 x i1>) define void @compressstore_v2i32(ptr %base, <2 x i32> %v, <2 x i1> %mask) { ; CHECK-LABEL: compressstore_v2i32: ; CHECK: # %bb.0: @@ -142,7 +132,6 @@ define void @compressstore_v2i32(ptr %base, <2 x i32> %v, <2 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v4i32(<4 x i32>, ptr, <4 x i1>) define void @compressstore_v4i32(ptr %base, <4 x i32> %v, <4 x i1> %mask) { ; CHECK-LABEL: compressstore_v4i32: ; CHECK: # %bb.0: @@ -156,7 +145,6 @@ define void @compressstore_v4i32(ptr %base, <4 x i32> %v, <4 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v8i32(<8 x i32>, ptr, <8 x i1>) define void @compressstore_v8i32(ptr %base, <8 x i32> %v, <8 x i1> %mask) { ; CHECK-LABEL: compressstore_v8i32: ; CHECK: # %bb.0: @@ -170,7 +158,6 @@ define void 
@compressstore_v8i32(ptr %base, <8 x i32> %v, <8 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v1i64(<1 x i64>, ptr, <1 x i1>) define void @compressstore_v1i64(ptr %base, <1 x i64> %v, <1 x i1> %mask) { ; CHECK-LABEL: compressstore_v1i64: ; CHECK: # %bb.0: @@ -184,7 +171,6 @@ define void @compressstore_v1i64(ptr %base, <1 x i64> %v, <1 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v2i64(<2 x i64>, ptr, <2 x i1>) define void @compressstore_v2i64(ptr %base, <2 x i64> %v, <2 x i1> %mask) { ; CHECK-LABEL: compressstore_v2i64: ; CHECK: # %bb.0: @@ -198,7 +184,6 @@ define void @compressstore_v2i64(ptr %base, <2 x i64> %v, <2 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v4i64(<4 x i64>, ptr, <4 x i1>) define void @compressstore_v4i64(ptr %base, <4 x i64> %v, <4 x i1> %mask) { ; CHECK-LABEL: compressstore_v4i64: ; CHECK: # %bb.0: @@ -212,7 +197,6 @@ define void @compressstore_v4i64(ptr %base, <4 x i64> %v, <4 x i1> %mask) { ret void } -declare void @llvm.masked.compressstore.v8i64(<8 x i64>, ptr, <8 x i1>) define void @compressstore_v8i64(ptr %base, <8 x i64> %v, <8 x i1> %mask) { ; CHECK-LABEL: compressstore_v8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz-vp.ll index b1af4e685c58f..00c36cb7f7327 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 -declare <2 x i8> @llvm.vp.ctlz.v2i8(<2 x i8>, i1 immarg, <2 x i1>, i32) - define <2 x i8> @vp_ctlz_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_v2i8: ; CHECK: # %bb.0: @@ -62,8 +60,6 @@ define <2 x i8> @vp_ctlz_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.ctlz.v4i8(<4 x i8>, 
i1 immarg, <4 x i1>, i32) - define <4 x i8> @vp_ctlz_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_v4i8: ; CHECK: # %bb.0: @@ -120,8 +116,6 @@ define <4 x i8> @vp_ctlz_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ret <4 x i8> %v } -declare <8 x i8> @llvm.vp.ctlz.v8i8(<8 x i8>, i1 immarg, <8 x i1>, i32) - define <8 x i8> @vp_ctlz_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_v8i8: ; CHECK: # %bb.0: @@ -178,8 +172,6 @@ define <8 x i8> @vp_ctlz_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.ctlz.v16i8(<16 x i8>, i1 immarg, <16 x i1>, i32) - define <16 x i8> @vp_ctlz_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_v16i8: ; CHECK: # %bb.0: @@ -236,8 +228,6 @@ define <16 x i8> @vp_ctlz_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) { ret <16 x i8> %v } -declare <2 x i16> @llvm.vp.ctlz.v2i16(<2 x i16>, i1 immarg, <2 x i1>, i32) - define <2 x i16> @vp_ctlz_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_v2i16: ; CHECK: # %bb.0: @@ -312,8 +302,6 @@ define <2 x i16> @vp_ctlz_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) { ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.ctlz.v4i16(<4 x i16>, i1 immarg, <4 x i1>, i32) - define <4 x i16> @vp_ctlz_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_v4i16: ; CHECK: # %bb.0: @@ -388,8 +376,6 @@ define <4 x i16> @vp_ctlz_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.ctlz.v8i16(<8 x i16>, i1 immarg, <8 x i1>, i32) - define <8 x i16> @vp_ctlz_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_v8i16: ; CHECK: # %bb.0: @@ -464,8 +450,6 @@ define <8 x i16> @vp_ctlz_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) { ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.ctlz.v16i16(<16 x i16>, i1 immarg, <16 x i1>, i32) - define <16 x i16> @vp_ctlz_v16i16(<16 x i16> %va, <16 x 
i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_v16i16: ; CHECK: # %bb.0: @@ -540,8 +524,6 @@ define <16 x i16> @vp_ctlz_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) { ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.ctlz.v2i32(<2 x i32>, i1 immarg, <2 x i1>, i32) - define <2 x i32> @vp_ctlz_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_v2i32: ; CHECK: # %bb.0: @@ -622,8 +604,6 @@ define <2 x i32> @vp_ctlz_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) { ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.ctlz.v4i32(<4 x i32>, i1 immarg, <4 x i1>, i32) - define <4 x i32> @vp_ctlz_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_v4i32: ; CHECK: # %bb.0: @@ -704,8 +684,6 @@ define <4 x i32> @vp_ctlz_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.ctlz.v8i32(<8 x i32>, i1 immarg, <8 x i1>, i32) - define <8 x i32> @vp_ctlz_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_v8i32: ; CHECK: # %bb.0: @@ -786,8 +764,6 @@ define <8 x i32> @vp_ctlz_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) { ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.ctlz.v16i32(<16 x i32>, i1 immarg, <16 x i1>, i32) - define <16 x i32> @vp_ctlz_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctlz_v16i32: ; CHECK: # %bb.0: @@ -868,8 +844,6 @@ define <16 x i32> @vp_ctlz_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) { ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.ctlz.v2i64(<2 x i64>, i1 immarg, <2 x i1>, i32) - define <2 x i64> @vp_ctlz_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_ctlz_v2i64: ; RV32: # %bb.0: @@ -1078,8 +1052,6 @@ define <2 x i64> @vp_ctlz_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) { ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.ctlz.v4i64(<4 x i64>, i1 immarg, <4 x i1>, i32) - define <4 x i64> @vp_ctlz_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: 
vp_ctlz_v4i64: ; RV32: # %bb.0: @@ -1288,8 +1260,6 @@ define <4 x i64> @vp_ctlz_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.ctlz.v8i64(<8 x i64>, i1 immarg, <8 x i1>, i32) - define <8 x i64> @vp_ctlz_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_ctlz_v8i64: ; RV32: # %bb.0: @@ -1498,8 +1468,6 @@ define <8 x i64> @vp_ctlz_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) { ret <8 x i64> %v } -declare <15 x i64> @llvm.vp.ctlz.v15i64(<15 x i64>, i1 immarg, <15 x i1>, i32) - define <15 x i64> @vp_ctlz_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_ctlz_v15i64: ; RV32: # %bb.0: @@ -1708,8 +1676,6 @@ define <15 x i64> @vp_ctlz_v15i64_unmasked(<15 x i64> %va, i32 zeroext %evl) { ret <15 x i64> %v } -declare <16 x i64> @llvm.vp.ctlz.v16i64(<16 x i64>, i1 immarg, <16 x i1>, i32) - define <16 x i64> @vp_ctlz_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_ctlz_v16i64: ; RV32: # %bb.0: @@ -1918,8 +1884,6 @@ define <16 x i64> @vp_ctlz_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) { ret <16 x i64> %v } -declare <32 x i64> @llvm.vp.ctlz.v32i64(<32 x i64>, i1 immarg, <32 x i1>, i32) - define <32 x i64> @vp_ctlz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_ctlz_v32i64: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll index 61730b87c5517..02e1ec8da49fe 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll @@ -80,7 +80,6 @@ define void @ctlz_v16i8(ptr %x, ptr %y) nounwind { store <16 x i8> %c, ptr %x ret void } -declare <16 x i8> @llvm.ctlz.v16i8(<16 x i8>, i1) define void @ctlz_v8i16(ptr %x, ptr %y) nounwind { ; RVI-LABEL: ctlz_v8i16: @@ -157,7 +156,6 @@ define void @ctlz_v8i16(ptr %x, ptr %y) nounwind { store <8 x i16> %c, ptr %x ret void } -declare <8 x i16> 
@llvm.ctlz.v8i16(<8 x i16>, i1) define void @ctlz_v4i32(ptr %x, ptr %y) nounwind { ; RVI-LABEL: ctlz_v4i32: @@ -240,7 +238,6 @@ define void @ctlz_v4i32(ptr %x, ptr %y) nounwind { store <4 x i32> %c, ptr %x ret void } -declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32>, i1) define void @ctlz_v2i64(ptr %x, ptr %y) nounwind { ; RV32I-LABEL: ctlz_v2i64: @@ -393,7 +390,6 @@ define void @ctlz_v2i64(ptr %x, ptr %y) nounwind { store <2 x i64> %c, ptr %x ret void } -declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64>, i1) define void @ctlz_v32i8(ptr %x, ptr %y) nounwind { ; RVI-LABEL: ctlz_v32i8: @@ -471,7 +467,6 @@ define void @ctlz_v32i8(ptr %x, ptr %y) nounwind { store <32 x i8> %c, ptr %x ret void } -declare <32 x i8> @llvm.ctlz.v32i8(<32 x i8>, i1) define void @ctlz_v16i16(ptr %x, ptr %y) nounwind { ; RVI-LABEL: ctlz_v16i16: @@ -548,7 +543,6 @@ define void @ctlz_v16i16(ptr %x, ptr %y) nounwind { store <16 x i16> %c, ptr %x ret void } -declare <16 x i16> @llvm.ctlz.v16i16(<16 x i16>, i1) define void @ctlz_v8i32(ptr %x, ptr %y) nounwind { ; RVI-LABEL: ctlz_v8i32: @@ -631,7 +625,6 @@ define void @ctlz_v8i32(ptr %x, ptr %y) nounwind { store <8 x i32> %c, ptr %x ret void } -declare <8 x i32> @llvm.ctlz.v8i32(<8 x i32>, i1) define void @ctlz_v4i64(ptr %x, ptr %y) nounwind { ; RV32I-LABEL: ctlz_v4i64: @@ -784,7 +777,6 @@ define void @ctlz_v4i64(ptr %x, ptr %y) nounwind { store <4 x i64> %c, ptr %x ret void } -declare <4 x i64> @llvm.ctlz.v4i64(<4 x i64>, i1) define void @ctlz_zero_undef_v16i8(ptr %x, ptr %y) nounwind { ; RVI-LABEL: ctlz_zero_undef_v16i8: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll index a993ed909d940..f56438bf87e6a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 
-declare <2 x i8> @llvm.vp.ctpop.v2i8(<2 x i8>, <2 x i1>, i32) - define <2 x i8> @vp_ctpop_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_v2i8: ; CHECK: # %bb.0: @@ -48,8 +46,6 @@ define <2 x i8> @vp_ctpop_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.ctpop.v4i8(<4 x i8>, <4 x i1>, i32) - define <4 x i8> @vp_ctpop_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_v4i8: ; CHECK: # %bb.0: @@ -92,8 +88,6 @@ define <4 x i8> @vp_ctpop_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ret <4 x i8> %v } -declare <8 x i8> @llvm.vp.ctpop.v8i8(<8 x i8>, <8 x i1>, i32) - define <8 x i8> @vp_ctpop_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_v8i8: ; CHECK: # %bb.0: @@ -136,8 +130,6 @@ define <8 x i8> @vp_ctpop_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.ctpop.v16i8(<16 x i8>, <16 x i1>, i32) - define <16 x i8> @vp_ctpop_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_v16i8: ; CHECK: # %bb.0: @@ -180,8 +172,6 @@ define <16 x i8> @vp_ctpop_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) { ret <16 x i8> %v } -declare <2 x i16> @llvm.vp.ctpop.v2i16(<2 x i16>, <2 x i1>, i32) - define <2 x i16> @vp_ctpop_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_v2i16: ; CHECK: # %bb.0: @@ -238,8 +228,6 @@ define <2 x i16> @vp_ctpop_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) { ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.ctpop.v4i16(<4 x i16>, <4 x i1>, i32) - define <4 x i16> @vp_ctpop_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_v4i16: ; CHECK: # %bb.0: @@ -296,8 +284,6 @@ define <4 x i16> @vp_ctpop_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.ctpop.v8i16(<8 x i16>, <8 x i1>, i32) - define <8 x i16> @vp_ctpop_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) { ; 
CHECK-LABEL: vp_ctpop_v8i16: ; CHECK: # %bb.0: @@ -354,8 +340,6 @@ define <8 x i16> @vp_ctpop_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) { ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.ctpop.v16i16(<16 x i16>, <16 x i1>, i32) - define <16 x i16> @vp_ctpop_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_v16i16: ; CHECK: # %bb.0: @@ -412,8 +396,6 @@ define <16 x i16> @vp_ctpop_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) { ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.ctpop.v2i32(<2 x i32>, <2 x i1>, i32) - define <2 x i32> @vp_ctpop_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_v2i32: ; CHECK: # %bb.0: @@ -472,8 +454,6 @@ define <2 x i32> @vp_ctpop_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) { ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.ctpop.v4i32(<4 x i32>, <4 x i1>, i32) - define <4 x i32> @vp_ctpop_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_v4i32: ; CHECK: # %bb.0: @@ -532,8 +512,6 @@ define <4 x i32> @vp_ctpop_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.ctpop.v8i32(<8 x i32>, <8 x i1>, i32) - define <8 x i32> @vp_ctpop_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_v8i32: ; CHECK: # %bb.0: @@ -592,8 +570,6 @@ define <8 x i32> @vp_ctpop_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) { ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.ctpop.v16i32(<16 x i32>, <16 x i1>, i32) - define <16 x i32> @vp_ctpop_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_ctpop_v16i32: ; CHECK: # %bb.0: @@ -652,8 +628,6 @@ define <16 x i32> @vp_ctpop_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) { ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.ctpop.v2i64(<2 x i64>, <2 x i1>, i32) - define <2 x i64> @vp_ctpop_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_ctpop_v2i64: ; RV32: # %bb.0: @@ -806,8 +780,6 @@ define <2 x i64> 
@vp_ctpop_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) { ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.ctpop.v4i64(<4 x i64>, <4 x i1>, i32) - define <4 x i64> @vp_ctpop_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_ctpop_v4i64: ; RV32: # %bb.0: @@ -960,8 +932,6 @@ define <4 x i64> @vp_ctpop_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.ctpop.v8i64(<8 x i64>, <8 x i1>, i32) - define <8 x i64> @vp_ctpop_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_ctpop_v8i64: ; RV32: # %bb.0: @@ -1114,8 +1084,6 @@ define <8 x i64> @vp_ctpop_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) { ret <8 x i64> %v } -declare <15 x i64> @llvm.vp.ctpop.v15i64(<15 x i64>, <15 x i1>, i32) - define <15 x i64> @vp_ctpop_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_ctpop_v15i64: ; RV32: # %bb.0: @@ -1268,8 +1236,6 @@ define <15 x i64> @vp_ctpop_v15i64_unmasked(<15 x i64> %va, i32 zeroext %evl) { ret <15 x i64> %v } -declare <16 x i64> @llvm.vp.ctpop.v16i64(<16 x i64>, <16 x i1>, i32) - define <16 x i64> @vp_ctpop_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_ctpop_v16i64: ; RV32: # %bb.0: @@ -1422,8 +1388,6 @@ define <16 x i64> @vp_ctpop_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) { ret <16 x i64> %v } -declare <32 x i64> @llvm.vp.ctpop.v32i64(<32 x i64>, <32 x i1>, i32) - define <32 x i64> @vp_ctpop_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_ctpop_v32i64: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop.ll index 44b9331fd2caf..f7835cbbfafa5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop.ll @@ -37,7 +37,6 @@ define void @ctpop_v16i8(ptr %x, ptr %y) { store <16 x i8> %c, ptr %x ret void } -declare <16 x i8> @llvm.ctpop.v16i8(<16 x i8>) define void 
@ctpop_v8i16(ptr %x, ptr %y) { ; CHECK-LABEL: ctpop_v8i16: @@ -79,7 +78,6 @@ define void @ctpop_v8i16(ptr %x, ptr %y) { store <8 x i16> %c, ptr %x ret void } -declare <8 x i16> @llvm.ctpop.v8i16(<8 x i16>) define void @ctpop_v4i32(ptr %x, ptr %y) { ; CHECK-LABEL: ctpop_v4i32: @@ -122,7 +120,6 @@ define void @ctpop_v4i32(ptr %x, ptr %y) { store <4 x i32> %c, ptr %x ret void } -declare <4 x i32> @llvm.ctpop.v4i32(<4 x i32>) define void @ctpop_v2i64(ptr %x, ptr %y) { ; RV32-LABEL: ctpop_v2i64: @@ -214,7 +211,6 @@ define void @ctpop_v2i64(ptr %x, ptr %y) { store <2 x i64> %c, ptr %x ret void } -declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>) define void @ctpop_v32i8(ptr %x, ptr %y) { ; CHECK-LABEL: ctpop_v32i8: @@ -251,7 +247,6 @@ define void @ctpop_v32i8(ptr %x, ptr %y) { store <32 x i8> %c, ptr %x ret void } -declare <32 x i8> @llvm.ctpop.v32i8(<32 x i8>) define void @ctpop_v16i16(ptr %x, ptr %y) { ; CHECK-LABEL: ctpop_v16i16: @@ -293,7 +288,6 @@ define void @ctpop_v16i16(ptr %x, ptr %y) { store <16 x i16> %c, ptr %x ret void } -declare <16 x i16> @llvm.ctpop.v16i16(<16 x i16>) define void @ctpop_v8i32(ptr %x, ptr %y) { ; CHECK-LABEL: ctpop_v8i32: @@ -428,7 +422,6 @@ define <8 x i1> @ctpop_v8i32_ne_one(ptr %x, ptr %y) { %cmp = icmp ne <8 x i32> %c, ret <8 x i1> %cmp } -declare <8 x i32> @llvm.ctpop.v8i32(<8 x i32>) define void @ctpop_v4i64(ptr %x, ptr %y) { ; RV32-LABEL: ctpop_v4i64: @@ -612,4 +605,3 @@ define <4 x i1> @ctpop_v4i64_ne_one(ptr %x, ptr %y) { %cmp = icmp ne <4 x i64> %c, ret <4 x i1> %cmp } -declare <4 x i64> @llvm.ctpop.v4i64(<4 x i64>) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz-vp.ll index 1922006b8a581..098384d200045 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s 
--check-prefixes=CHECK,RV64 -declare <2 x i8> @llvm.vp.cttz.v2i8(<2 x i8>, i1 immarg, <2 x i1>, i32) - define <2 x i8> @vp_cttz_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_v2i8: ; CHECK: # %bb.0: @@ -54,8 +52,6 @@ define <2 x i8> @vp_cttz_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.cttz.v4i8(<4 x i8>, i1 immarg, <4 x i1>, i32) - define <4 x i8> @vp_cttz_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_v4i8: ; CHECK: # %bb.0: @@ -104,8 +100,6 @@ define <4 x i8> @vp_cttz_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ret <4 x i8> %v } -declare <8 x i8> @llvm.vp.cttz.v8i8(<8 x i8>, i1 immarg, <8 x i1>, i32) - define <8 x i8> @vp_cttz_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_v8i8: ; CHECK: # %bb.0: @@ -154,8 +148,6 @@ define <8 x i8> @vp_cttz_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.cttz.v16i8(<16 x i8>, i1 immarg, <16 x i1>, i32) - define <16 x i8> @vp_cttz_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_v16i8: ; CHECK: # %bb.0: @@ -204,8 +196,6 @@ define <16 x i8> @vp_cttz_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) { ret <16 x i8> %v } -declare <2 x i16> @llvm.vp.cttz.v2i16(<2 x i16>, i1 immarg, <2 x i1>, i32) - define <2 x i16> @vp_cttz_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_v2i16: ; CHECK: # %bb.0: @@ -268,8 +258,6 @@ define <2 x i16> @vp_cttz_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) { ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.cttz.v4i16(<4 x i16>, i1 immarg, <4 x i1>, i32) - define <4 x i16> @vp_cttz_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_v4i16: ; CHECK: # %bb.0: @@ -332,8 +320,6 @@ define <4 x i16> @vp_cttz_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.cttz.v8i16(<8 x i16>, i1 immarg, <8 x i1>, i32) - 
define <8 x i16> @vp_cttz_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_v8i16: ; CHECK: # %bb.0: @@ -396,8 +382,6 @@ define <8 x i16> @vp_cttz_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) { ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.cttz.v16i16(<16 x i16>, i1 immarg, <16 x i1>, i32) - define <16 x i16> @vp_cttz_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_v16i16: ; CHECK: # %bb.0: @@ -460,8 +444,6 @@ define <16 x i16> @vp_cttz_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) { ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.cttz.v2i32(<2 x i32>, i1 immarg, <2 x i1>, i32) - define <2 x i32> @vp_cttz_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_v2i32: ; CHECK: # %bb.0: @@ -526,8 +508,6 @@ define <2 x i32> @vp_cttz_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) { ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.cttz.v4i32(<4 x i32>, i1 immarg, <4 x i1>, i32) - define <4 x i32> @vp_cttz_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_v4i32: ; CHECK: # %bb.0: @@ -592,8 +572,6 @@ define <4 x i32> @vp_cttz_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.cttz.v8i32(<8 x i32>, i1 immarg, <8 x i1>, i32) - define <8 x i32> @vp_cttz_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_v8i32: ; CHECK: # %bb.0: @@ -658,8 +636,6 @@ define <8 x i32> @vp_cttz_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) { ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.cttz.v16i32(<16 x i32>, i1 immarg, <16 x i1>, i32) - define <16 x i32> @vp_cttz_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_cttz_v16i32: ; CHECK: # %bb.0: @@ -724,8 +700,6 @@ define <16 x i32> @vp_cttz_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) { ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.cttz.v2i64(<2 x i64>, i1 immarg, <2 x i1>, i32) - define <2 x i64> @vp_cttz_v2i64(<2 x i64> %va, 
<2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_cttz_v2i64: ; RV32: # %bb.0: @@ -890,8 +864,6 @@ define <2 x i64> @vp_cttz_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) { ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.cttz.v4i64(<4 x i64>, i1 immarg, <4 x i1>, i32) - define <4 x i64> @vp_cttz_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_cttz_v4i64: ; RV32: # %bb.0: @@ -1056,8 +1028,6 @@ define <4 x i64> @vp_cttz_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.cttz.v8i64(<8 x i64>, i1 immarg, <8 x i1>, i32) - define <8 x i64> @vp_cttz_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_cttz_v8i64: ; RV32: # %bb.0: @@ -1222,8 +1192,6 @@ define <8 x i64> @vp_cttz_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) { ret <8 x i64> %v } -declare <15 x i64> @llvm.vp.cttz.v15i64(<15 x i64>, i1 immarg, <15 x i1>, i32) - define <15 x i64> @vp_cttz_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_cttz_v15i64: ; RV32: # %bb.0: @@ -1388,8 +1356,6 @@ define <15 x i64> @vp_cttz_v15i64_unmasked(<15 x i64> %va, i32 zeroext %evl) { ret <15 x i64> %v } -declare <16 x i64> @llvm.vp.cttz.v16i64(<16 x i64>, i1 immarg, <16 x i1>, i32) - define <16 x i64> @vp_cttz_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_cttz_v16i64: ; RV32: # %bb.0: @@ -1554,8 +1520,6 @@ define <16 x i64> @vp_cttz_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) { ret <16 x i64> %v } -declare <32 x i64> @llvm.vp.cttz.v32i64(<32 x i64>, i1 immarg, <32 x i1>, i32) - define <32 x i64> @vp_cttz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_cttz_v32i64: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll index 307b143f4449f..ad51cab1ba8d2 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll @@ -82,7 
+82,6 @@ define void @cttz_v16i8(ptr %x, ptr %y) nounwind { store <16 x i8> %c, ptr %x ret void } -declare <16 x i8> @llvm.cttz.v16i8(<16 x i8>, i1) define void @cttz_v8i16(ptr %x, ptr %y) nounwind { ; RVI-LABEL: cttz_v8i16: @@ -159,7 +158,6 @@ define void @cttz_v8i16(ptr %x, ptr %y) nounwind { store <8 x i16> %c, ptr %x ret void } -declare <8 x i16> @llvm.cttz.v8i16(<8 x i16>, i1) define void @cttz_v4i32(ptr %x, ptr %y) nounwind { ; RVI-LABEL: cttz_v4i32: @@ -240,7 +238,6 @@ define void @cttz_v4i32(ptr %x, ptr %y) nounwind { store <4 x i32> %c, ptr %x ret void } -declare <4 x i32> @llvm.cttz.v4i32(<4 x i32>, i1) define void @cttz_v2i64(ptr %x, ptr %y) nounwind { ; RV32I-LABEL: cttz_v2i64: @@ -379,7 +376,6 @@ define void @cttz_v2i64(ptr %x, ptr %y) nounwind { store <2 x i64> %c, ptr %x ret void } -declare <2 x i64> @llvm.cttz.v2i64(<2 x i64>, i1) define void @cttz_v32i8(ptr %x, ptr %y) nounwind { ; RVI-LABEL: cttz_v32i8: @@ -459,7 +455,6 @@ define void @cttz_v32i8(ptr %x, ptr %y) nounwind { store <32 x i8> %c, ptr %x ret void } -declare <32 x i8> @llvm.cttz.v32i8(<32 x i8>, i1) define void @cttz_v16i16(ptr %x, ptr %y) nounwind { ; RVI-LABEL: cttz_v16i16: @@ -536,7 +531,6 @@ define void @cttz_v16i16(ptr %x, ptr %y) nounwind { store <16 x i16> %c, ptr %x ret void } -declare <16 x i16> @llvm.cttz.v16i16(<16 x i16>, i1) define void @cttz_v8i32(ptr %x, ptr %y) nounwind { ; RVI-LABEL: cttz_v8i32: @@ -617,7 +611,6 @@ define void @cttz_v8i32(ptr %x, ptr %y) nounwind { store <8 x i32> %c, ptr %x ret void } -declare <8 x i32> @llvm.cttz.v8i32(<8 x i32>, i1) define void @cttz_v4i64(ptr %x, ptr %y) nounwind { ; RV32I-LABEL: cttz_v4i64: @@ -756,7 +749,6 @@ define void @cttz_v4i64(ptr %x, ptr %y) nounwind { store <4 x i64> %c, ptr %x ret void } -declare <4 x i64> @llvm.cttz.v4i64(<4 x i64>, i1) define void @cttz_zero_undef_v16i8(ptr %x, ptr %y) nounwind { ; RVI-LABEL: cttz_zero_undef_v16i8: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-fp.ll 
b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-fp.ll index fa311154fa973..21b4b81651966 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-fp.ll @@ -4,7 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zvfh -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-RV32 -declare <1 x half> @llvm.masked.expandload.v1f16(ptr, <1 x i1>, <1 x half>) define <1 x half> @expandload_v1f16(ptr %base, <1 x half> %src0, <1 x i1> %mask) { ; CHECK-LABEL: expandload_v1f16: ; CHECK: # %bb.0: @@ -20,7 +19,6 @@ define <1 x half> @expandload_v1f16(ptr %base, <1 x half> %src0, <1 x i1> %mask) ret <1 x half>%res } -declare <2 x half> @llvm.masked.expandload.v2f16(ptr, <2 x i1>, <2 x half>) define <2 x half> @expandload_v2f16(ptr %base, <2 x half> %src0, <2 x i1> %mask) { ; CHECK-LABEL: expandload_v2f16: ; CHECK: # %bb.0: @@ -36,7 +34,6 @@ define <2 x half> @expandload_v2f16(ptr %base, <2 x half> %src0, <2 x i1> %mask) ret <2 x half>%res } -declare <4 x half> @llvm.masked.expandload.v4f16(ptr, <4 x i1>, <4 x half>) define <4 x half> @expandload_v4f16(ptr %base, <4 x half> %src0, <4 x i1> %mask) { ; CHECK-LABEL: expandload_v4f16: ; CHECK: # %bb.0: @@ -52,7 +49,6 @@ define <4 x half> @expandload_v4f16(ptr %base, <4 x half> %src0, <4 x i1> %mask) ret <4 x half>%res } -declare <8 x half> @llvm.masked.expandload.v8f16(ptr, <8 x i1>, <8 x half>) define <8 x half> @expandload_v8f16(ptr %base, <8 x half> %src0, <8 x i1> %mask) { ; CHECK-LABEL: expandload_v8f16: ; CHECK: # %bb.0: @@ -68,7 +64,6 @@ define <8 x half> @expandload_v8f16(ptr %base, <8 x half> %src0, <8 x i1> %mask) ret <8 x half>%res } -declare <1 x float> @llvm.masked.expandload.v1f32(ptr, <1 x i1>, <1 x float>) define <1 x float> @expandload_v1f32(ptr %base, <1 x float> %src0, <1 x i1> %mask) { ; CHECK-LABEL: expandload_v1f32: ; CHECK: # %bb.0: @@ -84,7 +79,6 @@ define <1 x float> @expandload_v1f32(ptr %base, <1 x 
float> %src0, <1 x i1> %mas ret <1 x float>%res } -declare <2 x float> @llvm.masked.expandload.v2f32(ptr, <2 x i1>, <2 x float>) define <2 x float> @expandload_v2f32(ptr %base, <2 x float> %src0, <2 x i1> %mask) { ; CHECK-LABEL: expandload_v2f32: ; CHECK: # %bb.0: @@ -100,7 +94,6 @@ define <2 x float> @expandload_v2f32(ptr %base, <2 x float> %src0, <2 x i1> %mas ret <2 x float>%res } -declare <4 x float> @llvm.masked.expandload.v4f32(ptr, <4 x i1>, <4 x float>) define <4 x float> @expandload_v4f32(ptr %base, <4 x float> %src0, <4 x i1> %mask) { ; CHECK-LABEL: expandload_v4f32: ; CHECK: # %bb.0: @@ -116,7 +109,6 @@ define <4 x float> @expandload_v4f32(ptr %base, <4 x float> %src0, <4 x i1> %mas ret <4 x float>%res } -declare <8 x float> @llvm.masked.expandload.v8f32(ptr, <8 x i1>, <8 x float>) define <8 x float> @expandload_v8f32(ptr %base, <8 x float> %src0, <8 x i1> %mask) { ; CHECK-LABEL: expandload_v8f32: ; CHECK: # %bb.0: @@ -132,7 +124,6 @@ define <8 x float> @expandload_v8f32(ptr %base, <8 x float> %src0, <8 x i1> %mas ret <8 x float>%res } -declare <1 x double> @llvm.masked.expandload.v1f64(ptr, <1 x i1>, <1 x double>) define <1 x double> @expandload_v1f64(ptr %base, <1 x double> %src0, <1 x i1> %mask) { ; CHECK-LABEL: expandload_v1f64: ; CHECK: # %bb.0: @@ -148,7 +139,6 @@ define <1 x double> @expandload_v1f64(ptr %base, <1 x double> %src0, <1 x i1> %m ret <1 x double>%res } -declare <2 x double> @llvm.masked.expandload.v2f64(ptr, <2 x i1>, <2 x double>) define <2 x double> @expandload_v2f64(ptr %base, <2 x double> %src0, <2 x i1> %mask) { ; CHECK-LABEL: expandload_v2f64: ; CHECK: # %bb.0: @@ -164,7 +154,6 @@ define <2 x double> @expandload_v2f64(ptr %base, <2 x double> %src0, <2 x i1> %m ret <2 x double>%res } -declare <4 x double> @llvm.masked.expandload.v4f64(ptr, <4 x i1>, <4 x double>) define <4 x double> @expandload_v4f64(ptr %base, <4 x double> %src0, <4 x i1> %mask) { ; CHECK-LABEL: expandload_v4f64: ; CHECK: # %bb.0: @@ -180,7 +169,6 @@ define <4 x 
double> @expandload_v4f64(ptr %base, <4 x double> %src0, <4 x i1> %m ret <4 x double>%res } -declare <8 x double> @llvm.masked.expandload.v8f64(ptr, <8 x i1>, <8 x double>) define <8 x double> @expandload_v8f64(ptr %base, <8 x double> %src0, <8 x i1> %mask) { ; CHECK-LABEL: expandload_v8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-int.ll index 269d3df00f05d..7128f538354aa 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-expandload-int.ll @@ -4,7 +4,6 @@ ; RUN: llc -verify-machineinstrs -mtriple=riscv64 -mattr=+m,+v %s -o - \ ; RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-RV64 -declare <1 x i8> @llvm.masked.expandload.v1i8(ptr, <1 x i1>, <1 x i8>) define <1 x i8> @expandload_v1i8(ptr %base, <1 x i8> %src0, <1 x i1> %mask) { ; CHECK-LABEL: expandload_v1i8: ; CHECK: # %bb.0: @@ -20,7 +19,6 @@ define <1 x i8> @expandload_v1i8(ptr %base, <1 x i8> %src0, <1 x i1> %mask) { ret <1 x i8>%res } -declare <2 x i8> @llvm.masked.expandload.v2i8(ptr, <2 x i1>, <2 x i8>) define <2 x i8> @expandload_v2i8(ptr %base, <2 x i8> %src0, <2 x i1> %mask) { ; CHECK-LABEL: expandload_v2i8: ; CHECK: # %bb.0: @@ -36,7 +34,6 @@ define <2 x i8> @expandload_v2i8(ptr %base, <2 x i8> %src0, <2 x i1> %mask) { ret <2 x i8>%res } -declare <4 x i8> @llvm.masked.expandload.v4i8(ptr, <4 x i1>, <4 x i8>) define <4 x i8> @expandload_v4i8(ptr %base, <4 x i8> %src0, <4 x i1> %mask) { ; CHECK-LABEL: expandload_v4i8: ; CHECK: # %bb.0: @@ -52,7 +49,6 @@ define <4 x i8> @expandload_v4i8(ptr %base, <4 x i8> %src0, <4 x i1> %mask) { ret <4 x i8>%res } -declare <8 x i8> @llvm.masked.expandload.v8i8(ptr, <8 x i1>, <8 x i8>) define <8 x i8> @expandload_v8i8(ptr %base, <8 x i8> %src0, <8 x i1> %mask) { ; CHECK-LABEL: expandload_v8i8: ; CHECK: # %bb.0: @@ -68,7 +64,6 @@ define <8 x i8> @expandload_v8i8(ptr %base, <8 x i8> %src0, <8 x i1> 
%mask) { ret <8 x i8>%res } -declare <1 x i16> @llvm.masked.expandload.v1i16(ptr, <1 x i1>, <1 x i16>) define <1 x i16> @expandload_v1i16(ptr %base, <1 x i16> %src0, <1 x i1> %mask) { ; CHECK-LABEL: expandload_v1i16: ; CHECK: # %bb.0: @@ -84,7 +79,6 @@ define <1 x i16> @expandload_v1i16(ptr %base, <1 x i16> %src0, <1 x i1> %mask) { ret <1 x i16>%res } -declare <2 x i16> @llvm.masked.expandload.v2i16(ptr, <2 x i1>, <2 x i16>) define <2 x i16> @expandload_v2i16(ptr %base, <2 x i16> %src0, <2 x i1> %mask) { ; CHECK-LABEL: expandload_v2i16: ; CHECK: # %bb.0: @@ -100,7 +94,6 @@ define <2 x i16> @expandload_v2i16(ptr %base, <2 x i16> %src0, <2 x i1> %mask) { ret <2 x i16>%res } -declare <4 x i16> @llvm.masked.expandload.v4i16(ptr, <4 x i1>, <4 x i16>) define <4 x i16> @expandload_v4i16(ptr %base, <4 x i16> %src0, <4 x i1> %mask) { ; CHECK-LABEL: expandload_v4i16: ; CHECK: # %bb.0: @@ -116,7 +109,6 @@ define <4 x i16> @expandload_v4i16(ptr %base, <4 x i16> %src0, <4 x i1> %mask) { ret <4 x i16>%res } -declare <8 x i16> @llvm.masked.expandload.v8i16(ptr, <8 x i1>, <8 x i16>) define <8 x i16> @expandload_v8i16(ptr %base, <8 x i16> %src0, <8 x i1> %mask) { ; CHECK-LABEL: expandload_v8i16: ; CHECK: # %bb.0: @@ -132,7 +124,6 @@ define <8 x i16> @expandload_v8i16(ptr %base, <8 x i16> %src0, <8 x i1> %mask) { ret <8 x i16>%res } -declare <1 x i32> @llvm.masked.expandload.v1i32(ptr, <1 x i1>, <1 x i32>) define <1 x i32> @expandload_v1i32(ptr %base, <1 x i32> %src0, <1 x i1> %mask) { ; CHECK-LABEL: expandload_v1i32: ; CHECK: # %bb.0: @@ -148,7 +139,6 @@ define <1 x i32> @expandload_v1i32(ptr %base, <1 x i32> %src0, <1 x i1> %mask) { ret <1 x i32>%res } -declare <2 x i32> @llvm.masked.expandload.v2i32(ptr, <2 x i1>, <2 x i32>) define <2 x i32> @expandload_v2i32(ptr %base, <2 x i32> %src0, <2 x i1> %mask) { ; CHECK-LABEL: expandload_v2i32: ; CHECK: # %bb.0: @@ -164,7 +154,6 @@ define <2 x i32> @expandload_v2i32(ptr %base, <2 x i32> %src0, <2 x i1> %mask) { ret <2 x i32>%res } 
-declare <4 x i32> @llvm.masked.expandload.v4i32(ptr, <4 x i1>, <4 x i32>) define <4 x i32> @expandload_v4i32(ptr %base, <4 x i32> %src0, <4 x i1> %mask) { ; CHECK-LABEL: expandload_v4i32: ; CHECK: # %bb.0: @@ -180,7 +169,6 @@ define <4 x i32> @expandload_v4i32(ptr %base, <4 x i32> %src0, <4 x i1> %mask) { ret <4 x i32>%res } -declare <8 x i32> @llvm.masked.expandload.v8i32(ptr, <8 x i1>, <8 x i32>) define <8 x i32> @expandload_v8i32(ptr %base, <8 x i32> %src0, <8 x i1> %mask) { ; CHECK-LABEL: expandload_v8i32: ; CHECK: # %bb.0: @@ -196,7 +184,6 @@ define <8 x i32> @expandload_v8i32(ptr %base, <8 x i32> %src0, <8 x i1> %mask) { ret <8 x i32>%res } -declare <1 x i64> @llvm.masked.expandload.v1i64(ptr, <1 x i1>, <1 x i64>) define <1 x i64> @expandload_v1i64(ptr %base, <1 x i64> %src0, <1 x i1> %mask) { ; CHECK-LABEL: expandload_v1i64: ; CHECK: # %bb.0: @@ -212,7 +199,6 @@ define <1 x i64> @expandload_v1i64(ptr %base, <1 x i64> %src0, <1 x i1> %mask) { ret <1 x i64>%res } -declare <2 x i64> @llvm.masked.expandload.v2i64(ptr, <2 x i1>, <2 x i64>) define <2 x i64> @expandload_v2i64(ptr %base, <2 x i64> %src0, <2 x i1> %mask) { ; CHECK-LABEL: expandload_v2i64: ; CHECK: # %bb.0: @@ -228,7 +214,6 @@ define <2 x i64> @expandload_v2i64(ptr %base, <2 x i64> %src0, <2 x i1> %mask) { ret <2 x i64>%res } -declare <4 x i64> @llvm.masked.expandload.v4i64(ptr, <4 x i1>, <4 x i64>) define <4 x i64> @expandload_v4i64(ptr %base, <4 x i64> %src0, <4 x i1> %mask) { ; CHECK-LABEL: expandload_v4i64: ; CHECK: # %bb.0: @@ -244,7 +229,6 @@ define <4 x i64> @expandload_v4i64(ptr %base, <4 x i64> %src0, <4 x i1> %mask) { ret <4 x i64>%res } -declare <8 x i64> @llvm.masked.expandload.v8i64(ptr, <8 x i1>, <8 x i64>) define <8 x i64> @expandload_v8i64(ptr %base, <8 x i64> %src0, <8 x i1> %mask) { ; CHECK-LABEL: expandload_v8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll index 
e2711a0231509..3263539d5c20d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll @@ -176,7 +176,6 @@ define void @extract_v2i32_nxv16i32_0( %x, ptr %y) { ret void } - define void @extract_v2i32_nxv16i32_2( %x, ptr %y) { ; CHECK-LABEL: extract_v2i32_nxv16i32_2: ; CHECK: # %bb.0: @@ -834,25 +833,3 @@ define void @extract_v2f16_v4f16_2(ptr %x, ptr %y) { ret void } -declare <2 x i1> @llvm.vector.extract.v2i1.v64i1(<64 x i1> %vec, i64 %idx) -declare <8 x i1> @llvm.vector.extract.v8i1.v64i1(<64 x i1> %vec, i64 %idx) - -declare <2 x i1> @llvm.vector.extract.v2i1.nxv2i1( %vec, i64 %idx) -declare <8 x i1> @llvm.vector.extract.v8i1.nxv2i1( %vec, i64 %idx) - -declare <2 x i1> @llvm.vector.extract.v2i1.nxv32i1( %vec, i64 %idx) -declare <8 x i1> @llvm.vector.extract.v8i1.nxv32i1( %vec, i64 %idx) - -declare <2 x i1> @llvm.vector.extract.v2i1.nxv64i1( %vec, i64 %idx) -declare <8 x i1> @llvm.vector.extract.v8i1.nxv64i1( %vec, i64 %idx) - -declare <2 x i8> @llvm.vector.extract.v2i8.v4i8(<4 x i8> %vec, i64 %idx) -declare <2 x i8> @llvm.vector.extract.v2i8.v8i8(<8 x i8> %vec, i64 %idx) - -declare <1 x i32> @llvm.vector.extract.v1i32.v8i32(<8 x i32> %vec, i64 %idx) -declare <2 x i32> @llvm.vector.extract.v2i32.v8i32(<8 x i32> %vec, i64 %idx) - -declare <2 x i8> @llvm.vector.extract.v2i8.nxv2i8( %vec, i64 %idx) - -declare <2 x i32> @llvm.vector.extract.v2i32.nxv16i32( %vec, i64 %idx) -declare <8 x i32> @llvm.vector.extract.v8i32.nxv16i32( %vec, i64 %idx) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fceil-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fceil-constrained-sdnode.ll index 71b0624d91f22..22aef4899a6c2 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fceil-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fceil-constrained-sdnode.ll @@ -26,7 +26,6 @@ define <1 x half> @ceil_v1f16(<1 x half> %x) strictfp { %a = call <1 x half> 
@llvm.experimental.constrained.ceil.v1f16(<1 x half> %x, metadata !"fpexcept.strict") ret <1 x half> %a } -declare <1 x half> @llvm.experimental.constrained.ceil.v1f16(<1 x half>, metadata) define <2 x half> @ceil_v2f16(<2 x half> %x) strictfp { ; CHECK-LABEL: ceil_v2f16: @@ -50,7 +49,6 @@ define <2 x half> @ceil_v2f16(<2 x half> %x) strictfp { %a = call <2 x half> @llvm.experimental.constrained.ceil.v2f16(<2 x half> %x, metadata !"fpexcept.strict") ret <2 x half> %a } -declare <2 x half> @llvm.experimental.constrained.ceil.v2f16(<2 x half>, metadata) define <4 x half> @ceil_v4f16(<4 x half> %x) strictfp { ; CHECK-LABEL: ceil_v4f16: @@ -74,7 +72,6 @@ define <4 x half> @ceil_v4f16(<4 x half> %x) strictfp { %a = call <4 x half> @llvm.experimental.constrained.ceil.v4f16(<4 x half> %x, metadata !"fpexcept.strict") ret <4 x half> %a } -declare <4 x half> @llvm.experimental.constrained.ceil.v4f16(<4 x half>, metadata) define <8 x half> @ceil_v8f16(<8 x half> %x) strictfp { ; CHECK-LABEL: ceil_v8f16: @@ -98,7 +95,6 @@ define <8 x half> @ceil_v8f16(<8 x half> %x) strictfp { %a = call <8 x half> @llvm.experimental.constrained.ceil.v8f16(<8 x half> %x, metadata !"fpexcept.strict") ret <8 x half> %a } -declare <8 x half> @llvm.experimental.constrained.ceil.v8f16(<8 x half>, metadata) define <16 x half> @ceil_v16f16(<16 x half> %x) strictfp { ; CHECK-LABEL: ceil_v16f16: @@ -122,7 +118,6 @@ define <16 x half> @ceil_v16f16(<16 x half> %x) strictfp { %a = call <16 x half> @llvm.experimental.constrained.ceil.v16f16(<16 x half> %x, metadata !"fpexcept.strict") ret <16 x half> %a } -declare <16 x half> @llvm.experimental.constrained.ceil.v16f16(<16 x half>, metadata) define <32 x half> @ceil_v32f16(<32 x half> %x) strictfp { ; CHECK-LABEL: ceil_v32f16: @@ -147,7 +142,6 @@ define <32 x half> @ceil_v32f16(<32 x half> %x) strictfp { %a = call <32 x half> @llvm.experimental.constrained.ceil.v32f16(<32 x half> %x, metadata !"fpexcept.strict") ret <32 x half> %a } -declare <32 x half> 
@llvm.experimental.constrained.ceil.v32f16(<32 x half>, metadata) define <1 x float> @ceil_v1f32(<1 x float> %x) strictfp { ; CHECK-LABEL: ceil_v1f32: @@ -170,7 +164,6 @@ define <1 x float> @ceil_v1f32(<1 x float> %x) strictfp { %a = call <1 x float> @llvm.experimental.constrained.ceil.v1f32(<1 x float> %x, metadata !"fpexcept.strict") ret <1 x float> %a } -declare <1 x float> @llvm.experimental.constrained.ceil.v1f32(<1 x float>, metadata) define <2 x float> @ceil_v2f32(<2 x float> %x) strictfp { ; CHECK-LABEL: ceil_v2f32: @@ -193,7 +186,6 @@ define <2 x float> @ceil_v2f32(<2 x float> %x) strictfp { %a = call <2 x float> @llvm.experimental.constrained.ceil.v2f32(<2 x float> %x, metadata !"fpexcept.strict") ret <2 x float> %a } -declare <2 x float> @llvm.experimental.constrained.ceil.v2f32(<2 x float>, metadata) define <4 x float> @ceil_v4f32(<4 x float> %x) strictfp { ; CHECK-LABEL: ceil_v4f32: @@ -216,7 +208,6 @@ define <4 x float> @ceil_v4f32(<4 x float> %x) strictfp { %a = call <4 x float> @llvm.experimental.constrained.ceil.v4f32(<4 x float> %x, metadata !"fpexcept.strict") ret <4 x float> %a } -declare <4 x float> @llvm.experimental.constrained.ceil.v4f32(<4 x float>, metadata) define <8 x float> @ceil_v8f32(<8 x float> %x) strictfp { ; CHECK-LABEL: ceil_v8f32: @@ -239,7 +230,6 @@ define <8 x float> @ceil_v8f32(<8 x float> %x) strictfp { %a = call <8 x float> @llvm.experimental.constrained.ceil.v8f32(<8 x float> %x, metadata !"fpexcept.strict") ret <8 x float> %a } -declare <8 x float> @llvm.experimental.constrained.ceil.v8f32(<8 x float>, metadata) define <16 x float> @ceil_v16f32(<16 x float> %x) strictfp { ; CHECK-LABEL: ceil_v16f32: @@ -262,7 +252,6 @@ define <16 x float> @ceil_v16f32(<16 x float> %x) strictfp { %a = call <16 x float> @llvm.experimental.constrained.ceil.v16f32(<16 x float> %x, metadata !"fpexcept.strict") ret <16 x float> %a } -declare <16 x float> @llvm.experimental.constrained.ceil.v16f32(<16 x float>, metadata) define <1 x double> 
@ceil_v1f64(<1 x double> %x) strictfp { ; RV32-LABEL: ceil_v1f64: @@ -304,7 +293,6 @@ define <1 x double> @ceil_v1f64(<1 x double> %x) strictfp { %a = call <1 x double> @llvm.experimental.constrained.ceil.v1f64(<1 x double> %x, metadata !"fpexcept.strict") ret <1 x double> %a } -declare <1 x double> @llvm.experimental.constrained.ceil.v1f64(<1 x double>, metadata) define <2 x double> @ceil_v2f64(<2 x double> %x) strictfp { ; RV32-LABEL: ceil_v2f64: @@ -346,7 +334,6 @@ define <2 x double> @ceil_v2f64(<2 x double> %x) strictfp { %a = call <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double> %x, metadata !"fpexcept.strict") ret <2 x double> %a } -declare <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double>, metadata) define <4 x double> @ceil_v4f64(<4 x double> %x) strictfp { ; RV32-LABEL: ceil_v4f64: @@ -388,7 +375,6 @@ define <4 x double> @ceil_v4f64(<4 x double> %x) strictfp { %a = call <4 x double> @llvm.experimental.constrained.ceil.v4f64(<4 x double> %x, metadata !"fpexcept.strict") ret <4 x double> %a } -declare <4 x double> @llvm.experimental.constrained.ceil.v4f64(<4 x double>, metadata) define <8 x double> @ceil_v8f64(<8 x double> %x) strictfp { ; RV32-LABEL: ceil_v8f64: @@ -430,4 +416,3 @@ define <8 x double> @ceil_v8f64(<8 x double> %x) strictfp { %a = call <8 x double> @llvm.experimental.constrained.ceil.v8f64(<8 x double> %x, metadata !"fpexcept.strict") ret <8 x double> %a } -declare <8 x double> @llvm.experimental.constrained.ceil.v8f64(<8 x double>, metadata) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ffloor-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ffloor-constrained-sdnode.ll index 9eca66eea865c..511382cf5436e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ffloor-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ffloor-constrained-sdnode.ll @@ -26,7 +26,6 @@ define <1 x half> @floor_v1f16(<1 x half> %x) strictfp { %a = call <1 x half> 
@llvm.experimental.constrained.floor.v1f16(<1 x half> %x, metadata !"fpexcept.strict") ret <1 x half> %a } -declare <1 x half> @llvm.experimental.constrained.floor.v1f16(<1 x half>, metadata) define <2 x half> @floor_v2f16(<2 x half> %x) strictfp { ; CHECK-LABEL: floor_v2f16: @@ -50,7 +49,6 @@ define <2 x half> @floor_v2f16(<2 x half> %x) strictfp { %a = call <2 x half> @llvm.experimental.constrained.floor.v2f16(<2 x half> %x, metadata !"fpexcept.strict") ret <2 x half> %a } -declare <2 x half> @llvm.experimental.constrained.floor.v2f16(<2 x half>, metadata) define <4 x half> @floor_v4f16(<4 x half> %x) strictfp { ; CHECK-LABEL: floor_v4f16: @@ -74,7 +72,6 @@ define <4 x half> @floor_v4f16(<4 x half> %x) strictfp { %a = call <4 x half> @llvm.experimental.constrained.floor.v4f16(<4 x half> %x, metadata !"fpexcept.strict") ret <4 x half> %a } -declare <4 x half> @llvm.experimental.constrained.floor.v4f16(<4 x half>, metadata) define <8 x half> @floor_v8f16(<8 x half> %x) strictfp { ; CHECK-LABEL: floor_v8f16: @@ -98,7 +95,6 @@ define <8 x half> @floor_v8f16(<8 x half> %x) strictfp { %a = call <8 x half> @llvm.experimental.constrained.floor.v8f16(<8 x half> %x, metadata !"fpexcept.strict") ret <8 x half> %a } -declare <8 x half> @llvm.experimental.constrained.floor.v8f16(<8 x half>, metadata) define <16 x half> @floor_v16f16(<16 x half> %x) strictfp { ; CHECK-LABEL: floor_v16f16: @@ -122,7 +118,6 @@ define <16 x half> @floor_v16f16(<16 x half> %x) strictfp { %a = call <16 x half> @llvm.experimental.constrained.floor.v16f16(<16 x half> %x, metadata !"fpexcept.strict") ret <16 x half> %a } -declare <16 x half> @llvm.experimental.constrained.floor.v16f16(<16 x half>, metadata) define <32 x half> @floor_v32f16(<32 x half> %x) strictfp { ; CHECK-LABEL: floor_v32f16: @@ -147,7 +142,6 @@ define <32 x half> @floor_v32f16(<32 x half> %x) strictfp { %a = call <32 x half> @llvm.experimental.constrained.floor.v32f16(<32 x half> %x, metadata !"fpexcept.strict") ret <32 x half> %a 
} -declare <32 x half> @llvm.experimental.constrained.floor.v32f16(<32 x half>, metadata) define <1 x float> @floor_v1f32(<1 x float> %x) strictfp { ; CHECK-LABEL: floor_v1f32: @@ -170,7 +164,6 @@ define <1 x float> @floor_v1f32(<1 x float> %x) strictfp { %a = call <1 x float> @llvm.experimental.constrained.floor.v1f32(<1 x float> %x, metadata !"fpexcept.strict") ret <1 x float> %a } -declare <1 x float> @llvm.experimental.constrained.floor.v1f32(<1 x float>, metadata) define <2 x float> @floor_v2f32(<2 x float> %x) strictfp { ; CHECK-LABEL: floor_v2f32: @@ -193,7 +186,6 @@ define <2 x float> @floor_v2f32(<2 x float> %x) strictfp { %a = call <2 x float> @llvm.experimental.constrained.floor.v2f32(<2 x float> %x, metadata !"fpexcept.strict") ret <2 x float> %a } -declare <2 x float> @llvm.experimental.constrained.floor.v2f32(<2 x float>, metadata) define <4 x float> @floor_v4f32(<4 x float> %x) strictfp { ; CHECK-LABEL: floor_v4f32: @@ -216,7 +208,6 @@ define <4 x float> @floor_v4f32(<4 x float> %x) strictfp { %a = call <4 x float> @llvm.experimental.constrained.floor.v4f32(<4 x float> %x, metadata !"fpexcept.strict") ret <4 x float> %a } -declare <4 x float> @llvm.experimental.constrained.floor.v4f32(<4 x float>, metadata) define <8 x float> @floor_v8f32(<8 x float> %x) strictfp { ; CHECK-LABEL: floor_v8f32: @@ -239,7 +230,6 @@ define <8 x float> @floor_v8f32(<8 x float> %x) strictfp { %a = call <8 x float> @llvm.experimental.constrained.floor.v8f32(<8 x float> %x, metadata !"fpexcept.strict") ret <8 x float> %a } -declare <8 x float> @llvm.experimental.constrained.floor.v8f32(<8 x float>, metadata) define <16 x float> @floor_v16f32(<16 x float> %x) strictfp { ; CHECK-LABEL: floor_v16f32: @@ -262,7 +252,6 @@ define <16 x float> @floor_v16f32(<16 x float> %x) strictfp { %a = call <16 x float> @llvm.experimental.constrained.floor.v16f32(<16 x float> %x, metadata !"fpexcept.strict") ret <16 x float> %a } -declare <16 x float> 
@llvm.experimental.constrained.floor.v16f32(<16 x float>, metadata) define <1 x double> @floor_v1f64(<1 x double> %x) strictfp { ; RV32-LABEL: floor_v1f64: @@ -304,7 +293,6 @@ define <1 x double> @floor_v1f64(<1 x double> %x) strictfp { %a = call <1 x double> @llvm.experimental.constrained.floor.v1f64(<1 x double> %x, metadata !"fpexcept.strict") ret <1 x double> %a } -declare <1 x double> @llvm.experimental.constrained.floor.v1f64(<1 x double>, metadata) define <2 x double> @floor_v2f64(<2 x double> %x) strictfp { ; RV32-LABEL: floor_v2f64: @@ -346,7 +334,6 @@ define <2 x double> @floor_v2f64(<2 x double> %x) strictfp { %a = call <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double> %x, metadata !"fpexcept.strict") ret <2 x double> %a } -declare <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double>, metadata) define <4 x double> @floor_v4f64(<4 x double> %x) strictfp { ; RV32-LABEL: floor_v4f64: @@ -388,7 +375,6 @@ define <4 x double> @floor_v4f64(<4 x double> %x) strictfp { %a = call <4 x double> @llvm.experimental.constrained.floor.v4f64(<4 x double> %x, metadata !"fpexcept.strict") ret <4 x double> %a } -declare <4 x double> @llvm.experimental.constrained.floor.v4f64(<4 x double>, metadata) define <8 x double> @floor_v8f64(<8 x double> %x) strictfp { ; RV32-LABEL: floor_v8f64: @@ -430,4 +416,3 @@ define <8 x double> @floor_v8f64(<8 x double> %x) strictfp { %a = call <8 x double> @llvm.experimental.constrained.floor.v8f64(<8 x double> %x, metadata !"fpexcept.strict") ret <8 x double> %a } -declare <8 x double> @llvm.experimental.constrained.floor.v8f64(<8 x double>, metadata) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll index 4494b97119403..76f5f0a32bd1c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v 
-target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN -declare <2 x half> @llvm.vp.floor.v2f16(<2 x half>, <2 x i1>, i32) - define <2 x half> @vp_floor_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_floor_v2f16: ; ZVFH: # %bb.0: @@ -96,8 +94,6 @@ define <2 x half> @vp_floor_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) { ret <2 x half> %v } -declare <4 x half> @llvm.vp.floor.v4f16(<4 x half>, <4 x i1>, i32) - define <4 x half> @vp_floor_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_floor_v4f16: ; ZVFH: # %bb.0: @@ -184,8 +180,6 @@ define <4 x half> @vp_floor_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) { ret <4 x half> %v } -declare <8 x half> @llvm.vp.floor.v8f16(<8 x half>, <8 x i1>, i32) - define <8 x half> @vp_floor_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_floor_v8f16: ; ZVFH: # %bb.0: @@ -272,8 +266,6 @@ define <8 x half> @vp_floor_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) { ret <8 x half> %v } -declare <16 x half> @llvm.vp.floor.v16f16(<16 x half>, <16 x i1>, i32) - define <16 x half> @vp_floor_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_floor_v16f16: ; ZVFH: # %bb.0: @@ -362,8 +354,6 @@ define <16 x half> @vp_floor_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) ret <16 x half> %v } -declare <2 x float> @llvm.vp.floor.v2f32(<2 x float>, <2 x i1>, i32) - define <2 x float> @vp_floor_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_floor_v2f32: ; CHECK: # %bb.0: @@ -404,8 +394,6 @@ define <2 x float> @vp_floor_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) { ret <2 x float> %v } -declare <4 x float> @llvm.vp.floor.v4f32(<4 x float>, <4 x i1>, i32) - define <4 x float> @vp_floor_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_floor_v4f32: ; CHECK: # %bb.0: @@ -446,8 +434,6 @@ define <4 x float> @vp_floor_v4f32_unmasked(<4 x 
float> %va, i32 zeroext %evl) { ret <4 x float> %v } -declare <8 x float> @llvm.vp.floor.v8f32(<8 x float>, <8 x i1>, i32) - define <8 x float> @vp_floor_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_floor_v8f32: ; CHECK: # %bb.0: @@ -490,8 +476,6 @@ define <8 x float> @vp_floor_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) { ret <8 x float> %v } -declare <16 x float> @llvm.vp.floor.v16f32(<16 x float>, <16 x i1>, i32) - define <16 x float> @vp_floor_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_floor_v16f32: ; CHECK: # %bb.0: @@ -534,8 +518,6 @@ define <16 x float> @vp_floor_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl ret <16 x float> %v } -declare <2 x double> @llvm.vp.floor.v2f64(<2 x double>, <2 x i1>, i32) - define <2 x double> @vp_floor_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_floor_v2f64: ; RV32ZVFH: # %bb.0: @@ -676,8 +658,6 @@ define <2 x double> @vp_floor_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl) ret <2 x double> %v } -declare <4 x double> @llvm.vp.floor.v4f64(<4 x double>, <4 x i1>, i32) - define <4 x double> @vp_floor_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_floor_v4f64: ; RV32ZVFH: # %bb.0: @@ -826,8 +806,6 @@ define <4 x double> @vp_floor_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) ret <4 x double> %v } -declare <8 x double> @llvm.vp.floor.v8f64(<8 x double>, <8 x i1>, i32) - define <8 x double> @vp_floor_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_floor_v8f64: ; RV32ZVFH: # %bb.0: @@ -976,8 +954,6 @@ define <8 x double> @vp_floor_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl) ret <8 x double> %v } -declare <15 x double> @llvm.vp.floor.v15f64(<15 x double>, <15 x i1>, i32) - define <15 x double> @vp_floor_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_floor_v15f64: ; RV32ZVFH: # %bb.0: @@ -1126,8 +1102,6 @@ define <15 x 
double> @vp_floor_v15f64_unmasked(<15 x double> %va, i32 zeroext %e ret <15 x double> %v } -declare <16 x double> @llvm.vp.floor.v16f64(<16 x double>, <16 x i1>, i32) - define <16 x double> @vp_floor_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_floor_v16f64: ; RV32ZVFH: # %bb.0: @@ -1276,8 +1250,6 @@ define <16 x double> @vp_floor_v16f64_unmasked(<16 x double> %va, i32 zeroext %e ret <16 x double> %v } -declare <32 x double> @llvm.vp.floor.v32f64(<32 x double>, <32 x i1>, i32) - define <32 x double> @vp_floor_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_floor_v32f64: ; RV32ZVFH: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll index 2bf039bd0104a..da6e2fae93687 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare <2 x half> @llvm.vp.maximum.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32) - define <2 x half> @vfmax_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmax_vv_v2f16: ; ZVFH: # %bb.0: @@ -76,8 +74,6 @@ define <2 x half> @vfmax_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %vb, i32 z ret <2 x half> %v } -declare <4 x half> @llvm.vp.maximum.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32) - define <4 x half> @vfmax_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmax_vv_v4f16: ; ZVFH: # %bb.0: @@ -144,8 +140,6 @@ define <4 x half> @vfmax_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %vb, i32 z ret <4 x half> %v } -declare <8 x half> @llvm.vp.maximum.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32) - define <8 x half> @vfmax_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 
zeroext %evl) { ; ZVFH-LABEL: vfmax_vv_v8f16: ; ZVFH: # %bb.0: @@ -214,8 +208,6 @@ define <8 x half> @vfmax_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %vb, i32 z ret <8 x half> %v } -declare <16 x half> @llvm.vp.maximum.v16f16(<16 x half>, <16 x half>, <16 x i1>, i32) - define <16 x half> @vfmax_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmax_vv_v16f16: ; ZVFH: # %bb.0: @@ -286,8 +278,6 @@ define <16 x half> @vfmax_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %vb, i ret <16 x half> %v } -declare <2 x float> @llvm.vp.maximum.v2f32(<2 x float>, <2 x float>, <2 x i1>, i32) - define <2 x float> @vfmax_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v2f32: ; CHECK: # %bb.0: @@ -319,8 +309,6 @@ define <2 x float> @vfmax_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %vb, i3 ret <2 x float> %v } -declare <4 x float> @llvm.vp.maximum.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32) - define <4 x float> @vfmax_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v4f32: ; CHECK: # %bb.0: @@ -352,8 +340,6 @@ define <4 x float> @vfmax_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %vb, i3 ret <4 x float> %v } -declare <8 x float> @llvm.vp.maximum.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32) - define <8 x float> @vfmax_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v8f32: ; CHECK: # %bb.0: @@ -387,8 +373,6 @@ define <8 x float> @vfmax_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %vb, i3 ret <8 x float> %v } -declare <16 x float> @llvm.vp.maximum.v16f32(<16 x float>, <16 x float>, <16 x i1>, i32) - define <16 x float> @vfmax_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v16f32: ; CHECK: # %bb.0: @@ -422,8 +406,6 @@ define <16 x float> @vfmax_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %vb ret <16 x float> %v } 
-declare <2 x double> @llvm.vp.maximum.v2f64(<2 x double>, <2 x double>, <2 x i1>, i32) - define <2 x double> @vfmax_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v2f64: ; CHECK: # %bb.0: @@ -455,8 +437,6 @@ define <2 x double> @vfmax_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %vb, ret <2 x double> %v } -declare <4 x double> @llvm.vp.maximum.v4f64(<4 x double>, <4 x double>, <4 x i1>, i32) - define <4 x double> @vfmax_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v4f64: ; CHECK: # %bb.0: @@ -490,8 +470,6 @@ define <4 x double> @vfmax_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %vb, ret <4 x double> %v } -declare <8 x double> @llvm.vp.maximum.v8f64(<8 x double>, <8 x double>, <8 x i1>, i32) - define <8 x double> @vfmax_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v8f64: ; CHECK: # %bb.0: @@ -525,8 +503,6 @@ define <8 x double> @vfmax_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %vb, ret <8 x double> %v } -declare <16 x double> @llvm.vp.maximum.v16f64(<16 x double>, <16 x double>, <16 x i1>, i32) - define <16 x double> @vfmax_vv_v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v16f64: ; CHECK: # %bb.0: @@ -576,8 +552,6 @@ define <16 x double> @vfmax_vv_v16f64_unmasked(<16 x double> %va, <16 x double> ret <16 x double> %v } -declare <32 x double> @llvm.vp.maximum.v32f64(<32 x double>, <32 x double>, <32 x i1>, i32) - define <32 x double> @vfmax_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v32f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum.ll index e2cbdd3911ad5..6ee2e204bcfe3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare <2 x half> @llvm.maximum.v2f16(<2 x half>, <2 x half>) - define <2 x half> @vfmax_v2f16_vv(<2 x half> %a, <2 x half> %b) { ; ZVFH-LABEL: vfmax_v2f16_vv: ; ZVFH: # %bb.0: @@ -41,8 +39,6 @@ define <2 x half> @vfmax_v2f16_vv(<2 x half> %a, <2 x half> %b) { ret <2 x half> %v } -declare <4 x half> @llvm.maximum.v4f16(<4 x half>, <4 x half>) - define <4 x half> @vfmax_v4f16_vv(<4 x half> %a, <4 x half> %b) { ; ZVFH-LABEL: vfmax_v4f16_vv: ; ZVFH: # %bb.0: @@ -74,8 +70,6 @@ define <4 x half> @vfmax_v4f16_vv(<4 x half> %a, <4 x half> %b) { ret <4 x half> %v } -declare <8 x half> @llvm.maximum.v8f16(<8 x half>, <8 x half>) - define <8 x half> @vfmax_v8f16_vv(<8 x half> %a, <8 x half> %b) { ; ZVFH-LABEL: vfmax_v8f16_vv: ; ZVFH: # %bb.0: @@ -107,8 +101,6 @@ define <8 x half> @vfmax_v8f16_vv(<8 x half> %a, <8 x half> %b) { ret <8 x half> %v } -declare <16 x half> @llvm.maximum.v16f16(<16 x half>, <16 x half>) - define <16 x half> @vfmax_v16f16_vv(<16 x half> %a, <16 x half> %b) { ; ZVFH-LABEL: vfmax_v16f16_vv: ; ZVFH: # %bb.0: @@ -140,8 +132,6 @@ define <16 x half> @vfmax_v16f16_vv(<16 x half> %a, <16 x half> %b) { ret <16 x half> %v } -declare <2 x float> @llvm.maximum.v2f32(<2 x float>, <2 x float>) - define <2 x float> @vfmax_v2f32_vv(<2 x float> %a, <2 x float> %b) { ; CHECK-LABEL: vfmax_v2f32_vv: ; CHECK: # %bb.0: @@ -156,8 +146,6 @@ define <2 x float> @vfmax_v2f32_vv(<2 x float> %a, <2 x float> %b) { ret <2 x float> %v } -declare <4 x float> @llvm.maximum.v4f32(<4 x float>, <4 x float>) - define <4 x float> @vfmax_v4f32_vv(<4 x float> %a, <4 x float> %b) { ; CHECK-LABEL: vfmax_v4f32_vv: ; CHECK: # %bb.0: @@ -172,8 +160,6 @@ define <4 x float> @vfmax_v4f32_vv(<4 x float> %a, <4 x float> %b) { ret <4 x float> %v } -declare <8 x float> 
@llvm.maximum.v8f32(<8 x float>, <8 x float>) - define <8 x float> @vfmax_v8f32_vv(<8 x float> %a, <8 x float> %b) { ; CHECK-LABEL: vfmax_v8f32_vv: ; CHECK: # %bb.0: @@ -188,8 +174,6 @@ define <8 x float> @vfmax_v8f32_vv(<8 x float> %a, <8 x float> %b) { ret <8 x float> %v } -declare <16 x float> @llvm.maximum.v16f32(<16 x float>, <16 x float>) - define <16 x float> @vfmax_v16f32_vv(<16 x float> %a, <16 x float> %b) { ; CHECK-LABEL: vfmax_v16f32_vv: ; CHECK: # %bb.0: @@ -204,8 +188,6 @@ define <16 x float> @vfmax_v16f32_vv(<16 x float> %a, <16 x float> %b) { ret <16 x float> %v } -declare <2 x double> @llvm.maximum.v2f64(<2 x double>, <2 x double>) - define <2 x double> @vfmax_v2f64_vv(<2 x double> %a, <2 x double> %b) { ; CHECK-LABEL: vfmax_v2f64_vv: ; CHECK: # %bb.0: @@ -220,8 +202,6 @@ define <2 x double> @vfmax_v2f64_vv(<2 x double> %a, <2 x double> %b) { ret <2 x double> %v } -declare <4 x double> @llvm.maximum.v4f64(<4 x double>, <4 x double>) - define <4 x double> @vfmax_v4f64_vv(<4 x double> %a, <4 x double> %b) { ; CHECK-LABEL: vfmax_v4f64_vv: ; CHECK: # %bb.0: @@ -236,8 +216,6 @@ define <4 x double> @vfmax_v4f64_vv(<4 x double> %a, <4 x double> %b) { ret <4 x double> %v } -declare <8 x double> @llvm.maximum.v8f64(<8 x double>, <8 x double>) - define <8 x double> @vfmax_v8f64_vv(<8 x double> %a, <8 x double> %b) { ; CHECK-LABEL: vfmax_v8f64_vv: ; CHECK: # %bb.0: @@ -252,8 +230,6 @@ define <8 x double> @vfmax_v8f64_vv(<8 x double> %a, <8 x double> %b) { ret <8 x double> %v } -declare <16 x double> @llvm.maximum.v16f64(<16 x double>, <16 x double>) - define <16 x double> @vfmax_v16f64_vv(<16 x double> %a, <16 x double> %b) nounwind { ; CHECK-LABEL: vfmax_v16f64_vv: ; CHECK: # %bb.0: @@ -358,8 +334,6 @@ define <2 x half> @vfmax_v2f16_vv_nnanb(<2 x half> %a, <2 x half> %b) { ret <2 x half> %v } -declare <4 x half> @llvm.vector.insert.v2f32.v4f32(<4 x half>, <2 x half>, i64) - define <4 x half> @vfmax_v2f16_vv_nnan_insert_subvector(<2 x half> %a, <2 x half> %b, 
<4 x half> %c) { ; ZVFH-LABEL: vfmax_v2f16_vv_nnan_insert_subvector: ; ZVFH: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll index 73d83e86af4c6..e179970199171 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare <2 x half> @llvm.vp.minimum.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32) - define <2 x half> @vfmin_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmin_vv_v2f16: ; ZVFH: # %bb.0: @@ -76,8 +74,6 @@ define <2 x half> @vfmin_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %vb, i32 z ret <2 x half> %v } -declare <4 x half> @llvm.vp.minimum.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32) - define <4 x half> @vfmin_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmin_vv_v4f16: ; ZVFH: # %bb.0: @@ -144,8 +140,6 @@ define <4 x half> @vfmin_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %vb, i32 z ret <4 x half> %v } -declare <8 x half> @llvm.vp.minimum.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32) - define <8 x half> @vfmin_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmin_vv_v8f16: ; ZVFH: # %bb.0: @@ -214,8 +208,6 @@ define <8 x half> @vfmin_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %vb, i32 z ret <8 x half> %v } -declare <16 x half> @llvm.vp.minimum.v16f16(<16 x half>, <16 x half>, <16 x i1>, i32) - define <16 x half> @vfmin_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmin_vv_v16f16: ; ZVFH: # %bb.0: @@ -286,8 +278,6 @@ define <16 x half> @vfmin_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %vb, i ret <16 x half> %v } -declare <2 x float> 
@llvm.vp.minimum.v2f32(<2 x float>, <2 x float>, <2 x i1>, i32) - define <2 x float> @vfmin_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v2f32: ; CHECK: # %bb.0: @@ -319,8 +309,6 @@ define <2 x float> @vfmin_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %vb, i3 ret <2 x float> %v } -declare <4 x float> @llvm.vp.minimum.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32) - define <4 x float> @vfmin_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v4f32: ; CHECK: # %bb.0: @@ -352,8 +340,6 @@ define <4 x float> @vfmin_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %vb, i3 ret <4 x float> %v } -declare <8 x float> @llvm.vp.minimum.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32) - define <8 x float> @vfmin_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v8f32: ; CHECK: # %bb.0: @@ -387,8 +373,6 @@ define <8 x float> @vfmin_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %vb, i3 ret <8 x float> %v } -declare <16 x float> @llvm.vp.minimum.v16f32(<16 x float>, <16 x float>, <16 x i1>, i32) - define <16 x float> @vfmin_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v16f32: ; CHECK: # %bb.0: @@ -422,8 +406,6 @@ define <16 x float> @vfmin_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %vb ret <16 x float> %v } -declare <2 x double> @llvm.vp.minimum.v2f64(<2 x double>, <2 x double>, <2 x i1>, i32) - define <2 x double> @vfmin_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v2f64: ; CHECK: # %bb.0: @@ -455,8 +437,6 @@ define <2 x double> @vfmin_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %vb, ret <2 x double> %v } -declare <4 x double> @llvm.vp.minimum.v4f64(<4 x double>, <4 x double>, <4 x i1>, i32) - define <4 x double> @vfmin_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 zeroext %evl) { ; 
CHECK-LABEL: vfmin_vv_v4f64: ; CHECK: # %bb.0: @@ -490,8 +470,6 @@ define <4 x double> @vfmin_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %vb, ret <4 x double> %v } -declare <8 x double> @llvm.vp.minimum.v8f64(<8 x double>, <8 x double>, <8 x i1>, i32) - define <8 x double> @vfmin_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v8f64: ; CHECK: # %bb.0: @@ -525,8 +503,6 @@ define <8 x double> @vfmin_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %vb, ret <8 x double> %v } -declare <16 x double> @llvm.vp.minimum.v16f64(<16 x double>, <16 x double>, <16 x i1>, i32) - define <16 x double> @vfmin_vv_v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v16f64: ; CHECK: # %bb.0: @@ -576,8 +552,6 @@ define <16 x double> @vfmin_vv_v16f64_unmasked(<16 x double> %va, <16 x double> ret <16 x double> %v } -declare <32 x double> @llvm.vp.minimum.v32f64(<32 x double>, <32 x double>, <32 x i1>, i32) - define <32 x double> @vfmin_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v32f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum.ll index 9c75af359a4cb..a95177a1de9a6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare <2 x half> @llvm.minimum.v2f16(<2 x half>, <2 x half>) - define <2 x half> @vfmin_v2f16_vv(<2 x half> %a, <2 x half> %b) { ; ZVFH-LABEL: vfmin_v2f16_vv: ; ZVFH: # %bb.0: @@ -41,8 +39,6 @@ define <2 x half> @vfmin_v2f16_vv(<2 x half> %a, <2 x half> %b) { ret <2 x half> %v } -declare <4 x half> @llvm.minimum.v4f16(<4 x half>, <4 x half>) - define <4 x half> 
@vfmin_v4f16_vv(<4 x half> %a, <4 x half> %b) { ; ZVFH-LABEL: vfmin_v4f16_vv: ; ZVFH: # %bb.0: @@ -74,8 +70,6 @@ define <4 x half> @vfmin_v4f16_vv(<4 x half> %a, <4 x half> %b) { ret <4 x half> %v } -declare <8 x half> @llvm.minimum.v8f16(<8 x half>, <8 x half>) - define <8 x half> @vfmin_v8f16_vv(<8 x half> %a, <8 x half> %b) { ; ZVFH-LABEL: vfmin_v8f16_vv: ; ZVFH: # %bb.0: @@ -107,8 +101,6 @@ define <8 x half> @vfmin_v8f16_vv(<8 x half> %a, <8 x half> %b) { ret <8 x half> %v } -declare <16 x half> @llvm.minimum.v16f16(<16 x half>, <16 x half>) - define <16 x half> @vfmin_v16f16_vv(<16 x half> %a, <16 x half> %b) { ; ZVFH-LABEL: vfmin_v16f16_vv: ; ZVFH: # %bb.0: @@ -140,8 +132,6 @@ define <16 x half> @vfmin_v16f16_vv(<16 x half> %a, <16 x half> %b) { ret <16 x half> %v } -declare <2 x float> @llvm.minimum.v2f32(<2 x float>, <2 x float>) - define <2 x float> @vfmin_v2f32_vv(<2 x float> %a, <2 x float> %b) { ; CHECK-LABEL: vfmin_v2f32_vv: ; CHECK: # %bb.0: @@ -156,8 +146,6 @@ define <2 x float> @vfmin_v2f32_vv(<2 x float> %a, <2 x float> %b) { ret <2 x float> %v } -declare <4 x float> @llvm.minimum.v4f32(<4 x float>, <4 x float>) - define <4 x float> @vfmin_v4f32_vv(<4 x float> %a, <4 x float> %b) { ; CHECK-LABEL: vfmin_v4f32_vv: ; CHECK: # %bb.0: @@ -172,8 +160,6 @@ define <4 x float> @vfmin_v4f32_vv(<4 x float> %a, <4 x float> %b) { ret <4 x float> %v } -declare <8 x float> @llvm.minimum.v8f32(<8 x float>, <8 x float>) - define <8 x float> @vfmin_v8f32_vv(<8 x float> %a, <8 x float> %b) { ; CHECK-LABEL: vfmin_v8f32_vv: ; CHECK: # %bb.0: @@ -188,8 +174,6 @@ define <8 x float> @vfmin_v8f32_vv(<8 x float> %a, <8 x float> %b) { ret <8 x float> %v } -declare <16 x float> @llvm.minimum.v16f32(<16 x float>, <16 x float>) - define <16 x float> @vfmin_v16f32_vv(<16 x float> %a, <16 x float> %b) { ; CHECK-LABEL: vfmin_v16f32_vv: ; CHECK: # %bb.0: @@ -204,8 +188,6 @@ define <16 x float> @vfmin_v16f32_vv(<16 x float> %a, <16 x float> %b) { ret <16 x float> %v } -declare <2 x 
double> @llvm.minimum.v2f64(<2 x double>, <2 x double>) - define <2 x double> @vfmin_v2f64_vv(<2 x double> %a, <2 x double> %b) { ; CHECK-LABEL: vfmin_v2f64_vv: ; CHECK: # %bb.0: @@ -220,8 +202,6 @@ define <2 x double> @vfmin_v2f64_vv(<2 x double> %a, <2 x double> %b) { ret <2 x double> %v } -declare <4 x double> @llvm.minimum.v4f64(<4 x double>, <4 x double>) - define <4 x double> @vfmin_v4f64_vv(<4 x double> %a, <4 x double> %b) { ; CHECK-LABEL: vfmin_v4f64_vv: ; CHECK: # %bb.0: @@ -236,8 +216,6 @@ define <4 x double> @vfmin_v4f64_vv(<4 x double> %a, <4 x double> %b) { ret <4 x double> %v } -declare <8 x double> @llvm.minimum.v8f64(<8 x double>, <8 x double>) - define <8 x double> @vfmin_v8f64_vv(<8 x double> %a, <8 x double> %b) { ; CHECK-LABEL: vfmin_v8f64_vv: ; CHECK: # %bb.0: @@ -252,8 +230,6 @@ define <8 x double> @vfmin_v8f64_vv(<8 x double> %a, <8 x double> %b) { ret <8 x double> %v } -declare <16 x double> @llvm.minimum.v16f64(<16 x double>, <16 x double>) - define <16 x double> @vfmin_v16f64_vv(<16 x double> %a, <16 x double> %b) nounwind { ; CHECK-LABEL: vfmin_v16f64_vv: ; CHECK: # %bb.0: @@ -358,8 +334,6 @@ define <2 x half> @vfmin_v2f16_vv_nnanb(<2 x half> %a, <2 x half> %b) { ret <2 x half> %v } -declare <4 x half> @llvm.vector.insert.v2f32.v4f32(<4 x half>, <2 x half>, i64) - define <4 x half> @vfmin_v2f16_vv_nnan_insert_subvector(<2 x half> %a, <2 x half> %b, <4 x half> %c) { ; ZVFH-LABEL: vfmin_v2f16_vv_nnan_insert_subvector: ; ZVFH: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fnearbyint-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fnearbyint-constrained-sdnode.ll index dd1b99bee6d55..8485eb8ac1caa 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fnearbyint-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fnearbyint-constrained-sdnode.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck 
--check-prefixes=CHECK,RV64 %s -declare <2 x half> @llvm.experimental.constrained.nearbyint.v2f16(<2 x half>, metadata, metadata) - define <2 x half> @nearbyint_v2f16(<2 x half> %v) strictfp { ; CHECK-LABEL: nearbyint_v2f16: ; CHECK: # %bb.0: @@ -29,8 +27,6 @@ define <2 x half> @nearbyint_v2f16(<2 x half> %v) strictfp { ret <2 x half> %r } -declare <4 x half> @llvm.experimental.constrained.nearbyint.v4f16(<4 x half>, metadata, metadata) - define <4 x half> @nearbyint_v4f16(<4 x half> %v) strictfp { ; CHECK-LABEL: nearbyint_v4f16: ; CHECK: # %bb.0: @@ -54,8 +50,6 @@ define <4 x half> @nearbyint_v4f16(<4 x half> %v) strictfp { ret <4 x half> %r } -declare <8 x half> @llvm.experimental.constrained.nearbyint.v8f16(<8 x half>, metadata, metadata) - define <8 x half> @nearbyint_v8f16(<8 x half> %v) strictfp { ; CHECK-LABEL: nearbyint_v8f16: ; CHECK: # %bb.0: @@ -79,8 +73,6 @@ define <8 x half> @nearbyint_v8f16(<8 x half> %v) strictfp { ret <8 x half> %r } -declare <16 x half> @llvm.experimental.constrained.nearbyint.v16f16(<16 x half>, metadata, metadata) - define <16 x half> @nearbyint_v16f16(<16 x half> %v) strictfp { ; CHECK-LABEL: nearbyint_v16f16: ; CHECK: # %bb.0: @@ -104,8 +96,6 @@ define <16 x half> @nearbyint_v16f16(<16 x half> %v) strictfp { ret <16 x half> %r } -declare <32 x half> @llvm.experimental.constrained.nearbyint.v32f16(<32 x half>, metadata, metadata) - define <32 x half> @nearbyint_v32f16(<32 x half> %v) strictfp { ; CHECK-LABEL: nearbyint_v32f16: ; CHECK: # %bb.0: @@ -130,8 +120,6 @@ define <32 x half> @nearbyint_v32f16(<32 x half> %v) strictfp { ret <32 x half> %r } -declare <2 x float> @llvm.experimental.constrained.nearbyint.v2f32(<2 x float>, metadata, metadata) - define <2 x float> @nearbyint_v2f32(<2 x float> %v) strictfp { ; CHECK-LABEL: nearbyint_v2f32: ; CHECK: # %bb.0: @@ -154,8 +142,6 @@ define <2 x float> @nearbyint_v2f32(<2 x float> %v) strictfp { ret <2 x float> %r } -declare <4 x float> 
@llvm.experimental.constrained.nearbyint.v4f32(<4 x float>, metadata, metadata) - define <4 x float> @nearbyint_v4f32(<4 x float> %v) strictfp { ; CHECK-LABEL: nearbyint_v4f32: ; CHECK: # %bb.0: @@ -178,8 +164,6 @@ define <4 x float> @nearbyint_v4f32(<4 x float> %v) strictfp { ret <4 x float> %r } -declare <8 x float> @llvm.experimental.constrained.nearbyint.v8f32(<8 x float>, metadata, metadata) - define <8 x float> @nearbyint_v8f32(<8 x float> %v) strictfp { ; CHECK-LABEL: nearbyint_v8f32: ; CHECK: # %bb.0: @@ -202,8 +186,6 @@ define <8 x float> @nearbyint_v8f32(<8 x float> %v) strictfp { ret <8 x float> %r } -declare <16 x float> @llvm.experimental.constrained.nearbyint.v16f32(<16 x float>, metadata, metadata) - define <16 x float> @nearbyint_v16f32(<16 x float> %v) strictfp { ; CHECK-LABEL: nearbyint_v16f32: ; CHECK: # %bb.0: @@ -226,8 +208,6 @@ define <16 x float> @nearbyint_v16f32(<16 x float> %v) strictfp { ret <16 x float> %r } -declare <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(<2 x double>, metadata, metadata) - define <2 x double> @nearbyint_v2f64(<2 x double> %v) strictfp { ; RV32-LABEL: nearbyint_v2f64: ; RV32: # %bb.0: @@ -269,8 +249,6 @@ define <2 x double> @nearbyint_v2f64(<2 x double> %v) strictfp { ret <2 x double> %r } -declare <4 x double> @llvm.experimental.constrained.nearbyint.v4f64(<4 x double>, metadata, metadata) - define <4 x double> @nearbyint_v4f64(<4 x double> %v) strictfp { ; RV32-LABEL: nearbyint_v4f64: ; RV32: # %bb.0: @@ -312,8 +290,6 @@ define <4 x double> @nearbyint_v4f64(<4 x double> %v) strictfp { ret <4 x double> %r } -declare <8 x double> @llvm.experimental.constrained.nearbyint.v8f64(<8 x double>, metadata, metadata) - define <8 x double> @nearbyint_v8f64(<8 x double> %v) strictfp { ; RV32-LABEL: nearbyint_v8f64: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i-sat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i-sat.ll index 1f9a8bf8133c8..0788d0a719e11 100644 --- 
a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i-sat.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i-sat.ll @@ -17,7 +17,6 @@ define void @fp2si_v2f32_v2i32(ptr %x, ptr %y) { store <2 x i32> %d, ptr %y ret void } -declare <2 x i32> @llvm.fptosi.sat.v2i32.v2f32(<2 x float>) define void @fp2ui_v2f32_v2i32(ptr %x, ptr %y) { ; CHECK-LABEL: fp2ui_v2f32_v2i32: @@ -34,7 +33,6 @@ define void @fp2ui_v2f32_v2i32(ptr %x, ptr %y) { store <2 x i32> %d, ptr %y ret void } -declare <2 x i32> @llvm.fptoui.sat.v2i32.v2f32(<2 x float>) define void @fp2si_v8f32_v8i32(ptr %x, ptr %y) { ; @@ -52,7 +50,6 @@ define void @fp2si_v8f32_v8i32(ptr %x, ptr %y) { store <8 x i32> %d, ptr %y ret void } -declare <8 x i32> @llvm.fptosi.sat.v8i32.v8f32(<8 x float>) define void @fp2ui_v8f32_v8i32(ptr %x, ptr %y) { ; @@ -70,7 +67,6 @@ define void @fp2ui_v8f32_v8i32(ptr %x, ptr %y) { store <8 x i32> %d, ptr %y ret void } -declare <8 x i32> @llvm.fptoui.sat.v8i32.v8f32(<8 x float>) define void @fp2si_v2f32_v2i64(ptr %x, ptr %y) { ; CHECK-LABEL: fp2si_v2f32_v2i64: @@ -88,7 +84,6 @@ define void @fp2si_v2f32_v2i64(ptr %x, ptr %y) { store <2 x i64> %d, ptr %y ret void } -declare <2 x i64> @llvm.fptosi.sat.v2i64.v2f32(<2 x float>) define void @fp2ui_v2f32_v2i64(ptr %x, ptr %y) { ; CHECK-LABEL: fp2ui_v2f32_v2i64: @@ -106,7 +101,6 @@ define void @fp2ui_v2f32_v2i64(ptr %x, ptr %y) { store <2 x i64> %d, ptr %y ret void } -declare <2 x i64> @llvm.fptoui.sat.v2i64.v2f32(<2 x float>) define void @fp2si_v8f32_v8i64(ptr %x, ptr %y) { ; @@ -125,7 +119,6 @@ define void @fp2si_v8f32_v8i64(ptr %x, ptr %y) { store <8 x i64> %d, ptr %y ret void } -declare <8 x i64> @llvm.fptosi.sat.v8i64.v8f32(<8 x float>) define void @fp2ui_v8f32_v8i64(ptr %x, ptr %y) { ; @@ -144,7 +137,6 @@ define void @fp2ui_v8f32_v8i64(ptr %x, ptr %y) { store <8 x i64> %d, ptr %y ret void } -declare <8 x i64> @llvm.fptoui.sat.v8i64.v8f32(<8 x float>) define void @fp2si_v2f16_v2i64(ptr %x, ptr %y) { ; CHECK-LABEL: fp2si_v2f16_v2i64: @@ -164,7 
+156,6 @@ define void @fp2si_v2f16_v2i64(ptr %x, ptr %y) { store <2 x i64> %d, ptr %y ret void } -declare <2 x i64> @llvm.fptosi.sat.v2i64.v2f16(<2 x half>) define void @fp2ui_v2f16_v2i64(ptr %x, ptr %y) { ; CHECK-LABEL: fp2ui_v2f16_v2i64: @@ -184,7 +175,6 @@ define void @fp2ui_v2f16_v2i64(ptr %x, ptr %y) { store <2 x i64> %d, ptr %y ret void } -declare <2 x i64> @llvm.fptoui.sat.v2i64.v2f16(<2 x half>) define void @fp2si_v2f64_v2i8(ptr %x, ptr %y) { ; CHECK-LABEL: fp2si_v2f64_v2i8: @@ -206,7 +196,6 @@ define void @fp2si_v2f64_v2i8(ptr %x, ptr %y) { store <2 x i8> %d, ptr %y ret void } -declare <2 x i8> @llvm.fptosi.sat.v2i8.v2f64(<2 x double>) define void @fp2ui_v2f64_v2i8(ptr %x, ptr %y) { ; CHECK-LABEL: fp2ui_v2f64_v2i8: @@ -228,7 +217,6 @@ define void @fp2ui_v2f64_v2i8(ptr %x, ptr %y) { store <2 x i8> %d, ptr %y ret void } -declare <2 x i8> @llvm.fptoui.sat.v2i8.v2f64(<2 x double>) define void @fp2si_v8f64_v8i8(ptr %x, ptr %y) { ; @@ -251,7 +239,6 @@ define void @fp2si_v8f64_v8i8(ptr %x, ptr %y) { store <8 x i8> %d, ptr %y ret void } -declare <8 x i8> @llvm.fptosi.sat.v8i8.v8f64(<8 x double>) define void @fp2ui_v8f64_v8i8(ptr %x, ptr %y) { ; @@ -274,7 +261,6 @@ define void @fp2ui_v8f64_v8i8(ptr %x, ptr %y) { store <8 x i8> %d, ptr %y ret void } -declare <8 x i8> @llvm.fptoui.sat.v8i8.v8f64(<8 x double> %a) define void @fp2si_v2f64_v2i32(ptr %x, ptr %y) { ; CHECK-LABEL: fp2si_v2f64_v2i32: @@ -292,7 +278,6 @@ define void @fp2si_v2f64_v2i32(ptr %x, ptr %y) { store <2 x i32> %d, ptr %y ret void } -declare <2 x i32> @llvm.fptosi.sat.v2i32.v2f64(<2 x double>) define void @fp2ui_v2f64_v2i32(ptr %x, ptr %y) { ; CHECK-LABEL: fp2ui_v2f64_v2i32: @@ -310,7 +295,6 @@ define void @fp2ui_v2f64_v2i32(ptr %x, ptr %y) { store <2 x i32> %d, ptr %y ret void } -declare <2 x i32> @llvm.fptoui.sat.v2i32.v2f64(<2 x double>) ;; NOTE: These prefixes are unused and the list is autogenerated. 
Do not add tests below this line: ; RV32: {{.*}} ; RV64: {{.*}} diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fpext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fpext-vp.ll index adb9016b30d23..465b166826a37 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fpext-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fpext-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+v,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s -declare <2 x float> @llvm.vp.fpext.v2f32.v2f16(<2 x half>, <2 x i1>, i32) - define <2 x float> @vfpext_v2f16_v2f32(<2 x half> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vfpext_v2f16_v2f32: ; CHECK: # %bb.0: @@ -28,8 +26,6 @@ define <2 x float> @vfpext_v2f16_v2f32_unmasked(<2 x half> %a, i32 zeroext %vl) ret <2 x float> %v } -declare <2 x double> @llvm.vp.fpext.v2f64.v2f16(<2 x half>, <2 x i1>, i32) - define <2 x double> @vfpext_v2f16_v2f64(<2 x half> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vfpext_v2f16_v2f64: ; CHECK: # %bb.0: @@ -54,8 +50,6 @@ define <2 x double> @vfpext_v2f16_v2f64_unmasked(<2 x half> %a, i32 zeroext %vl) ret <2 x double> %v } -declare <2 x double> @llvm.vp.fpext.v2f64.v2f32(<2 x float>, <2 x i1>, i32) - define <2 x double> @vfpext_v2f32_v2f64(<2 x float> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vfpext_v2f32_v2f64: ; CHECK: # %bb.0: @@ -78,8 +72,6 @@ define <2 x double> @vfpext_v2f32_v2f64_unmasked(<2 x float> %a, i32 zeroext %vl ret <2 x double> %v } -declare <15 x double> @llvm.vp.fpext.v15f64.v15f32(<15 x float>, <15 x i1>, i32) - define <15 x double> @vfpext_v15f32_v15f64(<15 x float> %a, <15 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vfpext_v15f32_v15f64: ; CHECK: # %bb.0: @@ -91,8 +83,6 @@ define <15 x double> @vfpext_v15f32_v15f64(<15 x float> %a, <15 x i1> %m, i32 ze ret <15 x double> %v } -declare <32 x double> @llvm.vp.fpext.v32f64.v32f32(<32 x 
float>, <32 x i1>, i32) - define <32 x double> @vfpext_v32f32_v32f64(<32 x float> %a, <32 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vfpext_v32f32_v32f64: ; CHECK: # %bb.0: @@ -122,8 +112,6 @@ define <32 x double> @vfpext_v32f32_v32f64(<32 x float> %a, <32 x i1> %m, i32 ze ret <32 x double> %v } -declare <2 x float> @llvm.vp.fpext.v2f32.v2bf16(<2 x bfloat>, <2 x i1>, i32) - define <2 x float> @vfpext_v2bf16_v2f32(<2 x bfloat> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vfpext_v2bf16_v2f32: ; CHECK: # %bb.0: @@ -146,8 +134,6 @@ define <2 x float> @vfpext_v2bf16_v2f32_unmasked(<2 x bfloat> %a, i32 zeroext %v ret <2 x float> %v } -declare <2 x double> @llvm.vp.fpext.v2f64.v2bf16(<2 x bfloat>, <2 x i1>, i32) - define <2 x double> @vfpext_v2bf16_v2f64(<2 x bfloat> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vfpext_v2bf16_v2f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fpowi.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fpowi.ll index 71d6af6ea34c2..4b05de75dbfe3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fpowi.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fpowi.ll @@ -34,7 +34,6 @@ define <1 x float> @powi_v1f32(<1 x float> %x, i32 %y) nounwind { %a = call <1 x float> @llvm.powi.v1f32.i32(<1 x float> %x, i32 %y) ret <1 x float> %a } -declare <1 x float> @llvm.powi.v1f32.i32(<1 x float>, i32) define <2 x float> @powi_v2f32(<2 x float> %x, i32 %y) nounwind { ; RV32-LABEL: powi_v2f32: @@ -100,7 +99,6 @@ define <2 x float> @powi_v2f32(<2 x float> %x, i32 %y) nounwind { %a = call <2 x float> @llvm.powi.v2f32.i32(<2 x float> %x, i32 %y) ret <2 x float> %a } -declare <2 x float> @llvm.powi.v2f32.i32(<2 x float>, i32) define <3 x float> @powi_v3f32(<3 x float> %x, i32 %y) nounwind { ; RV32-LABEL: powi_v3f32: @@ -208,7 +206,6 @@ define <3 x float> @powi_v3f32(<3 x float> %x, i32 %y) nounwind { %a = call <3 x float> @llvm.powi.v3f32.i32(<3 x float> %x, i32 %y) ret <3 x float> %a } -declare <3 x float> 
@llvm.powi.v3f32.i32(<3 x float>, i32) define <4 x float> @powi_v4f32(<4 x float> %x, i32 %y) nounwind { ; RV32-LABEL: powi_v4f32: @@ -340,7 +337,6 @@ define <4 x float> @powi_v4f32(<4 x float> %x, i32 %y) nounwind { %a = call <4 x float> @llvm.powi.v4f32.i32(<4 x float> %x, i32 %y) ret <4 x float> %a } -declare <4 x float> @llvm.powi.v4f32.i32(<4 x float>, i32) define <8 x float> @powi_v8f32(<8 x float> %x, i32 %y) nounwind { ; RV32-LABEL: powi_v8f32: @@ -602,7 +598,6 @@ define <8 x float> @powi_v8f32(<8 x float> %x, i32 %y) nounwind { %a = call <8 x float> @llvm.powi.v8f32.i32(<8 x float> %x, i32 %y) ret <8 x float> %a } -declare <8 x float> @llvm.powi.v8f32.i32(<8 x float>, i32) define <16 x float> @powi_v16f32(<16 x float> %x, i32 %y) nounwind { ; RV32-LABEL: powi_v16f32: @@ -851,7 +846,6 @@ define <16 x float> @powi_v16f32(<16 x float> %x, i32 %y) nounwind { %a = call <16 x float> @llvm.powi.v16f32.i32(<16 x float> %x, i32 %y) ret <16 x float> %a } -declare <16 x float> @llvm.powi.v16f32.i32(<16 x float>, i32) define <1 x double> @powi_v1f64(<1 x double> %x, i32 %y) nounwind { ; RV32-LABEL: powi_v1f64: @@ -883,7 +877,6 @@ define <1 x double> @powi_v1f64(<1 x double> %x, i32 %y) nounwind { %a = call <1 x double> @llvm.powi.v1f64.i32(<1 x double> %x, i32 %y) ret <1 x double> %a } -declare <1 x double> @llvm.powi.v1f64.i32(<1 x double>, i32) define <2 x double> @powi_v2f64(<2 x double> %x, i32 %y) nounwind { ; RV32-LABEL: powi_v2f64: @@ -949,7 +942,6 @@ define <2 x double> @powi_v2f64(<2 x double> %x, i32 %y) nounwind { %a = call <2 x double> @llvm.powi.v2f64.i32(<2 x double> %x, i32 %y) ret <2 x double> %a } -declare <2 x double> @llvm.powi.v2f64.i32(<2 x double>, i32) define <4 x double> @powi_v4f64(<4 x double> %x, i32 %y) nounwind { ; RV32-LABEL: powi_v4f64: @@ -1095,7 +1087,6 @@ define <4 x double> @powi_v4f64(<4 x double> %x, i32 %y) nounwind { %a = call <4 x double> @llvm.powi.v4f64.i32(<4 x double> %x, i32 %y) ret <4 x double> %a } -declare <4 x double> 
@llvm.powi.v4f64.i32(<4 x double>, i32) define <8 x double> @powi_v8f64(<8 x double> %x, i32 %y) nounwind { ; RV32-LABEL: powi_v8f64: @@ -1248,4 +1239,3 @@ define <8 x double> @powi_v8f64(<8 x double> %x, i32 %y) nounwind { %a = call <8 x double> @llvm.powi.v8f64.i32(<8 x double> %x, i32 %y) ret <8 x double> %a } -declare <8 x double> @llvm.powi.v8f64.i32(<8 x double>, i32) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp-mask.ll index bc86be6f62fd1..53dbbedc9a055 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp-mask.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN ; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare <4 x i1> @llvm.vp.fptosi.v4i1.v4f16(<4 x half>, <4 x i1>, i32) - define <4 x i1> @vfptosi_v4i1_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfptosi_v4i1_v4f16: ; ZVFH: # %bb.0: @@ -46,8 +44,6 @@ define <4 x i1> @vfptosi_v4i1_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) { ret <4 x i1> %v } -declare <4 x i1> @llvm.vp.fptosi.v4i1.v4f32(<4 x float>, <4 x i1>, i32) - define <4 x i1> @vfptosi_v4i1_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i1_v4f32: ; CHECK: # %bb.0: @@ -70,8 +66,6 @@ define <4 x i1> @vfptosi_v4i1_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) ret <4 x i1> %v } -declare <4 x i1> @llvm.vp.fptosi.v4i1.v4f64(<4 x double>, <4 x i1>, i32) - define <4 x i1> @vfptosi_v4i1_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i1_v4f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll index cbc4c69669b51..96eda109e1c70 100644 --- 
a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN ; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare <4 x i7> @llvm.vp.fptosi.v4i7.v4f16(<4 x half>, <4 x i1>, i32) - define <4 x i7> @vfptosi_v4i7_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfptosi_v4i7_v4f16: ; ZVFH: # %bb.0: @@ -27,8 +25,6 @@ define <4 x i7> @vfptosi_v4i7_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %ev ret <4 x i7> %v } -declare <4 x i8> @llvm.vp.fptosi.v4i8.v4f16(<4 x half>, <4 x i1>, i32) - define <4 x i8> @vfptosi_v4i8_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfptosi_v4i8_v4f16: ; ZVFH: # %bb.0: @@ -71,8 +67,6 @@ define <4 x i8> @vfptosi_v4i8_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) { ret <4 x i8> %v } -declare <4 x i16> @llvm.vp.fptosi.v4i16.v4f16(<4 x half>, <4 x i1>, i32) - define <4 x i16> @vfptosi_v4i16_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfptosi_v4i16_v4f16: ; ZVFH: # %bb.0: @@ -109,8 +103,6 @@ define <4 x i16> @vfptosi_v4i16_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) ret <4 x i16> %v } -declare <4 x i32> @llvm.vp.fptosi.v4i32.v4f16(<4 x half>, <4 x i1>, i32) - define <4 x i32> @vfptosi_v4i32_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfptosi_v4i32_v4f16: ; ZVFH: # %bb.0: @@ -149,8 +141,6 @@ define <4 x i32> @vfptosi_v4i32_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) ret <4 x i32> %v } -declare <4 x i64> @llvm.vp.fptosi.v4i64.v4f16(<4 x half>, <4 x i1>, i32) - define <4 x i64> @vfptosi_v4i64_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfptosi_v4i64_v4f16: ; ZVFH: # %bb.0: @@ -191,8 +181,6 @@ define <4 x i64> @vfptosi_v4i64_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) ret <4 x i64> %v } 
-declare <4 x i8> @llvm.vp.fptosi.v4i8.v4f32(<4 x float>, <4 x i1>, i32) - define <4 x i8> @vfptosi_v4i8_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i8_v4f32: ; CHECK: # %bb.0: @@ -217,8 +205,6 @@ define <4 x i8> @vfptosi_v4i8_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) ret <4 x i8> %v } -declare <4 x i16> @llvm.vp.fptosi.v4i16.v4f32(<4 x float>, <4 x i1>, i32) - define <4 x i16> @vfptosi_v4i16_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i16_v4f32: ; CHECK: # %bb.0: @@ -241,8 +227,6 @@ define <4 x i16> @vfptosi_v4i16_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl ret <4 x i16> %v } -declare <4 x i32> @llvm.vp.fptosi.v4i32.v4f32(<4 x float>, <4 x i1>, i32) - define <4 x i32> @vfptosi_v4i32_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i32_v4f32: ; CHECK: # %bb.0: @@ -263,8 +247,6 @@ define <4 x i32> @vfptosi_v4i32_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl ret <4 x i32> %v } -declare <4 x i64> @llvm.vp.fptosi.v4i64.v4f32(<4 x float>, <4 x i1>, i32) - define <4 x i64> @vfptosi_v4i64_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i64_v4f32: ; CHECK: # %bb.0: @@ -287,8 +269,6 @@ define <4 x i64> @vfptosi_v4i64_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl ret <4 x i64> %v } -declare <4 x i8> @llvm.vp.fptosi.v4i8.v4f64(<4 x double>, <4 x i1>, i32) - define <4 x i8> @vfptosi_v4i8_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i8_v4f64: ; CHECK: # %bb.0: @@ -317,8 +297,6 @@ define <4 x i8> @vfptosi_v4i8_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) ret <4 x i8> %v } -declare <4 x i16> @llvm.vp.fptosi.v4i16.v4f64(<4 x double>, <4 x i1>, i32) - define <4 x i16> @vfptosi_v4i16_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i16_v4f64: ; CHECK: # %bb.0: @@ -343,8 +321,6 @@ define <4 x i16> @vfptosi_v4i16_v4f64_unmasked(<4 x double> %va, i32 
zeroext %ev ret <4 x i16> %v } -declare <4 x i32> @llvm.vp.fptosi.v4i32.v4f64(<4 x double>, <4 x i1>, i32) - define <4 x i32> @vfptosi_v4i32_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i32_v4f64: ; CHECK: # %bb.0: @@ -367,8 +343,6 @@ define <4 x i32> @vfptosi_v4i32_v4f64_unmasked(<4 x double> %va, i32 zeroext %ev ret <4 x i32> %v } -declare <4 x i64> @llvm.vp.fptosi.v4i64.v4f64(<4 x double>, <4 x i1>, i32) - define <4 x i64> @vfptosi_v4i64_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i64_v4f64: ; CHECK: # %bb.0: @@ -389,8 +363,6 @@ define <4 x i64> @vfptosi_v4i64_v4f64_unmasked(<4 x double> %va, i32 zeroext %ev ret <4 x i64> %v } -declare <32 x i64> @llvm.vp.fptosi.v32i64.v32f64(<32 x double>, <32 x i1>, i32) - define <32 x i64> @vfptosi_v32i64_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v32i64_v32f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp-mask.ll index c41f14076db31..55fccb5db1aee 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp-mask.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN ; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare <4 x i1> @llvm.vp.fptoui.v4i1.v4f16(<4 x half>, <4 x i1>, i32) - define <4 x i1> @vfptoui_v4i1_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfptoui_v4i1_v4f16: ; ZVFH: # %bb.0: @@ -46,8 +44,6 @@ define <4 x i1> @vfptoui_v4i1_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) { ret <4 x i1> %v } -declare <4 x i1> @llvm.vp.fptoui.v4i1.v4f32(<4 x float>, <4 x i1>, i32) - define <4 x i1> @vfptoui_v4i1_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i1_v4f32: ; 
CHECK: # %bb.0: @@ -70,8 +66,6 @@ define <4 x i1> @vfptoui_v4i1_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) ret <4 x i1> %v } -declare <4 x i1> @llvm.vp.fptoui.v4i1.v4f64(<4 x double>, <4 x i1>, i32) - define <4 x i1> @vfptoui_v4i1_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i1_v4f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp.ll index 5dd3e0372f401..4020100bf364b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN ; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare <4 x i7> @llvm.vp.fptoui.v4i7.v4f16(<4 x half>, <4 x i1>, i32) - define <4 x i7> @vfptoui_v4i7_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfptoui_v4i7_v4f16: ; ZVFH: # %bb.0: @@ -27,8 +25,6 @@ define <4 x i7> @vfptoui_v4i7_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %ev ret <4 x i7> %v } -declare <4 x i8> @llvm.vp.fptoui.v4i8.v4f16(<4 x half>, <4 x i1>, i32) - define <4 x i8> @vfptoui_v4i8_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfptoui_v4i8_v4f16: ; ZVFH: # %bb.0: @@ -71,8 +67,6 @@ define <4 x i8> @vfptoui_v4i8_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) { ret <4 x i8> %v } -declare <4 x i16> @llvm.vp.fptoui.v4i16.v4f16(<4 x half>, <4 x i1>, i32) - define <4 x i16> @vfptoui_v4i16_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfptoui_v4i16_v4f16: ; ZVFH: # %bb.0: @@ -109,8 +103,6 @@ define <4 x i16> @vfptoui_v4i16_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) ret <4 x i16> %v } -declare <4 x i32> @llvm.vp.fptoui.v4i32.v4f16(<4 x half>, <4 x i1>, i32) - define <4 x i32> @vfptoui_v4i32_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext 
%evl) { ; ZVFH-LABEL: vfptoui_v4i32_v4f16: ; ZVFH: # %bb.0: @@ -149,8 +141,6 @@ define <4 x i32> @vfptoui_v4i32_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) ret <4 x i32> %v } -declare <4 x i64> @llvm.vp.fptoui.v4i64.v4f16(<4 x half>, <4 x i1>, i32) - define <4 x i64> @vfptoui_v4i64_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfptoui_v4i64_v4f16: ; ZVFH: # %bb.0: @@ -191,8 +181,6 @@ define <4 x i64> @vfptoui_v4i64_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) ret <4 x i64> %v } -declare <4 x i8> @llvm.vp.fptoui.v4i8.v4f32(<4 x float>, <4 x i1>, i32) - define <4 x i8> @vfptoui_v4i8_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i8_v4f32: ; CHECK: # %bb.0: @@ -217,8 +205,6 @@ define <4 x i8> @vfptoui_v4i8_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) ret <4 x i8> %v } -declare <4 x i16> @llvm.vp.fptoui.v4i16.v4f32(<4 x float>, <4 x i1>, i32) - define <4 x i16> @vfptoui_v4i16_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i16_v4f32: ; CHECK: # %bb.0: @@ -241,8 +227,6 @@ define <4 x i16> @vfptoui_v4i16_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl ret <4 x i16> %v } -declare <4 x i32> @llvm.vp.fptoui.v4i32.v4f32(<4 x float>, <4 x i1>, i32) - define <4 x i32> @vfptoui_v4i32_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i32_v4f32: ; CHECK: # %bb.0: @@ -263,8 +247,6 @@ define <4 x i32> @vfptoui_v4i32_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl ret <4 x i32> %v } -declare <4 x i64> @llvm.vp.fptoui.v4i64.v4f32(<4 x float>, <4 x i1>, i32) - define <4 x i64> @vfptoui_v4i64_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i64_v4f32: ; CHECK: # %bb.0: @@ -287,8 +269,6 @@ define <4 x i64> @vfptoui_v4i64_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl ret <4 x i64> %v } -declare <4 x i8> @llvm.vp.fptoui.v4i8.v4f64(<4 x double>, <4 x i1>, i32) - define <4 x i8> @vfptoui_v4i8_v4f64(<4 x double> %va, <4 x 
i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i8_v4f64: ; CHECK: # %bb.0: @@ -317,8 +297,6 @@ define <4 x i8> @vfptoui_v4i8_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) ret <4 x i8> %v } -declare <4 x i16> @llvm.vp.fptoui.v4i16.v4f64(<4 x double>, <4 x i1>, i32) - define <4 x i16> @vfptoui_v4i16_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i16_v4f64: ; CHECK: # %bb.0: @@ -343,8 +321,6 @@ define <4 x i16> @vfptoui_v4i16_v4f64_unmasked(<4 x double> %va, i32 zeroext %ev ret <4 x i16> %v } -declare <4 x i32> @llvm.vp.fptoui.v4i32.v4f64(<4 x double>, <4 x i1>, i32) - define <4 x i32> @vfptoui_v4i32_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i32_v4f64: ; CHECK: # %bb.0: @@ -367,8 +343,6 @@ define <4 x i32> @vfptoui_v4i32_v4f64_unmasked(<4 x double> %va, i32 zeroext %ev ret <4 x i32> %v } -declare <4 x i64> @llvm.vp.fptoui.v4i64.v4f64(<4 x double>, <4 x i1>, i32) - define <4 x i64> @vfptoui_v4i64_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i64_v4f64: ; CHECK: # %bb.0: @@ -389,8 +363,6 @@ define <4 x i64> @vfptoui_v4i64_v4f64_unmasked(<4 x double> %va, i32 zeroext %ev ret <4 x i64> %v } -declare <32 x i64> @llvm.vp.fptoui.v32i64.v32f64(<32 x double>, <32 x i1>, i32) - define <32 x i64> @vfptoui_v32i64_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v32i64_v32f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptrunc-vp.ll index 13891cb84e0f2..e509722b623a2 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptrunc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptrunc-vp.ll @@ -4,9 +4,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+v,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s - -declare <2 x half> 
@llvm.vp.fptrunc.v2f16.v2f32(<2 x float>, <2 x i1>, i32) - define <2 x half> @vfptrunc_v2f16_v2f32(<2 x float> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vfptrunc_v2f16_v2f32: ; CHECK: # %bb.0: @@ -29,8 +26,6 @@ define <2 x half> @vfptrunc_v2f16_v2f32_unmasked(<2 x float> %a, i32 zeroext %vl ret <2 x half> %v } -declare <2 x half> @llvm.vp.fptrunc.v2f16.v2f64(<2 x double>, <2 x i1>, i32) - define <2 x half> @vfptrunc_v2f16_v2f64(<2 x double> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vfptrunc_v2f16_v2f64: ; CHECK: # %bb.0: @@ -55,8 +50,6 @@ define <2 x half> @vfptrunc_v2f16_v2f64_unmasked(<2 x double> %a, i32 zeroext %v ret <2 x half> %v } -declare <2 x float> @llvm.vp.fptrunc.v2f64.v2f32(<2 x double>, <2 x i1>, i32) - define <2 x float> @vfptrunc_v2f32_v2f64(<2 x double> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vfptrunc_v2f32_v2f64: ; CHECK: # %bb.0: @@ -79,8 +72,6 @@ define <2 x float> @vfptrunc_v2f32_v2f64_unmasked(<2 x double> %a, i32 zeroext % ret <2 x float> %v } -declare <15 x float> @llvm.vp.fptrunc.v15f64.v15f32(<15 x double>, <15 x i1>, i32) - define <15 x float> @vfptrunc_v15f32_v15f64(<15 x double> %a, <15 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vfptrunc_v15f32_v15f64: ; CHECK: # %bb.0: @@ -92,8 +83,6 @@ define <15 x float> @vfptrunc_v15f32_v15f64(<15 x double> %a, <15 x i1> %m, i32 ret <15 x float> %v } -declare <32 x float> @llvm.vp.fptrunc.v32f64.v32f32(<32 x double>, <32 x i1>, i32) - define <32 x float> @vfptrunc_v32f32_v32f64(<32 x double> %a, <32 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vfptrunc_v32f32_v32f64: ; CHECK: # %bb.0: @@ -123,8 +112,6 @@ define <32 x float> @vfptrunc_v32f32_v32f64(<32 x double> %a, <32 x i1> %m, i32 ret <32 x float> %v } -declare <2 x bfloat> @llvm.vp.fptrunc.v2bf16.v2f32(<2 x float>, <2 x i1>, i32) - define <2 x bfloat> @vfptrunc_v2bf16_v2f32(<2 x float> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vfptrunc_v2bf16_v2f32: ; CHECK: # %bb.0: @@ -147,8 +134,6 @@ define <2 x bfloat> 
@vfptrunc_v2bf16_v2f32_unmasked(<2 x float> %a, i32 zeroext ret <2 x bfloat> %v } -declare <2 x bfloat> @llvm.vp.fptrunc.v2bf16.v2f64(<2 x double>, <2 x i1>, i32) - define <2 x bfloat> @vfptrunc_v2bf16_v2f64(<2 x double> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vfptrunc_v2bf16_v2f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround-constrained-sdnode.ll index c0b67dd603ebb..ad56aee72a432 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround-constrained-sdnode.ll @@ -28,7 +28,6 @@ define <1 x half> @round_v1f16(<1 x half> %x) strictfp { %a = call <1 x half> @llvm.experimental.constrained.round.v1f16(<1 x half> %x, metadata !"fpexcept.strict") ret <1 x half> %a } -declare <1 x half> @llvm.experimental.constrained.round.v1f16(<1 x half>, metadata) define <2 x half> @round_v2f16(<2 x half> %x) strictfp { ; CHECK-LABEL: round_v2f16: @@ -52,7 +51,6 @@ define <2 x half> @round_v2f16(<2 x half> %x) strictfp { %a = call <2 x half> @llvm.experimental.constrained.round.v2f16(<2 x half> %x, metadata !"fpexcept.strict") ret <2 x half> %a } -declare <2 x half> @llvm.experimental.constrained.round.v2f16(<2 x half>, metadata) define <4 x half> @round_v4f16(<4 x half> %x) strictfp { ; CHECK-LABEL: round_v4f16: @@ -76,7 +74,6 @@ define <4 x half> @round_v4f16(<4 x half> %x) strictfp { %a = call <4 x half> @llvm.experimental.constrained.round.v4f16(<4 x half> %x, metadata !"fpexcept.strict") ret <4 x half> %a } -declare <4 x half> @llvm.experimental.constrained.round.v4f16(<4 x half>, metadata) define <8 x half> @round_v8f16(<8 x half> %x) strictfp { ; CHECK-LABEL: round_v8f16: @@ -100,7 +97,6 @@ define <8 x half> @round_v8f16(<8 x half> %x) strictfp { %a = call <8 x half> @llvm.experimental.constrained.round.v8f16(<8 x half> %x, metadata !"fpexcept.strict") ret <8 x half> %a } -declare 
<8 x half> @llvm.experimental.constrained.round.v8f16(<8 x half>, metadata) define <16 x half> @round_v16f16(<16 x half> %x) strictfp { ; CHECK-LABEL: round_v16f16: @@ -124,7 +120,6 @@ define <16 x half> @round_v16f16(<16 x half> %x) strictfp { %a = call <16 x half> @llvm.experimental.constrained.round.v16f16(<16 x half> %x, metadata !"fpexcept.strict") ret <16 x half> %a } -declare <16 x half> @llvm.experimental.constrained.round.v16f16(<16 x half>, metadata) define <32 x half> @round_v32f16(<32 x half> %x) strictfp { ; CHECK-LABEL: round_v32f16: @@ -149,7 +144,6 @@ define <32 x half> @round_v32f16(<32 x half> %x) strictfp { %a = call <32 x half> @llvm.experimental.constrained.round.v32f16(<32 x half> %x, metadata !"fpexcept.strict") ret <32 x half> %a } -declare <32 x half> @llvm.experimental.constrained.round.v32f16(<32 x half>, metadata) define <1 x float> @round_v1f32(<1 x float> %x) strictfp { ; CHECK-LABEL: round_v1f32: @@ -172,7 +166,6 @@ define <1 x float> @round_v1f32(<1 x float> %x) strictfp { %a = call <1 x float> @llvm.experimental.constrained.round.v1f32(<1 x float> %x, metadata !"fpexcept.strict") ret <1 x float> %a } -declare <1 x float> @llvm.experimental.constrained.round.v1f32(<1 x float>, metadata) define <2 x float> @round_v2f32(<2 x float> %x) strictfp { ; CHECK-LABEL: round_v2f32: @@ -195,7 +188,6 @@ define <2 x float> @round_v2f32(<2 x float> %x) strictfp { %a = call <2 x float> @llvm.experimental.constrained.round.v2f32(<2 x float> %x, metadata !"fpexcept.strict") ret <2 x float> %a } -declare <2 x float> @llvm.experimental.constrained.round.v2f32(<2 x float>, metadata) define <4 x float> @round_v4f32(<4 x float> %x) strictfp { ; CHECK-LABEL: round_v4f32: @@ -218,7 +210,6 @@ define <4 x float> @round_v4f32(<4 x float> %x) strictfp { %a = call <4 x float> @llvm.experimental.constrained.round.v4f32(<4 x float> %x, metadata !"fpexcept.strict") ret <4 x float> %a } -declare <4 x float> @llvm.experimental.constrained.round.v4f32(<4 x float>, 
metadata) define <8 x float> @round_v8f32(<8 x float> %x) strictfp { ; CHECK-LABEL: round_v8f32: @@ -241,7 +232,6 @@ define <8 x float> @round_v8f32(<8 x float> %x) strictfp { %a = call <8 x float> @llvm.experimental.constrained.round.v8f32(<8 x float> %x, metadata !"fpexcept.strict") ret <8 x float> %a } -declare <8 x float> @llvm.experimental.constrained.round.v8f32(<8 x float>, metadata) define <16 x float> @round_v16f32(<16 x float> %x) strictfp { ; CHECK-LABEL: round_v16f32: @@ -264,7 +254,6 @@ define <16 x float> @round_v16f32(<16 x float> %x) strictfp { %a = call <16 x float> @llvm.experimental.constrained.round.v16f32(<16 x float> %x, metadata !"fpexcept.strict") ret <16 x float> %a } -declare <16 x float> @llvm.experimental.constrained.round.v16f32(<16 x float>, metadata) define <1 x double> @round_v1f64(<1 x double> %x) strictfp { ; RV32-LABEL: round_v1f64: @@ -306,7 +295,6 @@ define <1 x double> @round_v1f64(<1 x double> %x) strictfp { %a = call <1 x double> @llvm.experimental.constrained.round.v1f64(<1 x double> %x, metadata !"fpexcept.strict") ret <1 x double> %a } -declare <1 x double> @llvm.experimental.constrained.round.v1f64(<1 x double>, metadata) define <2 x double> @round_v2f64(<2 x double> %x) strictfp { ; RV32-LABEL: round_v2f64: @@ -348,7 +336,6 @@ define <2 x double> @round_v2f64(<2 x double> %x) strictfp { %a = call <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double> %x, metadata !"fpexcept.strict") ret <2 x double> %a } -declare <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double>, metadata) define <4 x double> @round_v4f64(<4 x double> %x) strictfp { ; RV32-LABEL: round_v4f64: @@ -390,7 +377,6 @@ define <4 x double> @round_v4f64(<4 x double> %x) strictfp { %a = call <4 x double> @llvm.experimental.constrained.round.v4f64(<4 x double> %x, metadata !"fpexcept.strict") ret <4 x double> %a } -declare <4 x double> @llvm.experimental.constrained.round.v4f64(<4 x double>, metadata) define <8 x double> 
@round_v8f64(<8 x double> %x) strictfp { ; RV32-LABEL: round_v8f64: @@ -432,4 +418,3 @@ define <8 x double> @round_v8f64(<8 x double> %x) strictfp { %a = call <8 x double> @llvm.experimental.constrained.round.v8f64(<8 x double> %x, metadata !"fpexcept.strict") ret <8 x double> %a } -declare <8 x double> @llvm.experimental.constrained.round.v8f64(<8 x double>, metadata) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround.ll index 455dc0b83c03d..4c6d420767ebf 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround.ll @@ -48,7 +48,6 @@ define <1 x half> @round_v1f16(<1 x half> %x) { %a = call <1 x half> @llvm.round.v1f16(<1 x half> %x) ret <1 x half> %a } -declare <1 x half> @llvm.round.v1f16(<1 x half>) define <2 x half> @round_v2f16(<2 x half> %x) { ; ZVFH-LABEL: round_v2f16: @@ -88,7 +87,6 @@ define <2 x half> @round_v2f16(<2 x half> %x) { %a = call <2 x half> @llvm.round.v2f16(<2 x half> %x) ret <2 x half> %a } -declare <2 x half> @llvm.round.v2f16(<2 x half>) define <4 x half> @round_v4f16(<4 x half> %x) { ; ZVFH-LABEL: round_v4f16: @@ -128,7 +126,6 @@ define <4 x half> @round_v4f16(<4 x half> %x) { %a = call <4 x half> @llvm.round.v4f16(<4 x half> %x) ret <4 x half> %a } -declare <4 x half> @llvm.round.v4f16(<4 x half>) define <8 x half> @round_v8f16(<8 x half> %x) { ; ZVFH-LABEL: round_v8f16: @@ -168,7 +165,6 @@ define <8 x half> @round_v8f16(<8 x half> %x) { %a = call <8 x half> @llvm.round.v8f16(<8 x half> %x) ret <8 x half> %a } -declare <8 x half> @llvm.round.v8f16(<8 x half>) define <16 x half> @round_v16f16(<16 x half> %x) { ; ZVFH-LABEL: round_v16f16: @@ -208,7 +204,6 @@ define <16 x half> @round_v16f16(<16 x half> %x) { %a = call <16 x half> @llvm.round.v16f16(<16 x half> %x) ret <16 x half> %a } -declare <16 x half> @llvm.round.v16f16(<16 x half>) define <32 x half> @round_v32f16(<32 x half> %x) { ; ZVFH-LABEL: round_v32f16: 
@@ -250,7 +245,6 @@ define <32 x half> @round_v32f16(<32 x half> %x) { %a = call <32 x half> @llvm.round.v32f16(<32 x half> %x) ret <32 x half> %a } -declare <32 x half> @llvm.round.v32f16(<32 x half>) define <1 x float> @round_v1f32(<1 x float> %x) { ; CHECK-LABEL: round_v1f32: @@ -270,7 +264,6 @@ define <1 x float> @round_v1f32(<1 x float> %x) { %a = call <1 x float> @llvm.round.v1f32(<1 x float> %x) ret <1 x float> %a } -declare <1 x float> @llvm.round.v1f32(<1 x float>) define <2 x float> @round_v2f32(<2 x float> %x) { ; CHECK-LABEL: round_v2f32: @@ -290,7 +283,6 @@ define <2 x float> @round_v2f32(<2 x float> %x) { %a = call <2 x float> @llvm.round.v2f32(<2 x float> %x) ret <2 x float> %a } -declare <2 x float> @llvm.round.v2f32(<2 x float>) define <4 x float> @round_v4f32(<4 x float> %x) { ; CHECK-LABEL: round_v4f32: @@ -310,7 +302,6 @@ define <4 x float> @round_v4f32(<4 x float> %x) { %a = call <4 x float> @llvm.round.v4f32(<4 x float> %x) ret <4 x float> %a } -declare <4 x float> @llvm.round.v4f32(<4 x float>) define <8 x float> @round_v8f32(<8 x float> %x) { ; CHECK-LABEL: round_v8f32: @@ -330,7 +321,6 @@ define <8 x float> @round_v8f32(<8 x float> %x) { %a = call <8 x float> @llvm.round.v8f32(<8 x float> %x) ret <8 x float> %a } -declare <8 x float> @llvm.round.v8f32(<8 x float>) define <16 x float> @round_v16f32(<16 x float> %x) { ; CHECK-LABEL: round_v16f32: @@ -350,7 +340,6 @@ define <16 x float> @round_v16f32(<16 x float> %x) { %a = call <16 x float> @llvm.round.v16f32(<16 x float> %x) ret <16 x float> %a } -declare <16 x float> @llvm.round.v16f32(<16 x float>) define <1 x double> @round_v1f64(<1 x double> %x) { ; RV32ZVFH-LABEL: round_v1f64: @@ -417,7 +406,6 @@ define <1 x double> @round_v1f64(<1 x double> %x) { %a = call <1 x double> @llvm.round.v1f64(<1 x double> %x) ret <1 x double> %a } -declare <1 x double> @llvm.round.v1f64(<1 x double>) define <2 x double> @round_v2f64(<2 x double> %x) { ; RV32ZVFH-LABEL: round_v2f64: @@ -484,7 +472,6 @@ define 
<2 x double> @round_v2f64(<2 x double> %x) { %a = call <2 x double> @llvm.round.v2f64(<2 x double> %x) ret <2 x double> %a } -declare <2 x double> @llvm.round.v2f64(<2 x double>) define <4 x double> @round_v4f64(<4 x double> %x) { ; RV32ZVFH-LABEL: round_v4f64: @@ -551,7 +538,6 @@ define <4 x double> @round_v4f64(<4 x double> %x) { %a = call <4 x double> @llvm.round.v4f64(<4 x double> %x) ret <4 x double> %a } -declare <4 x double> @llvm.round.v4f64(<4 x double>) define <8 x double> @round_v8f64(<8 x double> %x) { ; RV32ZVFH-LABEL: round_v8f64: @@ -618,4 +604,3 @@ define <8 x double> @round_v8f64(<8 x double> %x) { %a = call <8 x double> @llvm.round.v8f64(<8 x double> %x) ret <8 x double> %a } -declare <8 x double> @llvm.round.v8f64(<8 x double>) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven-constrained-sdnode.ll index b1d35d3bcdc1d..5e5c64fd891fd 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven-constrained-sdnode.ll @@ -28,7 +28,6 @@ define <1 x half> @roundeven_v1f16(<1 x half> %x) strictfp { %a = call <1 x half> @llvm.experimental.constrained.roundeven.v1f16(<1 x half> %x, metadata !"fpexcept.strict") ret <1 x half> %a } -declare <1 x half> @llvm.experimental.constrained.roundeven.v1f16(<1 x half>, metadata) define <2 x half> @roundeven_v2f16(<2 x half> %x) strictfp { ; CHECK-LABEL: roundeven_v2f16: @@ -52,7 +51,6 @@ define <2 x half> @roundeven_v2f16(<2 x half> %x) strictfp { %a = call <2 x half> @llvm.experimental.constrained.roundeven.v2f16(<2 x half> %x, metadata !"fpexcept.strict") ret <2 x half> %a } -declare <2 x half> @llvm.experimental.constrained.roundeven.v2f16(<2 x half>, metadata) define <4 x half> @roundeven_v4f16(<4 x half> %x) strictfp { ; CHECK-LABEL: roundeven_v4f16: @@ -76,7 +74,6 @@ define <4 x half> @roundeven_v4f16(<4 x half> %x) strictfp { %a = 
call <4 x half> @llvm.experimental.constrained.roundeven.v4f16(<4 x half> %x, metadata !"fpexcept.strict") ret <4 x half> %a } -declare <4 x half> @llvm.experimental.constrained.roundeven.v4f16(<4 x half>, metadata) define <8 x half> @roundeven_v8f16(<8 x half> %x) strictfp { ; CHECK-LABEL: roundeven_v8f16: @@ -100,7 +97,6 @@ define <8 x half> @roundeven_v8f16(<8 x half> %x) strictfp { %a = call <8 x half> @llvm.experimental.constrained.roundeven.v8f16(<8 x half> %x, metadata !"fpexcept.strict") ret <8 x half> %a } -declare <8 x half> @llvm.experimental.constrained.roundeven.v8f16(<8 x half>, metadata) define <16 x half> @roundeven_v16f16(<16 x half> %x) strictfp { ; CHECK-LABEL: roundeven_v16f16: @@ -124,7 +120,6 @@ define <16 x half> @roundeven_v16f16(<16 x half> %x) strictfp { %a = call <16 x half> @llvm.experimental.constrained.roundeven.v16f16(<16 x half> %x, metadata !"fpexcept.strict") ret <16 x half> %a } -declare <16 x half> @llvm.experimental.constrained.roundeven.v16f16(<16 x half>, metadata) define <32 x half> @roundeven_v32f16(<32 x half> %x) strictfp { ; CHECK-LABEL: roundeven_v32f16: @@ -149,7 +144,6 @@ define <32 x half> @roundeven_v32f16(<32 x half> %x) strictfp { %a = call <32 x half> @llvm.experimental.constrained.roundeven.v32f16(<32 x half> %x, metadata !"fpexcept.strict") ret <32 x half> %a } -declare <32 x half> @llvm.experimental.constrained.roundeven.v32f16(<32 x half>, metadata) define <1 x float> @roundeven_v1f32(<1 x float> %x) strictfp { ; CHECK-LABEL: roundeven_v1f32: @@ -172,7 +166,6 @@ define <1 x float> @roundeven_v1f32(<1 x float> %x) strictfp { %a = call <1 x float> @llvm.experimental.constrained.roundeven.v1f32(<1 x float> %x, metadata !"fpexcept.strict") ret <1 x float> %a } -declare <1 x float> @llvm.experimental.constrained.roundeven.v1f32(<1 x float>, metadata) define <2 x float> @roundeven_v2f32(<2 x float> %x) strictfp { ; CHECK-LABEL: roundeven_v2f32: @@ -195,7 +188,6 @@ define <2 x float> @roundeven_v2f32(<2 x float> %x) 
strictfp { %a = call <2 x float> @llvm.experimental.constrained.roundeven.v2f32(<2 x float> %x, metadata !"fpexcept.strict") ret <2 x float> %a } -declare <2 x float> @llvm.experimental.constrained.roundeven.v2f32(<2 x float>, metadata) define <4 x float> @roundeven_v4f32(<4 x float> %x) strictfp { ; CHECK-LABEL: roundeven_v4f32: @@ -218,7 +210,6 @@ define <4 x float> @roundeven_v4f32(<4 x float> %x) strictfp { %a = call <4 x float> @llvm.experimental.constrained.roundeven.v4f32(<4 x float> %x, metadata !"fpexcept.strict") ret <4 x float> %a } -declare <4 x float> @llvm.experimental.constrained.roundeven.v4f32(<4 x float>, metadata) define <8 x float> @roundeven_v8f32(<8 x float> %x) strictfp { ; CHECK-LABEL: roundeven_v8f32: @@ -241,7 +232,6 @@ define <8 x float> @roundeven_v8f32(<8 x float> %x) strictfp { %a = call <8 x float> @llvm.experimental.constrained.roundeven.v8f32(<8 x float> %x, metadata !"fpexcept.strict") ret <8 x float> %a } -declare <8 x float> @llvm.experimental.constrained.roundeven.v8f32(<8 x float>, metadata) define <16 x float> @roundeven_v16f32(<16 x float> %x) strictfp { ; CHECK-LABEL: roundeven_v16f32: @@ -264,7 +254,6 @@ define <16 x float> @roundeven_v16f32(<16 x float> %x) strictfp { %a = call <16 x float> @llvm.experimental.constrained.roundeven.v16f32(<16 x float> %x, metadata !"fpexcept.strict") ret <16 x float> %a } -declare <16 x float> @llvm.experimental.constrained.roundeven.v16f32(<16 x float>, metadata) define <1 x double> @roundeven_v1f64(<1 x double> %x) strictfp { ; RV32-LABEL: roundeven_v1f64: @@ -306,7 +295,6 @@ define <1 x double> @roundeven_v1f64(<1 x double> %x) strictfp { %a = call <1 x double> @llvm.experimental.constrained.roundeven.v1f64(<1 x double> %x, metadata !"fpexcept.strict") ret <1 x double> %a } -declare <1 x double> @llvm.experimental.constrained.roundeven.v1f64(<1 x double>, metadata) define <2 x double> @roundeven_v2f64(<2 x double> %x) strictfp { ; RV32-LABEL: roundeven_v2f64: @@ -348,7 +336,6 @@ define 
<2 x double> @roundeven_v2f64(<2 x double> %x) strictfp { %a = call <2 x double> @llvm.experimental.constrained.roundeven.v2f64(<2 x double> %x, metadata !"fpexcept.strict") ret <2 x double> %a } -declare <2 x double> @llvm.experimental.constrained.roundeven.v2f64(<2 x double>, metadata) define <4 x double> @roundeven_v4f64(<4 x double> %x) strictfp { ; RV32-LABEL: roundeven_v4f64: @@ -390,7 +377,6 @@ define <4 x double> @roundeven_v4f64(<4 x double> %x) strictfp { %a = call <4 x double> @llvm.experimental.constrained.roundeven.v4f64(<4 x double> %x, metadata !"fpexcept.strict") ret <4 x double> %a } -declare <4 x double> @llvm.experimental.constrained.roundeven.v4f64(<4 x double>, metadata) define <8 x double> @roundeven_v8f64(<8 x double> %x) strictfp { ; RV32-LABEL: roundeven_v8f64: @@ -432,4 +418,3 @@ define <8 x double> @roundeven_v8f64(<8 x double> %x) strictfp { %a = call <8 x double> @llvm.experimental.constrained.roundeven.v8f64(<8 x double> %x, metadata !"fpexcept.strict") ret <8 x double> %a } -declare <8 x double> @llvm.experimental.constrained.roundeven.v8f64(<8 x double>, metadata) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven.ll index f8b3cb5897dfa..b175549c132b4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven.ll @@ -48,7 +48,6 @@ define <1 x half> @roundeven_v1f16(<1 x half> %x) { %a = call <1 x half> @llvm.roundeven.v1f16(<1 x half> %x) ret <1 x half> %a } -declare <1 x half> @llvm.roundeven.v1f16(<1 x half>) define <2 x half> @roundeven_v2f16(<2 x half> %x) { ; ZVFH-LABEL: roundeven_v2f16: @@ -88,7 +87,6 @@ define <2 x half> @roundeven_v2f16(<2 x half> %x) { %a = call <2 x half> @llvm.roundeven.v2f16(<2 x half> %x) ret <2 x half> %a } -declare <2 x half> @llvm.roundeven.v2f16(<2 x half>) define <4 x half> @roundeven_v4f16(<4 x half> %x) { ; ZVFH-LABEL: roundeven_v4f16: @@ -128,7 +126,6 @@ 
define <4 x half> @roundeven_v4f16(<4 x half> %x) { %a = call <4 x half> @llvm.roundeven.v4f16(<4 x half> %x) ret <4 x half> %a } -declare <4 x half> @llvm.roundeven.v4f16(<4 x half>) define <8 x half> @roundeven_v8f16(<8 x half> %x) { ; ZVFH-LABEL: roundeven_v8f16: @@ -168,7 +165,6 @@ define <8 x half> @roundeven_v8f16(<8 x half> %x) { %a = call <8 x half> @llvm.roundeven.v8f16(<8 x half> %x) ret <8 x half> %a } -declare <8 x half> @llvm.roundeven.v8f16(<8 x half>) define <16 x half> @roundeven_v16f16(<16 x half> %x) { ; ZVFH-LABEL: roundeven_v16f16: @@ -208,7 +204,6 @@ define <16 x half> @roundeven_v16f16(<16 x half> %x) { %a = call <16 x half> @llvm.roundeven.v16f16(<16 x half> %x) ret <16 x half> %a } -declare <16 x half> @llvm.roundeven.v16f16(<16 x half>) define <32 x half> @roundeven_v32f16(<32 x half> %x) { ; ZVFH-LABEL: roundeven_v32f16: @@ -250,7 +245,6 @@ define <32 x half> @roundeven_v32f16(<32 x half> %x) { %a = call <32 x half> @llvm.roundeven.v32f16(<32 x half> %x) ret <32 x half> %a } -declare <32 x half> @llvm.roundeven.v32f16(<32 x half>) define <1 x float> @roundeven_v1f32(<1 x float> %x) { ; CHECK-LABEL: roundeven_v1f32: @@ -270,7 +264,6 @@ define <1 x float> @roundeven_v1f32(<1 x float> %x) { %a = call <1 x float> @llvm.roundeven.v1f32(<1 x float> %x) ret <1 x float> %a } -declare <1 x float> @llvm.roundeven.v1f32(<1 x float>) define <2 x float> @roundeven_v2f32(<2 x float> %x) { ; CHECK-LABEL: roundeven_v2f32: @@ -290,7 +283,6 @@ define <2 x float> @roundeven_v2f32(<2 x float> %x) { %a = call <2 x float> @llvm.roundeven.v2f32(<2 x float> %x) ret <2 x float> %a } -declare <2 x float> @llvm.roundeven.v2f32(<2 x float>) define <4 x float> @roundeven_v4f32(<4 x float> %x) { ; CHECK-LABEL: roundeven_v4f32: @@ -310,7 +302,6 @@ define <4 x float> @roundeven_v4f32(<4 x float> %x) { %a = call <4 x float> @llvm.roundeven.v4f32(<4 x float> %x) ret <4 x float> %a } -declare <4 x float> @llvm.roundeven.v4f32(<4 x float>) define <8 x float> 
@roundeven_v8f32(<8 x float> %x) { ; CHECK-LABEL: roundeven_v8f32: @@ -330,7 +321,6 @@ define <8 x float> @roundeven_v8f32(<8 x float> %x) { %a = call <8 x float> @llvm.roundeven.v8f32(<8 x float> %x) ret <8 x float> %a } -declare <8 x float> @llvm.roundeven.v8f32(<8 x float>) define <16 x float> @roundeven_v16f32(<16 x float> %x) { ; CHECK-LABEL: roundeven_v16f32: @@ -350,7 +340,6 @@ define <16 x float> @roundeven_v16f32(<16 x float> %x) { %a = call <16 x float> @llvm.roundeven.v16f32(<16 x float> %x) ret <16 x float> %a } -declare <16 x float> @llvm.roundeven.v16f32(<16 x float>) define <1 x double> @roundeven_v1f64(<1 x double> %x) { ; RV32ZVFH-LABEL: roundeven_v1f64: @@ -417,7 +406,6 @@ define <1 x double> @roundeven_v1f64(<1 x double> %x) { %a = call <1 x double> @llvm.roundeven.v1f64(<1 x double> %x) ret <1 x double> %a } -declare <1 x double> @llvm.roundeven.v1f64(<1 x double>) define <2 x double> @roundeven_v2f64(<2 x double> %x) { ; RV32ZVFH-LABEL: roundeven_v2f64: @@ -484,7 +472,6 @@ define <2 x double> @roundeven_v2f64(<2 x double> %x) { %a = call <2 x double> @llvm.roundeven.v2f64(<2 x double> %x) ret <2 x double> %a } -declare <2 x double> @llvm.roundeven.v2f64(<2 x double>) define <4 x double> @roundeven_v4f64(<4 x double> %x) { ; RV32ZVFH-LABEL: roundeven_v4f64: @@ -551,7 +538,6 @@ define <4 x double> @roundeven_v4f64(<4 x double> %x) { %a = call <4 x double> @llvm.roundeven.v4f64(<4 x double> %x) ret <4 x double> %a } -declare <4 x double> @llvm.roundeven.v4f64(<4 x double>) define <8 x double> @roundeven_v8f64(<8 x double> %x) { ; RV32ZVFH-LABEL: roundeven_v8f64: @@ -618,4 +604,3 @@ define <8 x double> @roundeven_v8f64(<8 x double> %x) { %a = call <8 x double> @llvm.roundeven.v8f64(<8 x double> %x) ret <8 x double> %a } -declare <8 x double> @llvm.roundeven.v8f64(<8 x double>) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll index 81679806f32d8..da04b08aa5db5 100644 
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll @@ -2,7 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v,+m -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+v,+m -verify-machineinstrs < %s | FileCheck %s -declare <2 x i8> @llvm.vp.fshr.v2i8(<2 x i8>, <2 x i8>, <2 x i8>, <2 x i1>, i32) define <2 x i8> @fshr_v2i8(<2 x i8> %a, <2 x i8> %b, <2 x i8> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v2i8: ; CHECK: # %bb.0: @@ -19,7 +18,6 @@ define <2 x i8> @fshr_v2i8(<2 x i8> %a, <2 x i8> %b, <2 x i8> %c, <2 x i1> %m, i ret <2 x i8> %res } -declare <2 x i8> @llvm.vp.fshl.v2i8(<2 x i8>, <2 x i8>, <2 x i8>, <2 x i1>, i32) define <2 x i8> @fshl_v2i8(<2 x i8> %a, <2 x i8> %b, <2 x i8> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v2i8: ; CHECK: # %bb.0: @@ -36,7 +34,6 @@ define <2 x i8> @fshl_v2i8(<2 x i8> %a, <2 x i8> %b, <2 x i8> %c, <2 x i1> %m, i ret <2 x i8> %res } -declare <4 x i8> @llvm.vp.fshr.v4i8(<4 x i8>, <4 x i8>, <4 x i8>, <4 x i1>, i32) define <4 x i8> @fshr_v4i8(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v4i8: ; CHECK: # %bb.0: @@ -53,7 +50,6 @@ define <4 x i8> @fshr_v4i8(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <4 x i1> %m, i ret <4 x i8> %res } -declare <4 x i8> @llvm.vp.fshl.v4i8(<4 x i8>, <4 x i8>, <4 x i8>, <4 x i1>, i32) define <4 x i8> @fshl_v4i8(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v4i8: ; CHECK: # %bb.0: @@ -70,7 +66,6 @@ define <4 x i8> @fshl_v4i8(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <4 x i1> %m, i ret <4 x i8> %res } -declare <8 x i8> @llvm.vp.fshr.v8i8(<8 x i8>, <8 x i8>, <8 x i8>, <8 x i1>, i32) define <8 x i8> @fshr_v8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v8i8: ; CHECK: # %bb.0: @@ -87,7 +82,6 @@ define <8 x i8> @fshr_v8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x 
i1> %m, i ret <8 x i8> %res } -declare <8 x i8> @llvm.vp.fshl.v8i8(<8 x i8>, <8 x i8>, <8 x i8>, <8 x i1>, i32) define <8 x i8> @fshl_v8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v8i8: ; CHECK: # %bb.0: @@ -104,7 +98,6 @@ define <8 x i8> @fshl_v8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i1> %m, i ret <8 x i8> %res } -declare <16 x i8> @llvm.vp.fshr.v16i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i1>, i32) define <16 x i8> @fshr_v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v16i8: ; CHECK: # %bb.0: @@ -121,7 +114,6 @@ define <16 x i8> @fshr_v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i1> ret <16 x i8> %res } -declare <16 x i8> @llvm.vp.fshl.v16i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i1>, i32) define <16 x i8> @fshl_v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v16i8: ; CHECK: # %bb.0: @@ -138,7 +130,6 @@ define <16 x i8> @fshl_v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i1> ret <16 x i8> %res } -declare <32 x i8> @llvm.vp.fshr.v32i8(<32 x i8>, <32 x i8>, <32 x i8>, <32 x i1>, i32) define <32 x i8> @fshr_v32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8> %c, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v32i8: ; CHECK: # %bb.0: @@ -155,7 +146,6 @@ define <32 x i8> @fshr_v32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8> %c, <32 x i1> ret <32 x i8> %res } -declare <32 x i8> @llvm.vp.fshl.v32i8(<32 x i8>, <32 x i8>, <32 x i8>, <32 x i1>, i32) define <32 x i8> @fshl_v32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8> %c, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v32i8: ; CHECK: # %bb.0: @@ -172,7 +162,6 @@ define <32 x i8> @fshl_v32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8> %c, <32 x i1> ret <32 x i8> %res } -declare <64 x i8> @llvm.vp.fshr.v64i8(<64 x i8>, <64 x i8>, <64 x i8>, <64 x i1>, i32) define <64 x i8> @fshr_v64i8(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c, <64 x i1> %m, i32 zeroext 
%evl) { ; CHECK-LABEL: fshr_v64i8: ; CHECK: # %bb.0: @@ -189,7 +178,6 @@ define <64 x i8> @fshr_v64i8(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c, <64 x i1> ret <64 x i8> %res } -declare <64 x i8> @llvm.vp.fshl.v64i8(<64 x i8>, <64 x i8>, <64 x i8>, <64 x i1>, i32) define <64 x i8> @fshl_v64i8(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c, <64 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v64i8: ; CHECK: # %bb.0: @@ -206,7 +194,6 @@ define <64 x i8> @fshl_v64i8(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c, <64 x i1> ret <64 x i8> %res } -declare <2 x i16> @llvm.vp.fshr.v2i16(<2 x i16>, <2 x i16>, <2 x i16>, <2 x i1>, i32) define <2 x i16> @fshr_v2i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v2i16: ; CHECK: # %bb.0: @@ -223,7 +210,6 @@ define <2 x i16> @fshr_v2i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, <2 x i1> ret <2 x i16> %res } -declare <2 x i16> @llvm.vp.fshl.v2i16(<2 x i16>, <2 x i16>, <2 x i16>, <2 x i1>, i32) define <2 x i16> @fshl_v2i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v2i16: ; CHECK: # %bb.0: @@ -240,7 +226,6 @@ define <2 x i16> @fshl_v2i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, <2 x i1> ret <2 x i16> %res } -declare <4 x i16> @llvm.vp.fshr.v4i16(<4 x i16>, <4 x i16>, <4 x i16>, <4 x i1>, i32) define <4 x i16> @fshr_v4i16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v4i16: ; CHECK: # %bb.0: @@ -257,7 +242,6 @@ define <4 x i16> @fshr_v4i16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c, <4 x i1> ret <4 x i16> %res } -declare <4 x i16> @llvm.vp.fshl.v4i16(<4 x i16>, <4 x i16>, <4 x i16>, <4 x i1>, i32) define <4 x i16> @fshl_v4i16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v4i16: ; CHECK: # %bb.0: @@ -274,7 +258,6 @@ define <4 x i16> @fshl_v4i16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c, <4 x i1> ret <4 x i16> %res } -declare <8 x i16> 
@llvm.vp.fshr.v8i16(<8 x i16>, <8 x i16>, <8 x i16>, <8 x i1>, i32) define <8 x i16> @fshr_v8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v8i16: ; CHECK: # %bb.0: @@ -291,7 +274,6 @@ define <8 x i16> @fshr_v8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x i1> ret <8 x i16> %res } -declare <8 x i16> @llvm.vp.fshl.v8i16(<8 x i16>, <8 x i16>, <8 x i16>, <8 x i1>, i32) define <8 x i16> @fshl_v8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v8i16: ; CHECK: # %bb.0: @@ -308,7 +290,6 @@ define <8 x i16> @fshl_v8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x i1> ret <8 x i16> %res } -declare <16 x i16> @llvm.vp.fshr.v16i16(<16 x i16>, <16 x i16>, <16 x i16>, <16 x i1>, i32) define <16 x i16> @fshr_v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v16i16: ; CHECK: # %bb.0: @@ -325,7 +306,6 @@ define <16 x i16> @fshr_v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c, <16 ret <16 x i16> %res } -declare <16 x i16> @llvm.vp.fshl.v16i16(<16 x i16>, <16 x i16>, <16 x i16>, <16 x i1>, i32) define <16 x i16> @fshl_v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v16i16: ; CHECK: # %bb.0: @@ -342,7 +322,6 @@ define <16 x i16> @fshl_v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c, <16 ret <16 x i16> %res } -declare <32 x i16> @llvm.vp.fshr.v32i16(<32 x i16>, <32 x i16>, <32 x i16>, <32 x i1>, i32) define <32 x i16> @fshr_v32i16(<32 x i16> %a, <32 x i16> %b, <32 x i16> %c, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v32i16: ; CHECK: # %bb.0: @@ -359,7 +338,6 @@ define <32 x i16> @fshr_v32i16(<32 x i16> %a, <32 x i16> %b, <32 x i16> %c, <32 ret <32 x i16> %res } -declare <32 x i16> @llvm.vp.fshl.v32i16(<32 x i16>, <32 x i16>, <32 x i16>, <32 x i1>, i32) define <32 x i16> @fshl_v32i16(<32 x i16> %a, <32 x i16> %b, <32 x i16> %c, <32 x i1> %m, i32 
zeroext %evl) { ; CHECK-LABEL: fshl_v32i16: ; CHECK: # %bb.0: @@ -376,7 +354,6 @@ define <32 x i16> @fshl_v32i16(<32 x i16> %a, <32 x i16> %b, <32 x i16> %c, <32 ret <32 x i16> %res } -declare <2 x i32> @llvm.vp.fshr.v2i32(<2 x i32>, <2 x i32>, <2 x i32>, <2 x i1>, i32) define <2 x i32> @fshr_v2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v2i32: ; CHECK: # %bb.0: @@ -394,7 +371,6 @@ define <2 x i32> @fshr_v2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i1> ret <2 x i32> %res } -declare <2 x i32> @llvm.vp.fshl.v2i32(<2 x i32>, <2 x i32>, <2 x i32>, <2 x i1>, i32) define <2 x i32> @fshl_v2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v2i32: ; CHECK: # %bb.0: @@ -412,7 +388,6 @@ define <2 x i32> @fshl_v2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i1> ret <2 x i32> %res } -declare <4 x i32> @llvm.vp.fshr.v4i32(<4 x i32>, <4 x i32>, <4 x i32>, <4 x i1>, i32) define <4 x i32> @fshr_v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v4i32: ; CHECK: # %bb.0: @@ -430,7 +405,6 @@ define <4 x i32> @fshr_v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i1> ret <4 x i32> %res } -declare <4 x i32> @llvm.vp.fshl.v4i32(<4 x i32>, <4 x i32>, <4 x i32>, <4 x i1>, i32) define <4 x i32> @fshl_v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v4i32: ; CHECK: # %bb.0: @@ -448,7 +422,6 @@ define <4 x i32> @fshl_v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i1> ret <4 x i32> %res } -declare <8 x i32> @llvm.vp.fshr.v8i32(<8 x i32>, <8 x i32>, <8 x i32>, <8 x i1>, i32) define <8 x i32> @fshr_v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v8i32: ; CHECK: # %bb.0: @@ -466,7 +439,6 @@ define <8 x i32> @fshr_v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i1> ret <8 x i32> %res } -declare <8 x i32> 
@llvm.vp.fshl.v8i32(<8 x i32>, <8 x i32>, <8 x i32>, <8 x i1>, i32) define <8 x i32> @fshl_v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v8i32: ; CHECK: # %bb.0: @@ -484,7 +456,6 @@ define <8 x i32> @fshl_v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i1> ret <8 x i32> %res } -declare <16 x i32> @llvm.vp.fshr.v16i32(<16 x i32>, <16 x i32>, <16 x i32>, <16 x i1>, i32) define <16 x i32> @fshr_v16i32(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v16i32: ; CHECK: # %bb.0: @@ -502,7 +473,6 @@ define <16 x i32> @fshr_v16i32(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c, <16 ret <16 x i32> %res } -declare <16 x i32> @llvm.vp.fshl.v16i32(<16 x i32>, <16 x i32>, <16 x i32>, <16 x i1>, i32) define <16 x i32> @fshl_v16i32(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v16i32: ; CHECK: # %bb.0: @@ -520,7 +490,6 @@ define <16 x i32> @fshl_v16i32(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c, <16 ret <16 x i32> %res } -declare <2 x i64> @llvm.vp.fshr.v2i64(<2 x i64>, <2 x i64>, <2 x i64>, <2 x i1>, i32) define <2 x i64> @fshr_v2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v2i64: ; CHECK: # %bb.0: @@ -538,7 +507,6 @@ define <2 x i64> @fshr_v2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c, <2 x i1> ret <2 x i64> %res } -declare <2 x i64> @llvm.vp.fshl.v2i64(<2 x i64>, <2 x i64>, <2 x i64>, <2 x i1>, i32) define <2 x i64> @fshl_v2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v2i64: ; CHECK: # %bb.0: @@ -556,7 +524,6 @@ define <2 x i64> @fshl_v2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c, <2 x i1> ret <2 x i64> %res } -declare <4 x i64> @llvm.vp.fshr.v4i64(<4 x i64>, <4 x i64>, <4 x i64>, <4 x i1>, i32) define <4 x i64> @fshr_v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: 
fshr_v4i64: ; CHECK: # %bb.0: @@ -574,7 +541,6 @@ define <4 x i64> @fshr_v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 x i1> ret <4 x i64> %res } -declare <4 x i64> @llvm.vp.fshl.v4i64(<4 x i64>, <4 x i64>, <4 x i64>, <4 x i1>, i32) define <4 x i64> @fshl_v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v4i64: ; CHECK: # %bb.0: @@ -592,7 +558,6 @@ define <4 x i64> @fshl_v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 x i1> ret <4 x i64> %res } -declare <7 x i64> @llvm.vp.fshr.v7i64(<7 x i64>, <7 x i64>, <7 x i64>, <7 x i1>, i32) define <7 x i64> @fshr_v7i64(<7 x i64> %a, <7 x i64> %b, <7 x i64> %c, <7 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v7i64: ; CHECK: # %bb.0: @@ -610,7 +575,6 @@ define <7 x i64> @fshr_v7i64(<7 x i64> %a, <7 x i64> %b, <7 x i64> %c, <7 x i1> ret <7 x i64> %res } -declare <7 x i64> @llvm.vp.fshl.v7i64(<7 x i64>, <7 x i64>, <7 x i64>, <7 x i1>, i32) define <7 x i64> @fshl_v7i64(<7 x i64> %a, <7 x i64> %b, <7 x i64> %c, <7 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v7i64: ; CHECK: # %bb.0: @@ -628,7 +592,6 @@ define <7 x i64> @fshl_v7i64(<7 x i64> %a, <7 x i64> %b, <7 x i64> %c, <7 x i1> ret <7 x i64> %res } -declare <8 x i64> @llvm.vp.fshr.v8i64(<8 x i64>, <8 x i64>, <8 x i64>, <8 x i1>, i32) define <8 x i64> @fshr_v8i64(<8 x i64> %a, <8 x i64> %b, <8 x i64> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v8i64: ; CHECK: # %bb.0: @@ -646,7 +609,6 @@ define <8 x i64> @fshr_v8i64(<8 x i64> %a, <8 x i64> %b, <8 x i64> %c, <8 x i1> ret <8 x i64> %res } -declare <8 x i64> @llvm.vp.fshl.v8i64(<8 x i64>, <8 x i64>, <8 x i64>, <8 x i1>, i32) define <8 x i64> @fshl_v8i64(<8 x i64> %a, <8 x i64> %b, <8 x i64> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v8i64: ; CHECK: # %bb.0: @@ -664,7 +626,6 @@ define <8 x i64> @fshl_v8i64(<8 x i64> %a, <8 x i64> %b, <8 x i64> %c, <8 x i1> ret <8 x i64> %res } -declare <16 x i64> @llvm.vp.fshr.v16i64(<16 x i64>, <16 x 
i64>, <16 x i64>, <16 x i1>, i32) define <16 x i64> @fshr_v16i64(<16 x i64> %a, <16 x i64> %b, <16 x i64> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v16i64: ; CHECK: # %bb.0: @@ -700,7 +661,6 @@ define <16 x i64> @fshr_v16i64(<16 x i64> %a, <16 x i64> %b, <16 x i64> %c, <16 ret <16 x i64> %res } -declare <16 x i64> @llvm.vp.fshl.v16i64(<16 x i64>, <16 x i64>, <16 x i64>, <16 x i1>, i32) define <16 x i64> @fshl_v16i64(<16 x i64> %a, <16 x i64> %b, <16 x i64> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v16i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ftrunc-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ftrunc-constrained-sdnode.ll index b7cf84fba4210..7813d7f309b6a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ftrunc-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ftrunc-constrained-sdnode.ll @@ -24,7 +24,6 @@ define <1 x half> @trunc_v1f16(<1 x half> %x) strictfp { %a = call <1 x half> @llvm.experimental.constrained.trunc.v1f16(<1 x half> %x, metadata !"fpexcept.strict") ret <1 x half> %a } -declare <1 x half> @llvm.experimental.constrained.trunc.v1f16(<1 x half>, metadata) define <2 x half> @trunc_v2f16(<2 x half> %x) strictfp { ; CHECK-LABEL: trunc_v2f16: @@ -46,7 +45,6 @@ define <2 x half> @trunc_v2f16(<2 x half> %x) strictfp { %a = call <2 x half> @llvm.experimental.constrained.trunc.v2f16(<2 x half> %x, metadata !"fpexcept.strict") ret <2 x half> %a } -declare <2 x half> @llvm.experimental.constrained.trunc.v2f16(<2 x half>, metadata) define <4 x half> @trunc_v4f16(<4 x half> %x) strictfp { ; CHECK-LABEL: trunc_v4f16: @@ -68,7 +66,6 @@ define <4 x half> @trunc_v4f16(<4 x half> %x) strictfp { %a = call <4 x half> @llvm.experimental.constrained.trunc.v4f16(<4 x half> %x, metadata !"fpexcept.strict") ret <4 x half> %a } -declare <4 x half> @llvm.experimental.constrained.trunc.v4f16(<4 x half>, metadata) define <8 x half> @trunc_v8f16(<8 x half> %x) 
strictfp { ; CHECK-LABEL: trunc_v8f16: @@ -90,7 +87,6 @@ define <8 x half> @trunc_v8f16(<8 x half> %x) strictfp { %a = call <8 x half> @llvm.experimental.constrained.trunc.v8f16(<8 x half> %x, metadata !"fpexcept.strict") ret <8 x half> %a } -declare <8 x half> @llvm.experimental.constrained.trunc.v8f16(<8 x half>, metadata) define <16 x half> @trunc_v16f16(<16 x half> %x) strictfp { ; CHECK-LABEL: trunc_v16f16: @@ -112,7 +108,6 @@ define <16 x half> @trunc_v16f16(<16 x half> %x) strictfp { %a = call <16 x half> @llvm.experimental.constrained.trunc.v16f16(<16 x half> %x, metadata !"fpexcept.strict") ret <16 x half> %a } -declare <16 x half> @llvm.experimental.constrained.trunc.v16f16(<16 x half>, metadata) define <32 x half> @trunc_v32f16(<32 x half> %x) strictfp { ; CHECK-LABEL: trunc_v32f16: @@ -135,7 +130,6 @@ define <32 x half> @trunc_v32f16(<32 x half> %x) strictfp { %a = call <32 x half> @llvm.experimental.constrained.trunc.v32f16(<32 x half> %x, metadata !"fpexcept.strict") ret <32 x half> %a } -declare <32 x half> @llvm.experimental.constrained.trunc.v32f16(<32 x half>, metadata) define <1 x float> @trunc_v1f32(<1 x float> %x) strictfp { ; CHECK-LABEL: trunc_v1f32: @@ -156,7 +150,6 @@ define <1 x float> @trunc_v1f32(<1 x float> %x) strictfp { %a = call <1 x float> @llvm.experimental.constrained.trunc.v1f32(<1 x float> %x, metadata !"fpexcept.strict") ret <1 x float> %a } -declare <1 x float> @llvm.experimental.constrained.trunc.v1f32(<1 x float>, metadata) define <2 x float> @trunc_v2f32(<2 x float> %x) strictfp { ; CHECK-LABEL: trunc_v2f32: @@ -177,7 +170,6 @@ define <2 x float> @trunc_v2f32(<2 x float> %x) strictfp { %a = call <2 x float> @llvm.experimental.constrained.trunc.v2f32(<2 x float> %x, metadata !"fpexcept.strict") ret <2 x float> %a } -declare <2 x float> @llvm.experimental.constrained.trunc.v2f32(<2 x float>, metadata) define <4 x float> @trunc_v4f32(<4 x float> %x) strictfp { ; CHECK-LABEL: trunc_v4f32: @@ -198,7 +190,6 @@ define <4 x float> 
@trunc_v4f32(<4 x float> %x) strictfp { %a = call <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float> %x, metadata !"fpexcept.strict") ret <4 x float> %a } -declare <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float>, metadata) define <8 x float> @trunc_v8f32(<8 x float> %x) strictfp { ; CHECK-LABEL: trunc_v8f32: @@ -219,7 +210,6 @@ define <8 x float> @trunc_v8f32(<8 x float> %x) strictfp { %a = call <8 x float> @llvm.experimental.constrained.trunc.v8f32(<8 x float> %x, metadata !"fpexcept.strict") ret <8 x float> %a } -declare <8 x float> @llvm.experimental.constrained.trunc.v8f32(<8 x float>, metadata) define <16 x float> @trunc_v16f32(<16 x float> %x) strictfp { ; CHECK-LABEL: trunc_v16f32: @@ -240,7 +230,6 @@ define <16 x float> @trunc_v16f32(<16 x float> %x) strictfp { %a = call <16 x float> @llvm.experimental.constrained.trunc.v16f32(<16 x float> %x, metadata !"fpexcept.strict") ret <16 x float> %a } -declare <16 x float> @llvm.experimental.constrained.trunc.v16f32(<16 x float>, metadata) define <1 x double> @trunc_v1f64(<1 x double> %x) strictfp { ; RV32-LABEL: trunc_v1f64: @@ -278,7 +267,6 @@ define <1 x double> @trunc_v1f64(<1 x double> %x) strictfp { %a = call <1 x double> @llvm.experimental.constrained.trunc.v1f64(<1 x double> %x, metadata !"fpexcept.strict") ret <1 x double> %a } -declare <1 x double> @llvm.experimental.constrained.trunc.v1f64(<1 x double>, metadata) define <2 x double> @trunc_v2f64(<2 x double> %x) strictfp { ; RV32-LABEL: trunc_v2f64: @@ -316,7 +304,6 @@ define <2 x double> @trunc_v2f64(<2 x double> %x) strictfp { %a = call <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double> %x, metadata !"fpexcept.strict") ret <2 x double> %a } -declare <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double>, metadata) define <4 x double> @trunc_v4f64(<4 x double> %x) strictfp { ; RV32-LABEL: trunc_v4f64: @@ -354,7 +341,6 @@ define <4 x double> @trunc_v4f64(<4 x double> %x) strictfp { %a = 
call <4 x double> @llvm.experimental.constrained.trunc.v4f64(<4 x double> %x, metadata !"fpexcept.strict") ret <4 x double> %a } -declare <4 x double> @llvm.experimental.constrained.trunc.v4f64(<4 x double>, metadata) define <8 x double> @trunc_v8f64(<8 x double> %x) strictfp { ; RV32-LABEL: trunc_v8f64: @@ -392,4 +378,3 @@ define <8 x double> @trunc_v8f64(<8 x double> %x) strictfp { %a = call <8 x double> @llvm.experimental.constrained.trunc.v8f64(<8 x double> %x, metadata !"fpexcept.strict") ret <8 x double> %a } -declare <8 x double> @llvm.experimental.constrained.trunc.v8f64(<8 x double>, metadata) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector-shuffle.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector-shuffle.ll index a99efc97f7e63..8efb48a8cb691 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector-shuffle.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector-shuffle.ll @@ -13,7 +13,6 @@ define <4 x i32> @insert_subvector_load_v4i32_v4i32(<4 x i32> %v1, ptr %p) { ret <4 x i32> %v3 } -declare <4 x i32> @llvm.vp.load.v4i32(ptr, <4 x i1>, i32) define <4 x i32> @insert_subvector_vp_load_v4i32_v4i32(<4 x i32> %v1, ptr %p, <4 x i1> %mask) { ; CHECK-LABEL: insert_subvector_vp_load_v4i32_v4i32: ; CHECK: # %bb.0: @@ -26,7 +25,6 @@ define <4 x i32> @insert_subvector_vp_load_v4i32_v4i32(<4 x i32> %v1, ptr %p, <4 } ; Can't fold this in because the load has a non-poison passthru that isn't equal to the vmv.v.v passtrhu -declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32, <4 x i1>, <4 x i32>) define <4 x i32> @insert_subvector_load_unfoldable_passthru_v4i32_v4i32(<4 x i32> %v1, ptr %p, <4 x i1> %mask, <4 x i32> %passthru) { ; CHECK-LABEL: insert_subvector_load_unfoldable_passthru_v4i32_v4i32: ; CHECK: # %bb.0: @@ -65,7 +63,6 @@ define <4 x i32> @insert_subvector_add_v4i32_v4i32(<4 x i32> %v1, <4 x i32> %v2) ret <4 x i32> %v4 } -declare <4 x i32> @llvm.vp.add.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) 
define <4 x i32> @insert_subvector_vp_add_v4i32_v4i32(<4 x i32> %v1, <4 x i32> %v2, <4 x i1> %mask) { ; CHECK-LABEL: insert_subvector_vp_add_v4i32_v4i32: ; CHECK: # %bb.0: @@ -91,7 +88,6 @@ define <4 x i32> @insert_subvector_load_v4i32_v2i32(<4 x i32> %v1, ptr %p) { ret <4 x i32> %v4 } -declare <2 x i32> @llvm.vp.load.v2i32(ptr, <2 x i1>, i32) define <4 x i32> @insert_subvector_vp_load_v4i32_v2i32(<4 x i32> %v1, ptr %p, <2 x i1> %mask) { ; CHECK-LABEL: insert_subvector_vp_load_v4i32_v2i32: ; CHECK: # %bb.0: @@ -121,7 +117,6 @@ define <4 x i32> @insert_subvector_add_v4i32_v2i32(<4 x i32> %v1, <2 x i32> %v2) ret <4 x i32> %v5 } -declare <2 x i32> @llvm.vp.add.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) define <4 x i32> @insert_subvector_vp_add_v4i32_v2i32(<4 x i32> %v1, <2 x i32> %v2, <2 x i1> %mask) { ; CHECK-LABEL: insert_subvector_vp_add_v4i32_v2i32: ; CHECK: # %bb.0: @@ -148,7 +143,6 @@ define <4 x i32> @insert_subvector_load_v4i32_v8i32(<4 x i32> %v1, ptr %p) { ret <4 x i32> %v4 } -declare <8 x i32> @llvm.vp.load.v8i32(ptr, <8 x i1>, i32) define <4 x i32> @insert_subvector_vp_load_v4i32_v8i32(<4 x i32> %v1, ptr %p, <8 x i1> %mask) { ; CHECK-LABEL: insert_subvector_vp_load_v4i32_v8i32: ; CHECK: # %bb.0: @@ -177,7 +171,6 @@ define <4 x i32> @insert_subvector_add_v4i32_v8i32(<4 x i32> %v1, <8 x i32> %v2) ret <4 x i32> %v5 } -declare <8 x i32> @llvm.vp.add.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) define <4 x i32> @insert_subvector_vp_add_v4i32_v8i32(<4 x i32> %v1, <8 x i32> %v2, <8 x i1> %mask) { ; CHECK-LABEL: insert_subvector_vp_add_v4i32_v8i32: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll index 00328f9d33d3e..c0473eea56552 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll @@ -140,7 +140,6 @@ define @insert_nxv8i32_v4i32_0( %vec, <4 x ret %v } - define <4 x i32> 
@insert_v4i32_v4i32_0(<4 x i32> %vec, <4 x i32> %subvec) { ; CHECK-LABEL: insert_v4i32_v4i32_0: ; CHECK: # %bb.0: @@ -252,7 +251,6 @@ bar: ret <4 x i32> %w } - define void @insert_v8i32_v2i32_0(ptr %vp, ptr %svp) { ; VLA-LABEL: insert_v8i32_v2i32_0: ; VLA: # %bb.0: @@ -603,8 +601,6 @@ define @insert_nxv8i1_v8i1_16( %v, ptr %svp) ret %c } -declare @llvm.vector.insert.v2i64.nxv16i64(, <2 x i64>, i64) - define void @insert_v2i64_nxv16i64(ptr %psv0, ptr %psv1, ptr %out) { ; VLA-LABEL: insert_v2i64_nxv16i64: ; VLA: # %bb.0: @@ -966,23 +962,6 @@ define @insert_nxv8f16_v2f16_2( %vec, ptr ret %v } -declare <8 x i1> @llvm.vector.insert.v4i1.v8i1(<8 x i1>, <4 x i1>, i64) -declare <32 x i1> @llvm.vector.insert.v8i1.v32i1(<32 x i1>, <8 x i1>, i64) - -declare <4 x i16> @llvm.vector.insert.v2i16.v4i16(<4 x i16>, <2 x i16>, i64) - -declare <4 x i32> @llvm.vector.insert.v2i32.v4i32(<4 x i32>, <2 x i32>, i64) -declare <8 x i32> @llvm.vector.insert.v2i32.v8i32(<8 x i32>, <2 x i32>, i64) - -declare @llvm.vector.insert.v4i1.nxv2i1(, <4 x i1>, i64) -declare @llvm.vector.insert.v8i1.nxv8i1(, <8 x i1>, i64) - -declare @llvm.vector.insert.v2i16.nxv2i16(, <2 x i16>, i64) - -declare @llvm.vector.insert.v2i32.nxv8i32(, <2 x i32>, i64) -declare @llvm.vector.insert.v4i32.nxv8i32(, <4 x i32>, i64) -declare @llvm.vector.insert.v8i32.nxv8i32(, <8 x i32>, i64) - ; We emit insert_subvectors of fixed vectors at index 0 into undefs as a ; copy_to_regclass or insert_subreg, depending on the register classes of the ; vector types. 
Make sure that we use the correct type and not the shrunken @@ -991,7 +970,6 @@ declare @llvm.vector.insert.v8i32.nxv8i32(, ; ; t14: nxv2i32 = insert_subvector poison:nxv2i32, t4, Constant:i64<0> ; t15: v8i32 = extract_subvector t14, Constant:i64<0> -declare <4 x i32> @llvm.vector.extract.v4i32.v8i32(<8 x i32>, i64) define <4 x i32> @insert_extract_v8i32_v2i32_0(<2 x i32> %v) { ; CHECK-LABEL: insert_extract_v8i32_v2i32_0: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll index 9df71cfc96cc7..7cb00d40e60c0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll @@ -1493,7 +1493,6 @@ define void @smin_vx_v16i8(ptr %x, i8 %y) { store <16 x i8> %d, ptr %x ret void } -declare <16 x i8> @llvm.smin.v16i8(<16 x i8>, <16 x i8>) define void @smin_vx_v8i16(ptr %x, i16 %y) { ; CHECK-LABEL: smin_vx_v8i16: @@ -1510,7 +1509,6 @@ define void @smin_vx_v8i16(ptr %x, i16 %y) { store <8 x i16> %d, ptr %x ret void } -declare <8 x i16> @llvm.smin.v8i16(<8 x i16>, <8 x i16>) define void @smin_vx_v6i16(ptr %x, i16 %y) { ; CHECK-LABEL: smin_vx_v6i16: @@ -1527,7 +1525,6 @@ define void @smin_vx_v6i16(ptr %x, i16 %y) { store <6 x i16> %d, ptr %x ret void } -declare <6 x i16> @llvm.smin.v6i16(<6 x i16>, <6 x i16>) define void @smin_vx_v4i32(ptr %x, i32 %y) { ; CHECK-LABEL: smin_vx_v4i32: @@ -1544,7 +1541,6 @@ define void @smin_vx_v4i32(ptr %x, i32 %y) { store <4 x i32> %d, ptr %x ret void } -declare <4 x i32> @llvm.smin.v4i32(<4 x i32>, <4 x i32>) define void @smin_xv_v16i8(ptr %x, i8 %y) { ; CHECK-LABEL: smin_xv_v16i8: @@ -1710,7 +1706,6 @@ define void @smax_vx_v16i8(ptr %x, i8 %y) { store <16 x i8> %d, ptr %x ret void } -declare <16 x i8> @llvm.smax.v16i8(<16 x i8>, <16 x i8>) define void @smax_vx_v8i16(ptr %x, i16 %y) { ; CHECK-LABEL: smax_vx_v8i16: @@ -1727,7 +1722,6 @@ define void @smax_vx_v8i16(ptr %x, i16 %y) { store <8 x i16> %d, ptr %x ret void 
} -declare <8 x i16> @llvm.smax.v8i16(<8 x i16>, <8 x i16>) define void @smax_vx_v6i16(ptr %x, i16 %y) { ; CHECK-LABEL: smax_vx_v6i16: @@ -1744,7 +1738,6 @@ define void @smax_vx_v6i16(ptr %x, i16 %y) { store <6 x i16> %d, ptr %x ret void } -declare <6 x i16> @llvm.smax.v6i16(<6 x i16>, <6 x i16>) define void @smax_vx_v4i32(ptr %x, i32 %y) { ; CHECK-LABEL: smax_vx_v4i32: @@ -1761,7 +1754,6 @@ define void @smax_vx_v4i32(ptr %x, i32 %y) { store <4 x i32> %d, ptr %x ret void } -declare <4 x i32> @llvm.smax.v4i32(<4 x i32>, <4 x i32>) define void @smax_xv_v16i8(ptr %x, i8 %y) { ; CHECK-LABEL: smax_xv_v16i8: @@ -1927,7 +1919,6 @@ define void @umin_vx_v16i8(ptr %x, i8 %y) { store <16 x i8> %d, ptr %x ret void } -declare <16 x i8> @llvm.umin.v16i8(<16 x i8>, <16 x i8>) define void @umin_vx_v8i16(ptr %x, i16 %y) { ; CHECK-LABEL: umin_vx_v8i16: @@ -1944,7 +1935,6 @@ define void @umin_vx_v8i16(ptr %x, i16 %y) { store <8 x i16> %d, ptr %x ret void } -declare <8 x i16> @llvm.umin.v8i16(<8 x i16>, <8 x i16>) define void @umin_vx_v6i16(ptr %x, i16 %y) { ; CHECK-LABEL: umin_vx_v6i16: @@ -1961,7 +1951,6 @@ define void @umin_vx_v6i16(ptr %x, i16 %y) { store <6 x i16> %d, ptr %x ret void } -declare <6 x i16> @llvm.umin.v6i16(<6 x i16>, <6 x i16>) define void @umin_vx_v4i32(ptr %x, i32 %y) { ; CHECK-LABEL: umin_vx_v4i32: @@ -1978,7 +1967,6 @@ define void @umin_vx_v4i32(ptr %x, i32 %y) { store <4 x i32> %d, ptr %x ret void } -declare <4 x i32> @llvm.umin.v4i32(<4 x i32>, <4 x i32>) define void @umin_xv_v16i8(ptr %x, i8 %y) { ; CHECK-LABEL: umin_xv_v16i8: @@ -2144,7 +2132,6 @@ define void @umax_vx_v16i8(ptr %x, i8 %y) { store <16 x i8> %d, ptr %x ret void } -declare <16 x i8> @llvm.umax.v16i8(<16 x i8>, <16 x i8>) define void @umax_vx_v8i16(ptr %x, i16 %y) { ; CHECK-LABEL: umax_vx_v8i16: @@ -2161,7 +2148,6 @@ define void @umax_vx_v8i16(ptr %x, i16 %y) { store <8 x i16> %d, ptr %x ret void } -declare <8 x i16> @llvm.umax.v8i16(<8 x i16>, <8 x i16>) define void @umax_vx_v6i16(ptr %x, i16 
%y) { ; CHECK-LABEL: umax_vx_v6i16: @@ -2178,7 +2164,6 @@ define void @umax_vx_v6i16(ptr %x, i16 %y) { store <6 x i16> %d, ptr %x ret void } -declare <6 x i16> @llvm.umax.v6i16(<6 x i16>, <6 x i16>) define void @umax_vx_v4i32(ptr %x, i32 %y) { ; CHECK-LABEL: umax_vx_v4i32: @@ -2195,7 +2180,6 @@ define void @umax_vx_v4i32(ptr %x, i32 %y) { store <4 x i32> %d, ptr %x ret void } -declare <4 x i32> @llvm.umax.v4i32(<4 x i32>, <4 x i32>) define void @umax_xv_v16i8(ptr %x, i8 %y) { ; CHECK-LABEL: umax_xv_v16i8: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-inttoptr-ptrtoint.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-inttoptr-ptrtoint.ll index 8b6270e86af36..0abad3bf1c56c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-inttoptr-ptrtoint.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-inttoptr-ptrtoint.ll @@ -1,8 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+m,+v < %s | FileCheck %s -declare <4 x ptr> @llvm.vp.inttoptr.v4p0.v4i32(<4 x i32>, <4 x i1>, i32) - define <4 x ptr> @inttoptr_v4p0_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: inttoptr_v4p0_v4i32: ; CHECK: # %bb.0: @@ -14,8 +12,6 @@ define <4 x ptr> @inttoptr_v4p0_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %e ret <4 x ptr> %v } -declare <4 x ptr> @llvm.vp.inttoptr.v4p0.v4i64(<4 x i64>, <4 x i1>, i32) - define <4 x ptr> @inttoptr_v4p0_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: inttoptr_v4p0_v4i64: ; CHECK: # %bb.0: @@ -24,8 +20,6 @@ define <4 x ptr> @inttoptr_v4p0_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %e ret <4 x ptr> %v } -declare <4 x i32> @llvm.vp.ptrtoint.v4i32.v4p0(<4 x ptr>, <4 x i1>, i32) - define <4 x i32> @ptrtoint_v4i32_v4p0(<4 x ptr> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: ptrtoint_v4i32_v4p0: ; CHECK: # %bb.0: @@ -37,8 +31,6 @@ define <4 x i32> @ptrtoint_v4i32_v4p0(<4 x ptr> %va, <4 x i1> %m, i32 zeroext %e ret <4 x i32> %v } -declare 
<4 x i64> @llvm.vp.ptrtoint.v4i64.v4p0(<4 x ptr>, <4 x i1>, i32) - define <4 x i64> @ptrtoint_v4i64_v4p0(<4 x ptr> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: ptrtoint_v4i64_v4p0: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint-vp.ll index 466fe744a1376..1282a6f9f8c6d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint-vp.ll @@ -21,7 +21,6 @@ define <1 x i64> @llrint_v1i64_v1f32(<1 x float> %x, <1 x i1> %m, i32 zeroext %e %a = call <1 x i64> @llvm.vp.llrint.v1i64.v1f32(<1 x float> %x, <1 x i1> %m, i32 %evl) ret <1 x i64> %a } -declare <1 x i64> @llvm.vp.llrint.v1i64.v1f32(<1 x float>, <1 x i1>, i32) define <2 x i64> @llrint_v2i64_v2f32(<2 x float> %x, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: llrint_v2i64_v2f32: @@ -40,7 +39,6 @@ define <2 x i64> @llrint_v2i64_v2f32(<2 x float> %x, <2 x i1> %m, i32 zeroext %e %a = call <2 x i64> @llvm.vp.llrint.v2i64.v2f32(<2 x float> %x, <2 x i1> %m, i32 %evl) ret <2 x i64> %a } -declare <2 x i64> @llvm.vp.llrint.v2i64.v2f32(<2 x float>, <2 x i1>, i32) define <3 x i64> @llrint_v3i64_v3f32(<3 x float> %x, <3 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: llrint_v3i64_v3f32: @@ -59,7 +57,6 @@ define <3 x i64> @llrint_v3i64_v3f32(<3 x float> %x, <3 x i1> %m, i32 zeroext %e %a = call <3 x i64> @llvm.vp.llrint.v3i64.v3f32(<3 x float> %x, <3 x i1> %m, i32 %evl) ret <3 x i64> %a } -declare <3 x i64> @llvm.vp.llrint.v3i64.v3f32(<3 x float>, <3 x i1>, i32) define <4 x i64> @llrint_v4i64_v4f32(<4 x float> %x, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: llrint_v4i64_v4f32: @@ -78,7 +75,6 @@ define <4 x i64> @llrint_v4i64_v4f32(<4 x float> %x, <4 x i1> %m, i32 zeroext %e %a = call <4 x i64> @llvm.vp.llrint.v4i64.v4f32(<4 x float> %x, <4 x i1> %m, i32 %evl) ret <4 x i64> %a } -declare <4 x i64> @llvm.vp.llrint.v4i64.v4f32(<4 x float>, <4 x i1>, i32) define <8 x i64> 
@llrint_v8i64_v8f32(<8 x float> %x, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: llrint_v8i64_v8f32: @@ -97,7 +93,6 @@ define <8 x i64> @llrint_v8i64_v8f32(<8 x float> %x, <8 x i1> %m, i32 zeroext %e %a = call <8 x i64> @llvm.vp.llrint.v8i64.v8f32(<8 x float> %x, <8 x i1> %m, i32 %evl) ret <8 x i64> %a } -declare <8 x i64> @llvm.vp.llrint.v8i64.v8f32(<8 x float>, <8 x i1>, i32) define <16 x i64> @llrint_v16i64_v16f32(<16 x float> %x, <16 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: llrint_v16i64_v16f32: @@ -116,7 +111,6 @@ define <16 x i64> @llrint_v16i64_v16f32(<16 x float> %x, <16 x i1> %m, i32 zeroe %a = call <16 x i64> @llvm.vp.llrint.v16i64.v16f32(<16 x float> %x, <16 x i1> %m, i32 %evl) ret <16 x i64> %a } -declare <16 x i64> @llvm.vp.llrint.v16i64.v16f32(<16 x float>, <16 x i1>, i32) define <1 x i64> @llrint_v1i64_v1f64(<1 x double> %x, <1 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: llrint_v1i64_v1f64: @@ -133,7 +127,6 @@ define <1 x i64> @llrint_v1i64_v1f64(<1 x double> %x, <1 x i1> %m, i32 zeroext % %a = call <1 x i64> @llvm.vp.llrint.v1i64.v1f64(<1 x double> %x, <1 x i1> %m, i32 %evl) ret <1 x i64> %a } -declare <1 x i64> @llvm.vp.llrint.v1i64.v1f64(<1 x double>, <1 x i1>, i32) define <2 x i64> @llrint_v2i64_v2f64(<2 x double> %x, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: llrint_v2i64_v2f64: @@ -150,7 +143,6 @@ define <2 x i64> @llrint_v2i64_v2f64(<2 x double> %x, <2 x i1> %m, i32 zeroext % %a = call <2 x i64> @llvm.vp.llrint.v2i64.v2f64(<2 x double> %x, <2 x i1> %m, i32 %evl) ret <2 x i64> %a } -declare <2 x i64> @llvm.vp.llrint.v2i64.v2f64(<2 x double>, <2 x i1>, i32) define <4 x i64> @llrint_v4i64_v4f64(<4 x double> %x, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: llrint_v4i64_v4f64: @@ -167,7 +159,6 @@ define <4 x i64> @llrint_v4i64_v4f64(<4 x double> %x, <4 x i1> %m, i32 zeroext % %a = call <4 x i64> @llvm.vp.llrint.v4i64.v4f64(<4 x double> %x, <4 x i1> %m, i32 %evl) ret <4 x i64> %a } -declare <4 x i64> @llvm.vp.llrint.v4i64.v4f64(<4 x 
double>, <4 x i1>, i32) define <8 x i64> @llrint_v8i64_v8f64(<8 x double> %x, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: llrint_v8i64_v8f64: @@ -184,4 +175,3 @@ define <8 x i64> @llrint_v8i64_v8f64(<8 x double> %x, <8 x i1> %m, i32 zeroext % %a = call <8 x i64> @llvm.vp.llrint.v8i64.v8f64(<8 x double> %x, <8 x i1> %m, i32 %evl) ret <8 x i64> %a } -declare <8 x i64> @llvm.vp.llrint.v8i64.v8f64(<8 x double>, <8 x i1>, i32) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint.ll index b9a84ff9b07b9..d7f971baf34b8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint.ll @@ -14,7 +14,6 @@ define <1 x i64> @llrint_v1i64_v1f32(<1 x float> %x) { %a = call <1 x i64> @llvm.llrint.v1i64.v1f32(<1 x float> %x) ret <1 x i64> %a } -declare <1 x i64> @llvm.llrint.v1i64.v1f32(<1 x float>) define <2 x i64> @llrint_v2i64_v2f32(<2 x float> %x) { ; CHECK-LABEL: llrint_v2i64_v2f32: @@ -26,7 +25,6 @@ define <2 x i64> @llrint_v2i64_v2f32(<2 x float> %x) { %a = call <2 x i64> @llvm.llrint.v2i64.v2f32(<2 x float> %x) ret <2 x i64> %a } -declare <2 x i64> @llvm.llrint.v2i64.v2f32(<2 x float>) define <3 x i64> @llrint_v3i64_v3f32(<3 x float> %x) { ; CHECK-LABEL: llrint_v3i64_v3f32: @@ -38,7 +36,6 @@ define <3 x i64> @llrint_v3i64_v3f32(<3 x float> %x) { %a = call <3 x i64> @llvm.llrint.v3i64.v3f32(<3 x float> %x) ret <3 x i64> %a } -declare <3 x i64> @llvm.llrint.v3i64.v3f32(<3 x float>) define <4 x i64> @llrint_v4i64_v4f32(<4 x float> %x) { ; CHECK-LABEL: llrint_v4i64_v4f32: @@ -50,7 +47,6 @@ define <4 x i64> @llrint_v4i64_v4f32(<4 x float> %x) { %a = call <4 x i64> @llvm.llrint.v4i64.v4f32(<4 x float> %x) ret <4 x i64> %a } -declare <4 x i64> @llvm.llrint.v4i64.v4f32(<4 x float>) define <8 x i64> @llrint_v8i64_v8f32(<8 x float> %x) { ; CHECK-LABEL: llrint_v8i64_v8f32: @@ -62,7 +58,6 @@ define <8 x i64> @llrint_v8i64_v8f32(<8 x float> %x) { %a = call <8 x i64> 
@llvm.llrint.v8i64.v8f32(<8 x float> %x) ret <8 x i64> %a } -declare <8 x i64> @llvm.llrint.v8i64.v8f32(<8 x float>) define <16 x i64> @llrint_v16i64_v16f32(<16 x float> %x) { ; CHECK-LABEL: llrint_v16i64_v16f32: @@ -74,7 +69,6 @@ define <16 x i64> @llrint_v16i64_v16f32(<16 x float> %x) { %a = call <16 x i64> @llvm.llrint.v16i64.v16f32(<16 x float> %x) ret <16 x i64> %a } -declare <16 x i64> @llvm.llrint.v16i64.v16f32(<16 x float>) define <1 x i64> @llrint_v1i64_v1f64(<1 x double> %x) { ; CHECK-LABEL: llrint_v1i64_v1f64: @@ -85,7 +79,6 @@ define <1 x i64> @llrint_v1i64_v1f64(<1 x double> %x) { %a = call <1 x i64> @llvm.llrint.v1i64.v1f64(<1 x double> %x) ret <1 x i64> %a } -declare <1 x i64> @llvm.llrint.v1i64.v1f64(<1 x double>) define <2 x i64> @llrint_v2i64_v2f64(<2 x double> %x) { ; CHECK-LABEL: llrint_v2i64_v2f64: @@ -96,7 +89,6 @@ define <2 x i64> @llrint_v2i64_v2f64(<2 x double> %x) { %a = call <2 x i64> @llvm.llrint.v2i64.v2f64(<2 x double> %x) ret <2 x i64> %a } -declare <2 x i64> @llvm.llrint.v2i64.v2f64(<2 x double>) define <4 x i64> @llrint_v4i64_v4f64(<4 x double> %x) { ; CHECK-LABEL: llrint_v4i64_v4f64: @@ -107,7 +99,6 @@ define <4 x i64> @llrint_v4i64_v4f64(<4 x double> %x) { %a = call <4 x i64> @llvm.llrint.v4i64.v4f64(<4 x double> %x) ret <4 x i64> %a } -declare <4 x i64> @llvm.llrint.v4i64.v4f64(<4 x double>) define <8 x i64> @llrint_v8i64_v8f64(<8 x double> %x) { ; CHECK-LABEL: llrint_v8i64_v8f64: @@ -118,7 +109,6 @@ define <8 x i64> @llrint_v8i64_v8f64(<8 x double> %x) { %a = call <8 x i64> @llvm.llrint.v8i64.v8f64(<8 x double> %x) ret <8 x i64> %a } -declare <8 x i64> @llvm.llrint.v8i64.v8f64(<8 x double>) define <1 x i64> @llrint_v1i64_v1f16(<1 x half> %x) { ; CHECK-LABEL: llrint_v1i64_v1f16: @@ -131,7 +121,6 @@ define <1 x i64> @llrint_v1i64_v1f16(<1 x half> %x) { %a = call <1 x i64> @llvm.llrint.v1i64.v1f16(<1 x half> %x) ret <1 x i64> %a } -declare <1 x i64> @llvm.llrint.v1i64.v1f16(<1 x half>) define <2 x i64> @llrint_v2i64_v2f16(<2 x 
half> %x) { ; CHECK-LABEL: llrint_v2i64_v2f16: @@ -144,7 +133,6 @@ define <2 x i64> @llrint_v2i64_v2f16(<2 x half> %x) { %a = call <2 x i64> @llvm.llrint.v2i64.v2f16(<2 x half> %x) ret <2 x i64> %a } -declare <2 x i64> @llvm.llrint.v2i64.v2f16(<2 x half>) define <3 x i64> @llrint_v3i64_v3f16(<3 x half> %x) { ; CHECK-LABEL: llrint_v3i64_v3f16: @@ -157,7 +145,6 @@ define <3 x i64> @llrint_v3i64_v3f16(<3 x half> %x) { %a = call <3 x i64> @llvm.llrint.v3i64.v3f16(<3 x half> %x) ret <3 x i64> %a } -declare <3 x i64> @llvm.llrint.v3i64.v3f16(<3 x half>) define <4 x i64> @llrint_v4i64_v4f16(<4 x half> %x) { ; CHECK-LABEL: llrint_v4i64_v4f16: @@ -170,7 +157,6 @@ define <4 x i64> @llrint_v4i64_v4f16(<4 x half> %x) { %a = call <4 x i64> @llvm.llrint.v4i64.v4f16(<4 x half> %x) ret <4 x i64> %a } -declare <4 x i64> @llvm.llrint.v4i64.v4f16(<4 x half>) define <8 x i64> @llrint_v8i64_v8f16(<8 x half> %x) { ; CHECK-LABEL: llrint_v8i64_v8f16: @@ -183,7 +169,6 @@ define <8 x i64> @llrint_v8i64_v8f16(<8 x half> %x) { %a = call <8 x i64> @llvm.llrint.v8i64.v8f16(<8 x half> %x) ret <8 x i64> %a } -declare <8 x i64> @llvm.llrint.v8i64.v8f16(<8 x half>) define <16 x i64> @llrint_v16i64_v16f16(<16 x half> %x) { ; CHECK-LABEL: llrint_v16i64_v16f16: @@ -196,7 +181,6 @@ define <16 x i64> @llrint_v16i64_v16f16(<16 x half> %x) { %a = call <16 x i64> @llvm.llrint.v16i64.v16f16(<16 x half> %x) ret <16 x i64> %a } -declare <16 x i64> @llvm.llrint.v16i64.v16f16(<16 x half>) define <1 x i64> @llrint_v1i64_v1bf16(<1 x bfloat> %x) { ; CHECK-LABEL: llrint_v1i64_v1bf16: @@ -209,7 +193,6 @@ define <1 x i64> @llrint_v1i64_v1bf16(<1 x bfloat> %x) { %a = call <1 x i64> @llvm.llrint.v1i64.v1bf16(<1 x bfloat> %x) ret <1 x i64> %a } -declare <1 x i64> @llvm.llrint.v1i64.v1bf16(<1 x bfloat>) define <2 x i64> @llrint_v2i64_v2bf16(<2 x bfloat> %x) { ; CHECK-LABEL: llrint_v2i64_v2bf16: @@ -222,7 +205,6 @@ define <2 x i64> @llrint_v2i64_v2bf16(<2 x bfloat> %x) { %a = call <2 x i64> @llvm.llrint.v2i64.v2bf16(<2 x 
bfloat> %x) ret <2 x i64> %a } -declare <2 x i64> @llvm.llrint.v2i64.v2bf16(<2 x bfloat>) define <3 x i64> @llrint_v3i64_v3bf16(<3 x bfloat> %x) { ; CHECK-LABEL: llrint_v3i64_v3bf16: @@ -235,7 +217,6 @@ define <3 x i64> @llrint_v3i64_v3bf16(<3 x bfloat> %x) { %a = call <3 x i64> @llvm.llrint.v3i64.v3bf16(<3 x bfloat> %x) ret <3 x i64> %a } -declare <3 x i64> @llvm.llrint.v3i64.v3bf16(<3 x bfloat>) define <4 x i64> @llrint_v4i64_v4bf16(<4 x bfloat> %x) { ; CHECK-LABEL: llrint_v4i64_v4bf16: @@ -248,7 +229,6 @@ define <4 x i64> @llrint_v4i64_v4bf16(<4 x bfloat> %x) { %a = call <4 x i64> @llvm.llrint.v4i64.v4bf16(<4 x bfloat> %x) ret <4 x i64> %a } -declare <4 x i64> @llvm.llrint.v4i64.v4bf16(<4 x bfloat>) define <8 x i64> @llrint_v8i64_v8bf16(<8 x bfloat> %x) { ; CHECK-LABEL: llrint_v8i64_v8bf16: @@ -261,7 +241,6 @@ define <8 x i64> @llrint_v8i64_v8bf16(<8 x bfloat> %x) { %a = call <8 x i64> @llvm.llrint.v8i64.v8bf16(<8 x bfloat> %x) ret <8 x i64> %a } -declare <8 x i64> @llvm.llrint.v8i64.v8bf16(<8 x bfloat>) define <16 x i64> @llrint_v16i64_v16bf16(<16 x bfloat> %x) { ; CHECK-LABEL: llrint_v16i64_v16bf16: @@ -274,4 +253,3 @@ define <16 x i64> @llrint_v16i64_v16bf16(<16 x bfloat> %x) { %a = call <16 x i64> @llvm.llrint.v16i64.v16bf16(<16 x bfloat> %x) ret <16 x i64> %a } -declare <16 x i64> @llvm.llrint.v16i64.v16bf16(<16 x bfloat>) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llround.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llround.ll index 5751759ddd9cb..9de58469479ff 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llround.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llround.ll @@ -27,7 +27,6 @@ define <1 x i64> @llround_v1f16(<1 x half> %x) nounwind { %a = call <1 x i64> @llvm.llround.v1i64.v1f16(<1 x half> %x) ret <1 x i64> %a } -declare <1 x i64> @llvm.llround.v1i64.v1f16(<1 x half>) define <2 x i64> @llround_v2f16(<2 x half> %x) nounwind { ; RV32-LABEL: llround_v2f16: @@ -52,7 +51,6 @@ define <2 x i64> @llround_v2f16(<2 x half> %x) 
nounwind { %a = call <2 x i64> @llvm.llround.v2i64.v2f16(<2 x half> %x) ret <2 x i64> %a } -declare <2 x i64> @llvm.llround.v2i64.v2f16(<2 x half>) define <3 x i64> @llround_v3f16(<3 x half> %x) nounwind { ; RV32-LABEL: llround_v3f16: @@ -77,7 +75,6 @@ define <3 x i64> @llround_v3f16(<3 x half> %x) nounwind { %a = call <3 x i64> @llvm.llround.v3i64.v3f16(<3 x half> %x) ret <3 x i64> %a } -declare <3 x i64> @llvm.llround.v3i64.v3f16(<3 x half>) define <4 x i64> @llround_v4f16(<4 x half> %x) nounwind { ; RV32-LABEL: llround_v4f16: @@ -102,7 +99,6 @@ define <4 x i64> @llround_v4f16(<4 x half> %x) nounwind { %a = call <4 x i64> @llvm.llround.v4i64.v4f16(<4 x half> %x) ret <4 x i64> %a } -declare <4 x i64> @llvm.llround.v4i64.v4f16(<4 x half>) define <8 x i64> @llround_v8f16(<8 x half> %x) nounwind { ; RV32-LABEL: llround_v8f16: @@ -127,7 +123,6 @@ define <8 x i64> @llround_v8f16(<8 x half> %x) nounwind { %a = call <8 x i64> @llvm.llround.v8i64.v8f16(<8 x half> %x) ret <8 x i64> %a } -declare <8 x i64> @llvm.llround.v8i64.v8f16(<8 x half>) define <16 x i64> @llround_v16f16(<16 x half> %x) nounwind { ; RV32-LABEL: llround_v16f16: @@ -152,7 +147,6 @@ define <16 x i64> @llround_v16f16(<16 x half> %x) nounwind { %a = call <16 x i64> @llvm.llround.v16i64.v16f16(<16 x half> %x) ret <16 x i64> %a } -declare <16 x i64> @llvm.llround.v16i64.v16f16(<16 x half>) define <1 x i64> @llround_v1i64_v1f32(<1 x float> %x) nounwind { ; RV32-LABEL: llround_v1i64_v1f32: @@ -175,7 +169,6 @@ define <1 x i64> @llround_v1i64_v1f32(<1 x float> %x) nounwind { %a = call <1 x i64> @llvm.llround.v1i64.v1f32(<1 x float> %x) ret <1 x i64> %a } -declare <1 x i64> @llvm.llround.v1i64.v1f32(<1 x float>) define <2 x i64> @llround_v2i64_v2f32(<2 x float> %x) nounwind { ; RV32-LABEL: llround_v2i64_v2f32: @@ -198,7 +191,6 @@ define <2 x i64> @llround_v2i64_v2f32(<2 x float> %x) nounwind { %a = call <2 x i64> @llvm.llround.v2i64.v2f32(<2 x float> %x) ret <2 x i64> %a } -declare <2 x i64> 
@llvm.llround.v2i64.v2f32(<2 x float>) define <3 x i64> @llround_v3i64_v3f32(<3 x float> %x) nounwind { ; RV32-LABEL: llround_v3i64_v3f32: @@ -221,7 +213,6 @@ define <3 x i64> @llround_v3i64_v3f32(<3 x float> %x) nounwind { %a = call <3 x i64> @llvm.llround.v3i64.v3f32(<3 x float> %x) ret <3 x i64> %a } -declare <3 x i64> @llvm.llround.v3i64.v3f32(<3 x float>) define <4 x i64> @llround_v4i64_v4f32(<4 x float> %x) nounwind { ; RV32-LABEL: llround_v4i64_v4f32: @@ -244,7 +235,6 @@ define <4 x i64> @llround_v4i64_v4f32(<4 x float> %x) nounwind { %a = call <4 x i64> @llvm.llround.v4i64.v4f32(<4 x float> %x) ret <4 x i64> %a } -declare <4 x i64> @llvm.llround.v4i64.v4f32(<4 x float>) define <8 x i64> @llround_v8i64_v8f32(<8 x float> %x) nounwind { ; RV32-LABEL: llround_v8i64_v8f32: @@ -267,7 +257,6 @@ define <8 x i64> @llround_v8i64_v8f32(<8 x float> %x) nounwind { %a = call <8 x i64> @llvm.llround.v8i64.v8f32(<8 x float> %x) ret <8 x i64> %a } -declare <8 x i64> @llvm.llround.v8i64.v8f32(<8 x float>) define <16 x i64> @llround_v16i64_v16f32(<16 x float> %x) nounwind { ; RV32-LABEL: llround_v16i64_v16f32: @@ -290,7 +279,6 @@ define <16 x i64> @llround_v16i64_v16f32(<16 x float> %x) nounwind { %a = call <16 x i64> @llvm.llround.v16i64.v16f32(<16 x float> %x) ret <16 x i64> %a } -declare <16 x i64> @llvm.llround.v16i64.v16f32(<16 x float>) define <1 x i64> @llround_v1i64_v1f64(<1 x double> %x) nounwind { ; RV32-LABEL: llround_v1i64_v1f64: @@ -311,7 +299,6 @@ define <1 x i64> @llround_v1i64_v1f64(<1 x double> %x) nounwind { %a = call <1 x i64> @llvm.llround.v1i64.v1f64(<1 x double> %x) ret <1 x i64> %a } -declare <1 x i64> @llvm.llround.v1i64.v1f64(<1 x double>) define <2 x i64> @llround_v2i64_v2f64(<2 x double> %x) nounwind { ; RV32-LABEL: llround_v2i64_v2f64: @@ -332,7 +319,6 @@ define <2 x i64> @llround_v2i64_v2f64(<2 x double> %x) nounwind { %a = call <2 x i64> @llvm.llround.v2i64.v2f64(<2 x double> %x) ret <2 x i64> %a } -declare <2 x i64> @llvm.llround.v2i64.v2f64(<2 
x double>) define <4 x i64> @llround_v4i64_v4f64(<4 x double> %x) nounwind { ; RV32-LABEL: llround_v4i64_v4f64: @@ -353,7 +339,6 @@ define <4 x i64> @llround_v4i64_v4f64(<4 x double> %x) nounwind { %a = call <4 x i64> @llvm.llround.v4i64.v4f64(<4 x double> %x) ret <4 x i64> %a } -declare <4 x i64> @llvm.llround.v4i64.v4f64(<4 x double>) define <8 x i64> @llround_v8i64_v8f64(<8 x double> %x) nounwind { ; RV32-LABEL: llround_v8i64_v8f64: @@ -374,4 +359,3 @@ define <8 x i64> @llround_v8i64_v8f64(<8 x double> %x) nounwind { %a = call <8 x i64> @llvm.llround.v8i64.v8f64(<8 x double> %x) ret <8 x i64> %a } -declare <8 x i64> @llvm.llround.v8i64.v8f64(<8 x double>) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lrint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lrint-vp.ll index 5b5163c17a5c9..613e3e1618732 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lrint-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lrint-vp.ll @@ -28,7 +28,6 @@ define <1 x iXLen> @lrint_v1f32(<1 x float> %x, <1 x i1> %m, i32 zeroext %evl) { %a = call <1 x iXLen> @llvm.vp.lrint.v1iXLen.v1f32(<1 x float> %x, <1 x i1> %m, i32 %evl) ret <1 x iXLen> %a } -declare <1 x iXLen> @llvm.vp.lrint.v1iXLen.v1f32(<1 x float>, <1 x i1>, i32) define <2 x iXLen> @lrint_v2f32(<2 x float> %x, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: lrint_v2f32: @@ -52,7 +51,6 @@ define <2 x iXLen> @lrint_v2f32(<2 x float> %x, <2 x i1> %m, i32 zeroext %evl) { %a = call <2 x iXLen> @llvm.vp.lrint.v2iXLen.v2f32(<2 x float> %x, <2 x i1> %m, i32 %evl) ret <2 x iXLen> %a } -declare <2 x iXLen> @llvm.vp.lrint.v2iXLen.v2f32(<2 x float>, <2 x i1>, i32) define <3 x iXLen> @lrint_v3f32(<3 x float> %x, <3 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: lrint_v3f32: @@ -76,7 +74,6 @@ define <3 x iXLen> @lrint_v3f32(<3 x float> %x, <3 x i1> %m, i32 zeroext %evl) { %a = call <3 x iXLen> @llvm.vp.lrint.v3iXLen.v3f32(<3 x float> %x, <3 x i1> %m, i32 %evl) ret <3 x iXLen> %a } -declare <3 x iXLen> 
@llvm.vp.lrint.v3iXLen.v3f32(<3 x float>, <3 x i1>, i32) define <4 x iXLen> @lrint_v4f32(<4 x float> %x, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: lrint_v4f32: @@ -100,7 +97,6 @@ define <4 x iXLen> @lrint_v4f32(<4 x float> %x, <4 x i1> %m, i32 zeroext %evl) { %a = call <4 x iXLen> @llvm.vp.lrint.v4iXLen.v4f32(<4 x float> %x, <4 x i1> %m, i32 %evl) ret <4 x iXLen> %a } -declare <4 x iXLen> @llvm.vp.lrint.v4iXLen.v4f32(<4 x float>, <4 x i1>, i32) define <8 x iXLen> @lrint_v8f32(<8 x float> %x, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: lrint_v8f32: @@ -124,7 +120,6 @@ define <8 x iXLen> @lrint_v8f32(<8 x float> %x, <8 x i1> %m, i32 zeroext %evl) { %a = call <8 x iXLen> @llvm.vp.lrint.v8iXLen.v8f32(<8 x float> %x, <8 x i1> %m, i32 %evl) ret <8 x iXLen> %a } -declare <8 x iXLen> @llvm.vp.lrint.v8iXLen.v8f32(<8 x float>, <8 x i1>, i32) define <16 x iXLen> @lrint_v16f32(<16 x float> %x, <16 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: lrint_v16f32: @@ -148,7 +143,6 @@ define <16 x iXLen> @lrint_v16f32(<16 x float> %x, <16 x i1> %m, i32 zeroext %ev %a = call <16 x iXLen> @llvm.vp.lrint.v16iXLen.v16f32(<16 x float> %x, <16 x i1> %m, i32 %evl) ret <16 x iXLen> %a } -declare <16 x iXLen> @llvm.vp.lrint.v16iXLen.v16f32(<16 x float>, <16 x i1>, i32) define <1 x iXLen> @lrint_v1f64(<1 x double> %x, <1 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: lrint_v1f64: @@ -173,7 +167,6 @@ define <1 x iXLen> @lrint_v1f64(<1 x double> %x, <1 x i1> %m, i32 zeroext %evl) %a = call <1 x iXLen> @llvm.vp.lrint.v1iXLen.v1f64(<1 x double> %x, <1 x i1> %m, i32 %evl) ret <1 x iXLen> %a } -declare <1 x iXLen> @llvm.vp.lrint.v1iXLen.v1f64(<1 x double>, <1 x i1>, i32) define <2 x iXLen> @lrint_v2f64(<2 x double> %x, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: lrint_v2f64: @@ -198,7 +191,6 @@ define <2 x iXLen> @lrint_v2f64(<2 x double> %x, <2 x i1> %m, i32 zeroext %evl) %a = call <2 x iXLen> @llvm.vp.lrint.v2iXLen.v2f64(<2 x double> %x, <2 x i1> %m, i32 %evl) ret <2 x iXLen> %a } -declare <2 
x iXLen> @llvm.vp.lrint.v2iXLen.v2f64(<2 x double>, <2 x i1>, i32) define <4 x iXLen> @lrint_v4f64(<4 x double> %x, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: lrint_v4f64: @@ -223,7 +215,6 @@ define <4 x iXLen> @lrint_v4f64(<4 x double> %x, <4 x i1> %m, i32 zeroext %evl) %a = call <4 x iXLen> @llvm.vp.lrint.v4iXLen.v4f64(<4 x double> %x, <4 x i1> %m, i32 %evl) ret <4 x iXLen> %a } -declare <4 x iXLen> @llvm.vp.lrint.v4iXLen.v4f64(<4 x double>, <4 x i1>, i32) define <8 x iXLen> @lrint_v8f64(<8 x double> %x, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: lrint_v8f64: @@ -248,4 +239,3 @@ define <8 x iXLen> @lrint_v8f64(<8 x double> %x, <8 x i1> %m, i32 zeroext %evl) %a = call <8 x iXLen> @llvm.vp.lrint.v8iXLen.v8f64(<8 x double> %x, <8 x i1> %m, i32 %evl) ret <8 x iXLen> %a } -declare <8 x iXLen> @llvm.vp.lrint.v8iXLen.v8f64(<8 x double>, <8 x i1>, i32) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lrint.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lrint.ll index a52290072c540..330e9468f1ab6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lrint.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lrint.ll @@ -28,7 +28,6 @@ define <1 x iXLen> @lrint_v1f32(<1 x float> %x) { %a = call <1 x iXLen> @llvm.lrint.v1iXLen.v1f32(<1 x float> %x) ret <1 x iXLen> %a } -declare <1 x iXLen> @llvm.lrint.v1iXLen.v1f32(<1 x float>) define <2 x iXLen> @lrint_v2f32(<2 x float> %x) { ; RV32-LABEL: lrint_v2f32: @@ -52,7 +51,6 @@ define <2 x iXLen> @lrint_v2f32(<2 x float> %x) { %a = call <2 x iXLen> @llvm.lrint.v2iXLen.v2f32(<2 x float> %x) ret <2 x iXLen> %a } -declare <2 x iXLen> @llvm.lrint.v2iXLen.v2f32(<2 x float>) define <3 x iXLen> @lrint_v3f32(<3 x float> %x) { ; RV32-LABEL: lrint_v3f32: @@ -76,7 +74,6 @@ define <3 x iXLen> @lrint_v3f32(<3 x float> %x) { %a = call <3 x iXLen> @llvm.lrint.v3iXLen.v3f32(<3 x float> %x) ret <3 x iXLen> %a } -declare <3 x iXLen> @llvm.lrint.v3iXLen.v3f32(<3 x float>) define <4 x iXLen> @lrint_v4f32(<4 x float> %x) { ; RV32-LABEL: 
lrint_v4f32: @@ -100,7 +97,6 @@ define <4 x iXLen> @lrint_v4f32(<4 x float> %x) { %a = call <4 x iXLen> @llvm.lrint.v4iXLen.v4f32(<4 x float> %x) ret <4 x iXLen> %a } -declare <4 x iXLen> @llvm.lrint.v4iXLen.v4f32(<4 x float>) define <8 x iXLen> @lrint_v8f32(<8 x float> %x) { ; RV32-LABEL: lrint_v8f32: @@ -124,7 +120,6 @@ define <8 x iXLen> @lrint_v8f32(<8 x float> %x) { %a = call <8 x iXLen> @llvm.lrint.v8iXLen.v8f32(<8 x float> %x) ret <8 x iXLen> %a } -declare <8 x iXLen> @llvm.lrint.v8iXLen.v8f32(<8 x float>) define <16 x iXLen> @lrint_v16f32(<16 x float> %x) { ; RV32-LABEL: lrint_v16f32: @@ -148,7 +143,6 @@ define <16 x iXLen> @lrint_v16f32(<16 x float> %x) { %a = call <16 x iXLen> @llvm.lrint.v16iXLen.v16f32(<16 x float> %x) ret <16 x iXLen> %a } -declare <16 x iXLen> @llvm.lrint.v16iXLen.v16f32(<16 x float>) define <1 x iXLen> @lrint_v1f64(<1 x double> %x) { ; RV32-LABEL: lrint_v1f64: @@ -173,7 +167,6 @@ define <1 x iXLen> @lrint_v1f64(<1 x double> %x) { %a = call <1 x iXLen> @llvm.lrint.v1iXLen.v1f64(<1 x double> %x) ret <1 x iXLen> %a } -declare <1 x iXLen> @llvm.lrint.v1iXLen.v1f64(<1 x double>) define <2 x iXLen> @lrint_v2f64(<2 x double> %x) { ; RV32-LABEL: lrint_v2f64: @@ -198,7 +191,6 @@ define <2 x iXLen> @lrint_v2f64(<2 x double> %x) { %a = call <2 x iXLen> @llvm.lrint.v2iXLen.v2f64(<2 x double> %x) ret <2 x iXLen> %a } -declare <2 x iXLen> @llvm.lrint.v2iXLen.v2f64(<2 x double>) define <4 x iXLen> @lrint_v4f64(<4 x double> %x) { ; RV32-LABEL: lrint_v4f64: @@ -223,7 +215,6 @@ define <4 x iXLen> @lrint_v4f64(<4 x double> %x) { %a = call <4 x iXLen> @llvm.lrint.v4iXLen.v4f64(<4 x double> %x) ret <4 x iXLen> %a } -declare <4 x iXLen> @llvm.lrint.v4iXLen.v4f64(<4 x double>) define <8 x iXLen> @lrint_v8f64(<8 x double> %x) { ; RV32-LABEL: lrint_v8f64: @@ -248,7 +239,6 @@ define <8 x iXLen> @lrint_v8f64(<8 x double> %x) { %a = call <8 x iXLen> @llvm.lrint.v8iXLen.v8f64(<8 x double> %x) ret <8 x iXLen> %a } -declare <8 x iXLen> @llvm.lrint.v8iXLen.v8f64(<8 
x double>) define <1 x iXLen> @lrint_v1f16(<1 x half> %x) { ; RV32-LABEL: lrint_v1f16: @@ -277,7 +267,6 @@ define <1 x iXLen> @lrint_v1f16(<1 x half> %x) { %a = call <1 x iXLen> @llvm.lrint.v1iXLen.v1f16(<1 x half> %x) ret <1 x iXLen> %a } -declare <1 x iXLen> @llvm.lrint.v1iXLen.v1f16(<1 x half>) define <2 x iXLen> @lrint_v2f16(<2 x half> %x) { ; RV32-LABEL: lrint_v2f16: @@ -306,7 +295,6 @@ define <2 x iXLen> @lrint_v2f16(<2 x half> %x) { %a = call <2 x iXLen> @llvm.lrint.v2iXLen.v2f16(<2 x half> %x) ret <2 x iXLen> %a } -declare <2 x iXLen> @llvm.lrint.v2iXLen.v2f16(<2 x half>) define <3 x iXLen> @lrint_v3f16(<3 x half> %x) { ; RV32-LABEL: lrint_v3f16: @@ -335,7 +323,6 @@ define <3 x iXLen> @lrint_v3f16(<3 x half> %x) { %a = call <3 x iXLen> @llvm.lrint.v3iXLen.v3f16(<3 x half> %x) ret <3 x iXLen> %a } -declare <3 x iXLen> @llvm.lrint.v3iXLen.v3f16(<3 x half>) define <4 x iXLen> @lrint_v4f16(<4 x half> %x) { ; RV32-LABEL: lrint_v4f16: @@ -364,7 +351,6 @@ define <4 x iXLen> @lrint_v4f16(<4 x half> %x) { %a = call <4 x iXLen> @llvm.lrint.v4iXLen.v4f16(<4 x half> %x) ret <4 x iXLen> %a } -declare <4 x iXLen> @llvm.lrint.v4iXLen.v4f16(<4 x half>) define <8 x iXLen> @lrint_v8f16(<8 x half> %x) { ; RV32-LABEL: lrint_v8f16: @@ -393,7 +379,6 @@ define <8 x iXLen> @lrint_v8f16(<8 x half> %x) { %a = call <8 x iXLen> @llvm.lrint.v8iXLen.v8f16(<8 x half> %x) ret <8 x iXLen> %a } -declare <8 x iXLen> @llvm.lrint.v8iXLen.v8f16(<8 x half>) define <16 x iXLen> @lrint_v16f16(<16 x half> %x) { ; RV32-LABEL: lrint_v16f16: @@ -422,7 +407,6 @@ define <16 x iXLen> @lrint_v16f16(<16 x half> %x) { %a = call <16 x iXLen> @llvm.lrint.v16iXLen.v16f16(<16 x half> %x) ret <16 x iXLen> %a } -declare <16 x iXLen> @llvm.lrint.v16iXLen.v16f16(<16 x half>) define <1 x iXLen> @lrint_v1bf16(<1 x bfloat> %x) { ; RV32-LABEL: lrint_v1bf16: @@ -451,7 +435,6 @@ define <1 x iXLen> @lrint_v1bf16(<1 x bfloat> %x) { %a = call <1 x iXLen> @llvm.lrint.v1iXLen.v1bf16(<1 x bfloat> %x) ret <1 x iXLen> %a } 
-declare <1 x iXLen> @llvm.lrint.v1iXLen.v1bf16(<1 x bfloat>) define <2 x iXLen> @lrint_v2bf16(<2 x bfloat> %x) { ; RV32-LABEL: lrint_v2bf16: @@ -480,7 +463,6 @@ define <2 x iXLen> @lrint_v2bf16(<2 x bfloat> %x) { %a = call <2 x iXLen> @llvm.lrint.v2iXLen.v2bf16(<2 x bfloat> %x) ret <2 x iXLen> %a } -declare <2 x iXLen> @llvm.lrint.v2iXLen.v2bf16(<2 x bfloat>) define <3 x iXLen> @lrint_v3bf16(<3 x bfloat> %x) { ; RV32-LABEL: lrint_v3bf16: @@ -509,7 +491,6 @@ define <3 x iXLen> @lrint_v3bf16(<3 x bfloat> %x) { %a = call <3 x iXLen> @llvm.lrint.v3iXLen.v3bf16(<3 x bfloat> %x) ret <3 x iXLen> %a } -declare <3 x iXLen> @llvm.lrint.v3iXLen.v3bf16(<3 x bfloat>) define <4 x iXLen> @lrint_v4bf16(<4 x bfloat> %x) { ; RV32-LABEL: lrint_v4bf16: @@ -538,7 +519,6 @@ define <4 x iXLen> @lrint_v4bf16(<4 x bfloat> %x) { %a = call <4 x iXLen> @llvm.lrint.v4iXLen.v4bf16(<4 x bfloat> %x) ret <4 x iXLen> %a } -declare <4 x iXLen> @llvm.lrint.v4iXLen.v4bf16(<4 x bfloat>) define <8 x iXLen> @lrint_v8bf16(<8 x bfloat> %x) { ; RV32-LABEL: lrint_v8bf16: @@ -567,7 +547,6 @@ define <8 x iXLen> @lrint_v8bf16(<8 x bfloat> %x) { %a = call <8 x iXLen> @llvm.lrint.v8iXLen.v8bf16(<8 x bfloat> %x) ret <8 x iXLen> %a } -declare <8 x iXLen> @llvm.lrint.v8iXLen.v8bf16(<8 x bfloat>) define <16 x iXLen> @lrint_v16bf16(<16 x bfloat> %x) { ; RV32-LABEL: lrint_v16bf16: @@ -596,7 +575,6 @@ define <16 x iXLen> @lrint_v16bf16(<16 x bfloat> %x) { %a = call <16 x iXLen> @llvm.lrint.v16iXLen.v16bf16(<16 x bfloat> %x) ret <16 x iXLen> %a } -declare <16 x iXLen> @llvm.lrint.v16iXLen.v16bf16(<16 x bfloat>) define <32 x iXLen> @lrint_v32bf16(<32 x bfloat> %x) { ; RV32-LABEL: lrint_v32bf16: @@ -633,4 +611,3 @@ define <32 x iXLen> @lrint_v32bf16(<32 x bfloat> %x) { %a = call <32 x iXLen> @llvm.lrint.v32iXLen.v32bf16(<32 x bfloat> %x) ret <32 x iXLen> %a } -declare <32 x iXLen> @llvm.lrint.v32iXLen.v32bf16(<32 x bfloat>) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lround.ll 
b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lround.ll index 64b3b7912ed32..d1ef02665016a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lround.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lround.ll @@ -39,7 +39,6 @@ define <1 x iXLen> @lround_v1f16(<1 x half> %x) nounwind { %a = call <1 x iXLen> @llvm.lround.v1iXLen.v1f16(<1 x half> %x) ret <1 x iXLen> %a } -declare <1 x iXLen> @llvm.lround.v1iXLen.v1f16(<1 x half>) define <2 x iXLen> @lround_v2f16(<2 x half> %x) nounwind { ; RV32-LABEL: lround_v2f16: @@ -74,7 +73,6 @@ define <2 x iXLen> @lround_v2f16(<2 x half> %x) nounwind { %a = call <2 x iXLen> @llvm.lround.v2iXLen.v2f16(<2 x half> %x) ret <2 x iXLen> %a } -declare <2 x iXLen> @llvm.lround.v2iXLen.v2f16(<2 x half>) define <3 x iXLen> @lround_v3f16(<3 x half> %x) nounwind { ; RV32-LABEL: lround_v3f16: @@ -109,7 +107,6 @@ define <3 x iXLen> @lround_v3f16(<3 x half> %x) nounwind { %a = call <3 x iXLen> @llvm.lround.v3iXLen.v3f16(<3 x half> %x) ret <3 x iXLen> %a } -declare <3 x iXLen> @llvm.lround.v3iXLen.v3f16(<3 x half>) define <4 x iXLen> @lround_v4f16(<4 x half> %x) nounwind { ; RV32-LABEL: lround_v4f16: @@ -144,7 +141,6 @@ define <4 x iXLen> @lround_v4f16(<4 x half> %x) nounwind { %a = call <4 x iXLen> @llvm.lround.v4iXLen.v4f16(<4 x half> %x) ret <4 x iXLen> %a } -declare <4 x iXLen> @llvm.lround.v4iXLen.v4f16(<4 x half>) define <8 x iXLen> @lround_v8f16(<8 x half> %x) nounwind { ; RV32-LABEL: lround_v8f16: @@ -179,7 +175,6 @@ define <8 x iXLen> @lround_v8f16(<8 x half> %x) nounwind { %a = call <8 x iXLen> @llvm.lround.v8iXLen.v8f16(<8 x half> %x) ret <8 x iXLen> %a } -declare <8 x iXLen> @llvm.lround.v8iXLen.v8f16(<8 x half>) define <16 x iXLen> @lround_v16f16(<16 x half> %x) nounwind { ; RV32-LABEL: lround_v16f16: @@ -214,7 +209,6 @@ define <16 x iXLen> @lround_v16f16(<16 x half> %x) nounwind { %a = call <16 x iXLen> @llvm.lround.v16iXLen.v16f16(<16 x half> %x) ret <16 x iXLen> %a } -declare <16 x iXLen> @llvm.lround.v16iXLen.v16f16(<16 x 
half>) define <1 x iXLen> @lround_v1f32(<1 x float> %x) nounwind { ; RV32-LABEL: lround_v1f32: @@ -244,7 +238,6 @@ define <1 x iXLen> @lround_v1f32(<1 x float> %x) nounwind { %a = call <1 x iXLen> @llvm.lround.v1iXLen.v1f32(<1 x float> %x) ret <1 x iXLen> %a } -declare <1 x iXLen> @llvm.lround.v1iXLen.v1f32(<1 x float>) define <2 x iXLen> @lround_v2f32(<2 x float> %x) nounwind { ; RV32-LABEL: lround_v2f32: @@ -274,7 +267,6 @@ define <2 x iXLen> @lround_v2f32(<2 x float> %x) nounwind { %a = call <2 x iXLen> @llvm.lround.v2iXLen.v2f32(<2 x float> %x) ret <2 x iXLen> %a } -declare <2 x iXLen> @llvm.lround.v2iXLen.v2f32(<2 x float>) define <3 x iXLen> @lround_v3f32(<3 x float> %x) nounwind { ; RV32-LABEL: lround_v3f32: @@ -304,7 +296,6 @@ define <3 x iXLen> @lround_v3f32(<3 x float> %x) nounwind { %a = call <3 x iXLen> @llvm.lround.v3iXLen.v3f32(<3 x float> %x) ret <3 x iXLen> %a } -declare <3 x iXLen> @llvm.lround.v3iXLen.v3f32(<3 x float>) define <4 x iXLen> @lround_v4f32(<4 x float> %x) nounwind { ; RV32-LABEL: lround_v4f32: @@ -334,7 +325,6 @@ define <4 x iXLen> @lround_v4f32(<4 x float> %x) nounwind { %a = call <4 x iXLen> @llvm.lround.v4iXLen.v4f32(<4 x float> %x) ret <4 x iXLen> %a } -declare <4 x iXLen> @llvm.lround.v4iXLen.v4f32(<4 x float>) define <8 x iXLen> @lround_v8f32(<8 x float> %x) nounwind { ; RV32-LABEL: lround_v8f32: @@ -364,7 +354,6 @@ define <8 x iXLen> @lround_v8f32(<8 x float> %x) nounwind { %a = call <8 x iXLen> @llvm.lround.v8iXLen.v8f32(<8 x float> %x) ret <8 x iXLen> %a } -declare <8 x iXLen> @llvm.lround.v8iXLen.v8f32(<8 x float>) define <16 x iXLen> @lround_v16f32(<16 x float> %x) nounwind { ; RV32-LABEL: lround_v16f32: @@ -394,7 +383,6 @@ define <16 x iXLen> @lround_v16f32(<16 x float> %x) nounwind { %a = call <16 x iXLen> @llvm.lround.v16iXLen.v16f32(<16 x float> %x) ret <16 x iXLen> %a } -declare <16 x iXLen> @llvm.lround.v16iXLen.v16f32(<16 x float>) define <1 x iXLen> @lround_v1f64(<1 x double> %x) nounwind { ; RV32-LABEL: 
lround_v1f64: @@ -425,7 +413,6 @@ define <1 x iXLen> @lround_v1f64(<1 x double> %x) nounwind { %a = call <1 x iXLen> @llvm.lround.v1iXLen.v1f64(<1 x double> %x) ret <1 x iXLen> %a } -declare <1 x iXLen> @llvm.lround.v1iXLen.v1f64(<1 x double>) define <2 x iXLen> @lround_v2f64(<2 x double> %x) nounwind { ; RV32-LABEL: lround_v2f64: @@ -456,7 +443,6 @@ define <2 x iXLen> @lround_v2f64(<2 x double> %x) nounwind { %a = call <2 x iXLen> @llvm.lround.v2iXLen.v2f64(<2 x double> %x) ret <2 x iXLen> %a } -declare <2 x iXLen> @llvm.lround.v2iXLen.v2f64(<2 x double>) define <4 x iXLen> @lround_v4f64(<4 x double> %x) nounwind { ; RV32-LABEL: lround_v4f64: @@ -487,7 +473,6 @@ define <4 x iXLen> @lround_v4f64(<4 x double> %x) nounwind { %a = call <4 x iXLen> @llvm.lround.v4iXLen.v4f64(<4 x double> %x) ret <4 x iXLen> %a } -declare <4 x iXLen> @llvm.lround.v4iXLen.v4f64(<4 x double>) define <8 x iXLen> @lround_v8f64(<8 x double> %x) nounwind { ; RV32-LABEL: lround_v8f64: @@ -518,7 +503,6 @@ define <8 x iXLen> @lround_v8f64(<8 x double> %x) nounwind { %a = call <8 x iXLen> @llvm.lround.v8iXLen.v8f64(<8 x double> %x) ret <8 x iXLen> %a } -declare <8 x iXLen> @llvm.lround.v8iXLen.v8f64(<8 x double>) define <32 x iXLen> @lround_v32bf16(<32 x bfloat> %x) { ; RV32-LABEL: lround_v32bf16: @@ -561,4 +545,3 @@ define <32 x iXLen> @lround_v32bf16(<32 x bfloat> %x) { %a = call <32 x iXLen> @llvm.lround.v32iXLen.v32bf16(<32 x bfloat> %x) ret <32 x iXLen> %a } -declare <32 x iXLen> @llvm.lround.v32iXLen.v32bf16(<32 x bfloat>) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-marith-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-marith-vp.ll index 38e78de3575b3..edefb8e1a8ae4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-marith-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-marith-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -declare <1 x i1> @llvm.vp.and.v1i1(<1 x i1>, <1 x i1>, <1 x i1>, i32) - define <1 
x i1> @and_v1i1(<1 x i1> %b, <1 x i1> %c, <1 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: and_v1i1: ; CHECK: # %bb.0: @@ -16,8 +14,6 @@ define <1 x i1> @and_v1i1(<1 x i1> %b, <1 x i1> %c, <1 x i1> %a, i32 zeroext %ev ret <1 x i1> %v } -declare <2 x i1> @llvm.vp.and.v2i1(<2 x i1>, <2 x i1>, <2 x i1>, i32) - define <2 x i1> @and_v2i1(<2 x i1> %b, <2 x i1> %c, <2 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: and_v2i1: ; CHECK: # %bb.0: @@ -28,8 +24,6 @@ define <2 x i1> @and_v2i1(<2 x i1> %b, <2 x i1> %c, <2 x i1> %a, i32 zeroext %ev ret <2 x i1> %v } -declare <4 x i1> @llvm.vp.and.v4i1(<4 x i1>, <4 x i1>, <4 x i1>, i32) - define <4 x i1> @and_v4i1(<4 x i1> %b, <4 x i1> %c, <4 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: and_v4i1: ; CHECK: # %bb.0: @@ -40,8 +34,6 @@ define <4 x i1> @and_v4i1(<4 x i1> %b, <4 x i1> %c, <4 x i1> %a, i32 zeroext %ev ret <4 x i1> %v } -declare <8 x i1> @llvm.vp.and.v8i1(<8 x i1>, <8 x i1>, <8 x i1>, i32) - define <8 x i1> @and_v8i1(<8 x i1> %b, <8 x i1> %c, <8 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: and_v8i1: ; CHECK: # %bb.0: @@ -52,8 +44,6 @@ define <8 x i1> @and_v8i1(<8 x i1> %b, <8 x i1> %c, <8 x i1> %a, i32 zeroext %ev ret <8 x i1> %v } -declare <16 x i1> @llvm.vp.and.v16i1(<16 x i1>, <16 x i1>, <16 x i1>, i32) - define <16 x i1> @and_v16i1(<16 x i1> %b, <16 x i1> %c, <16 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: and_v16i1: ; CHECK: # %bb.0: @@ -64,8 +54,6 @@ define <16 x i1> @and_v16i1(<16 x i1> %b, <16 x i1> %c, <16 x i1> %a, i32 zeroex ret <16 x i1> %v } -declare <1 x i1> @llvm.vp.or.v1i1(<1 x i1>, <1 x i1>, <1 x i1>, i32) - define <1 x i1> @or_v1i1(<1 x i1> %b, <1 x i1> %c, <1 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: or_v1i1: ; CHECK: # %bb.0: @@ -76,8 +64,6 @@ define <1 x i1> @or_v1i1(<1 x i1> %b, <1 x i1> %c, <1 x i1> %a, i32 zeroext %evl ret <1 x i1> %v } -declare <2 x i1> @llvm.vp.or.v2i1(<2 x i1>, <2 x i1>, <2 x i1>, i32) - define <2 x i1> @or_v2i1(<2 x i1> %b, <2 x i1> %c, <2 x i1> %a, i32 zeroext %evl) { ; 
CHECK-LABEL: or_v2i1: ; CHECK: # %bb.0: @@ -88,8 +74,6 @@ define <2 x i1> @or_v2i1(<2 x i1> %b, <2 x i1> %c, <2 x i1> %a, i32 zeroext %evl ret <2 x i1> %v } -declare <4 x i1> @llvm.vp.or.v4i1(<4 x i1>, <4 x i1>, <4 x i1>, i32) - define <4 x i1> @or_v4i1(<4 x i1> %b, <4 x i1> %c, <4 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: or_v4i1: ; CHECK: # %bb.0: @@ -100,8 +84,6 @@ define <4 x i1> @or_v4i1(<4 x i1> %b, <4 x i1> %c, <4 x i1> %a, i32 zeroext %evl ret <4 x i1> %v } -declare <8 x i1> @llvm.vp.or.v8i1(<8 x i1>, <8 x i1>, <8 x i1>, i32) - define <8 x i1> @or_v8i1(<8 x i1> %b, <8 x i1> %c, <8 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: or_v8i1: ; CHECK: # %bb.0: @@ -112,8 +94,6 @@ define <8 x i1> @or_v8i1(<8 x i1> %b, <8 x i1> %c, <8 x i1> %a, i32 zeroext %evl ret <8 x i1> %v } -declare <16 x i1> @llvm.vp.or.v16i1(<16 x i1>, <16 x i1>, <16 x i1>, i32) - define <16 x i1> @or_v16i1(<16 x i1> %b, <16 x i1> %c, <16 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: or_v16i1: ; CHECK: # %bb.0: @@ -124,8 +104,6 @@ define <16 x i1> @or_v16i1(<16 x i1> %b, <16 x i1> %c, <16 x i1> %a, i32 zeroext ret <16 x i1> %v } -declare <1 x i1> @llvm.vp.xor.v1i1(<1 x i1>, <1 x i1>, <1 x i1>, i32) - define <1 x i1> @xor_v1i1(<1 x i1> %b, <1 x i1> %c, <1 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_v1i1: ; CHECK: # %bb.0: @@ -136,8 +114,6 @@ define <1 x i1> @xor_v1i1(<1 x i1> %b, <1 x i1> %c, <1 x i1> %a, i32 zeroext %ev ret <1 x i1> %v } -declare <2 x i1> @llvm.vp.xor.v2i1(<2 x i1>, <2 x i1>, <2 x i1>, i32) - define <2 x i1> @xor_v2i1(<2 x i1> %b, <2 x i1> %c, <2 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_v2i1: ; CHECK: # %bb.0: @@ -148,8 +124,6 @@ define <2 x i1> @xor_v2i1(<2 x i1> %b, <2 x i1> %c, <2 x i1> %a, i32 zeroext %ev ret <2 x i1> %v } -declare <4 x i1> @llvm.vp.xor.v4i1(<4 x i1>, <4 x i1>, <4 x i1>, i32) - define <4 x i1> @xor_v4i1(<4 x i1> %b, <4 x i1> %c, <4 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_v4i1: ; CHECK: # %bb.0: @@ -160,8 +134,6 @@ define <4 x i1> 
@xor_v4i1(<4 x i1> %b, <4 x i1> %c, <4 x i1> %a, i32 zeroext %ev ret <4 x i1> %v } -declare <8 x i1> @llvm.vp.xor.v8i1(<8 x i1>, <8 x i1>, <8 x i1>, i32) - define <8 x i1> @xor_v8i1(<8 x i1> %b, <8 x i1> %c, <8 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_v8i1: ; CHECK: # %bb.0: @@ -172,8 +144,6 @@ define <8 x i1> @xor_v8i1(<8 x i1> %b, <8 x i1> %c, <8 x i1> %a, i32 zeroext %ev ret <8 x i1> %v } -declare <16 x i1> @llvm.vp.xor.v16i1(<16 x i1>, <16 x i1>, <16 x i1>, i32) - define <16 x i1> @xor_v16i1(<16 x i1> %b, <16 x i1> %c, <16 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_v16i1: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll index 7e6f2c76e5881..f3cea49ce7946 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll @@ -17,8 +17,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+m,+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+zve32f,+zvl128b -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RV64ZVE32F,RV64ZVE32F-ZVFHMIN -declare <1 x i8> @llvm.masked.gather.v1i8.v1p0(<1 x ptr>, i32, <1 x i1>, <1 x i8>) - define <1 x i8> @mgather_v1i8(<1 x ptr> %ptrs, <1 x i1> %m, <1 x i8> %passthru) { ; RV32V-LABEL: mgather_v1i8: ; RV32V: # %bb.0: @@ -55,8 +53,6 @@ define <1 x i8> @mgather_v1i8(<1 x ptr> %ptrs, <1 x i1> %m, <1 x i8> %passthru) ret <1 x i8> %v } -declare <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x i8>) - define <2 x i8> @mgather_v2i8(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i8> %passthru) { ; RV32V-LABEL: mgather_v2i8: ; RV32V: # %bb.0: @@ -449,8 +445,6 @@ define <2 x i64> @mgather_v2i8_zextload_v2i64(<2 x ptr> %ptrs, <2 x i1> %m, <2 x ret <2 x i64> %ev } -declare <4 x i8> @llvm.masked.gather.v4i8.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x i8>) - define <4 x i8> @mgather_v4i8(<4 x ptr> %ptrs, <4 x i1> %m, <4 x i8> %passthru) { ; 
RV32-LABEL: mgather_v4i8: ; RV32: # %bb.0: @@ -573,8 +567,6 @@ define <4 x i8> @mgather_falsemask_v4i8(<4 x ptr> %ptrs, <4 x i8> %passthru) { ret <4 x i8> %v } -declare <8 x i8> @llvm.masked.gather.v8i8.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x i8>) - define <8 x i8> @mgather_v8i8(<8 x ptr> %ptrs, <8 x i1> %m, <8 x i8> %passthru) { ; RV32-LABEL: mgather_v8i8: ; RV32: # %bb.0: @@ -810,8 +802,6 @@ define <8 x i8> @mgather_baseidx_v8i8(ptr %base, <8 x i8> %idxs, <8 x i1> %m, <8 ret <8 x i8> %v } -declare <1 x i16> @llvm.masked.gather.v1i16.v1p0(<1 x ptr>, i32, <1 x i1>, <1 x i16>) - define <1 x i16> @mgather_v1i16(<1 x ptr> %ptrs, <1 x i1> %m, <1 x i16> %passthru) { ; RV32V-LABEL: mgather_v1i16: ; RV32V: # %bb.0: @@ -848,8 +838,6 @@ define <1 x i16> @mgather_v1i16(<1 x ptr> %ptrs, <1 x i1> %m, <1 x i16> %passthr ret <1 x i16> %v } -declare <2 x i16> @llvm.masked.gather.v2i16.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x i16>) - define <2 x i16> @mgather_v2i16(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i16> %passthru) { ; RV32V-LABEL: mgather_v2i16: ; RV32V: # %bb.0: @@ -1138,8 +1126,6 @@ define <2 x i64> @mgather_v2i16_zextload_v2i64(<2 x ptr> %ptrs, <2 x i1> %m, <2 ret <2 x i64> %ev } -declare <4 x i16> @llvm.masked.gather.v4i16.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x i16>) - define <4 x i16> @mgather_v4i16(<4 x ptr> %ptrs, <4 x i1> %m, <4 x i16> %passthru) { ; RV32-LABEL: mgather_v4i16: ; RV32: # %bb.0: @@ -1262,8 +1248,6 @@ define <4 x i16> @mgather_falsemask_v4i16(<4 x ptr> %ptrs, <4 x i16> %passthru) ret <4 x i16> %v } -declare <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x i16>) - define <8 x i16> @mgather_v8i16(<8 x ptr> %ptrs, <8 x i1> %m, <8 x i16> %passthru) { ; RV32-LABEL: mgather_v8i16: ; RV32: # %bb.0: @@ -1938,8 +1922,6 @@ define <8 x i16> @mgather_baseidx_v8i16(ptr %base, <8 x i16> %idxs, <8 x i1> %m, ret <8 x i16> %v } -declare <1 x i32> @llvm.masked.gather.v1i32.v1p0(<1 x ptr>, i32, <1 x i1>, <1 x i32>) - define <1 x i32> @mgather_v1i32(<1 x ptr> %ptrs, 
<1 x i1> %m, <1 x i32> %passthru) { ; RV32V-LABEL: mgather_v1i32: ; RV32V: # %bb.0: @@ -1976,8 +1958,6 @@ define <1 x i32> @mgather_v1i32(<1 x ptr> %ptrs, <1 x i1> %m, <1 x i32> %passthr ret <1 x i32> %v } -declare <2 x i32> @llvm.masked.gather.v2i32.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x i32>) - define <2 x i32> @mgather_v2i32(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i32> %passthru) { ; RV32V-LABEL: mgather_v2i32: ; RV32V: # %bb.0: @@ -2154,8 +2134,6 @@ define <2 x i64> @mgather_v2i32_zextload_v2i64(<2 x ptr> %ptrs, <2 x i1> %m, <2 ret <2 x i64> %ev } -declare <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x i32>) - define <4 x i32> @mgather_v4i32(<4 x ptr> %ptrs, <4 x i1> %m, <4 x i32> %passthru) { ; RV32-LABEL: mgather_v4i32: ; RV32: # %bb.0: @@ -2277,8 +2255,6 @@ define <4 x i32> @mgather_falsemask_v4i32(<4 x ptr> %ptrs, <4 x i32> %passthru) ret <4 x i32> %v } -declare <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x i32>) - define <8 x i32> @mgather_v8i32(<8 x ptr> %ptrs, <8 x i1> %m, <8 x i32> %passthru) { ; RV32-LABEL: mgather_v8i32: ; RV32: # %bb.0: @@ -3391,8 +3367,6 @@ define <8 x i32> @mgather_baseidx_v8i32(ptr %base, <8 x i32> %idxs, <8 x i1> %m, ret <8 x i32> %v } -declare <1 x i64> @llvm.masked.gather.v1i64.v1p0(<1 x ptr>, i32, <1 x i1>, <1 x i64>) - define <1 x i64> @mgather_v1i64(<1 x ptr> %ptrs, <1 x i1> %m, <1 x i64> %passthru) { ; RV32V-LABEL: mgather_v1i64: ; RV32V: # %bb.0: @@ -3435,8 +3409,6 @@ define <1 x i64> @mgather_v1i64(<1 x ptr> %ptrs, <1 x i1> %m, <1 x i64> %passthr ret <1 x i64> %v } -declare <2 x i64> @llvm.masked.gather.v2i64.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x i64>) - define <2 x i64> @mgather_v2i64(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i64> %passthru) { ; RV32V-LABEL: mgather_v2i64: ; RV32V: # %bb.0: @@ -3508,8 +3480,6 @@ define <2 x i64> @mgather_v2i64(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i64> %passthr ret <2 x i64> %v } -declare <4 x i64> @llvm.masked.gather.v4i64.v4p0(<4 x ptr>, i32, <4 x i1>, <4 
x i64>) - define <4 x i64> @mgather_v4i64(<4 x ptr> %ptrs, <4 x i1> %m, <4 x i64> %passthru) { ; RV32V-LABEL: mgather_v4i64: ; RV32V: # %bb.0: @@ -3748,8 +3718,6 @@ define <4 x i64> @mgather_falsemask_v4i64(<4 x ptr> %ptrs, <4 x i64> %passthru) ret <4 x i64> %v } -declare <8 x i64> @llvm.masked.gather.v8i64.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x i64>) - define <8 x i64> @mgather_v8i64(<8 x ptr> %ptrs, <8 x i1> %m, <8 x i64> %passthru) { ; RV32V-LABEL: mgather_v8i64: ; RV32V: # %bb.0: @@ -6827,8 +6795,6 @@ define <8 x i64> @mgather_baseidx_v8i64(ptr %base, <8 x i64> %idxs, <8 x i1> %m, ret <8 x i64> %v } -declare <1 x bfloat> @llvm.masked.gather.v1bf16.v1p0(<1 x ptr>, i32, <1 x i1>, <1 x bfloat>) - define <1 x bfloat> @mgather_v1bf16(<1 x ptr> %ptrs, <1 x i1> %m, <1 x bfloat> %passthru) { ; RV32V-LABEL: mgather_v1bf16: ; RV32V: # %bb.0: @@ -6865,8 +6831,6 @@ define <1 x bfloat> @mgather_v1bf16(<1 x ptr> %ptrs, <1 x i1> %m, <1 x bfloat> % ret <1 x bfloat> %v } -declare <2 x bfloat> @llvm.masked.gather.v2bf16.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x bfloat>) - define <2 x bfloat> @mgather_v2bf16(<2 x ptr> %ptrs, <2 x i1> %m, <2 x bfloat> %passthru) { ; RV32V-LABEL: mgather_v2bf16: ; RV32V: # %bb.0: @@ -6917,8 +6881,6 @@ define <2 x bfloat> @mgather_v2bf16(<2 x ptr> %ptrs, <2 x i1> %m, <2 x bfloat> % ret <2 x bfloat> %v } -declare <4 x bfloat> @llvm.masked.gather.v4bf16.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x bfloat>) - define <4 x bfloat> @mgather_v4bf16(<4 x ptr> %ptrs, <4 x i1> %m, <4 x bfloat> %passthru) { ; RV32-LABEL: mgather_v4bf16: ; RV32: # %bb.0: @@ -7041,8 +7003,6 @@ define <4 x bfloat> @mgather_falsemask_v4bf16(<4 x ptr> %ptrs, <4 x bfloat> %pas ret <4 x bfloat> %v } -declare <8 x bfloat> @llvm.masked.gather.v8bf16.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x bfloat>) - define <8 x bfloat> @mgather_v8bf16(<8 x ptr> %ptrs, <8 x i1> %m, <8 x bfloat> %passthru) { ; RV32-LABEL: mgather_v8bf16: ; RV32: # %bb.0: @@ -7717,8 +7677,6 @@ define <8 x bfloat> @mgather_baseidx_v8bf16(ptr 
%base, <8 x i16> %idxs, <8 x i1> ret <8 x bfloat> %v } -declare <1 x half> @llvm.masked.gather.v1f16.v1p0(<1 x ptr>, i32, <1 x i1>, <1 x half>) - define <1 x half> @mgather_v1f16(<1 x ptr> %ptrs, <1 x i1> %m, <1 x half> %passthru) { ; RV32V-LABEL: mgather_v1f16: ; RV32V: # %bb.0: @@ -7755,8 +7713,6 @@ define <1 x half> @mgather_v1f16(<1 x ptr> %ptrs, <1 x i1> %m, <1 x half> %passt ret <1 x half> %v } -declare <2 x half> @llvm.masked.gather.v2f16.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x half>) - define <2 x half> @mgather_v2f16(<2 x ptr> %ptrs, <2 x i1> %m, <2 x half> %passthru) { ; RV32V-LABEL: mgather_v2f16: ; RV32V: # %bb.0: @@ -7832,8 +7788,6 @@ define <2 x half> @mgather_v2f16(<2 x ptr> %ptrs, <2 x i1> %m, <2 x half> %passt ret <2 x half> %v } -declare <4 x half> @llvm.masked.gather.v4f16.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x half>) - define <4 x half> @mgather_v4f16(<4 x ptr> %ptrs, <4 x i1> %m, <4 x half> %passthru) { ; RV32-LABEL: mgather_v4f16: ; RV32: # %bb.0: @@ -8022,8 +7976,6 @@ define <4 x half> @mgather_falsemask_v4f16(<4 x ptr> %ptrs, <4 x half> %passthru ret <4 x half> %v } -declare <8 x half> @llvm.masked.gather.v8f16.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x half>) - define <8 x half> @mgather_v8f16(<8 x ptr> %ptrs, <8 x i1> %m, <8 x half> %passthru) { ; RV32-LABEL: mgather_v8f16: ; RV32: # %bb.0: @@ -9256,8 +9208,6 @@ define <8 x half> @mgather_baseidx_v8f16(ptr %base, <8 x i16> %idxs, <8 x i1> %m ret <8 x half> %v } -declare <1 x float> @llvm.masked.gather.v1f32.v1p0(<1 x ptr>, i32, <1 x i1>, <1 x float>) - define <1 x float> @mgather_v1f32(<1 x ptr> %ptrs, <1 x i1> %m, <1 x float> %passthru) { ; RV32V-LABEL: mgather_v1f32: ; RV32V: # %bb.0: @@ -9294,8 +9244,6 @@ define <1 x float> @mgather_v1f32(<1 x ptr> %ptrs, <1 x i1> %m, <1 x float> %pas ret <1 x float> %v } -declare <2 x float> @llvm.masked.gather.v2f32.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x float>) - define <2 x float> @mgather_v2f32(<2 x ptr> %ptrs, <2 x i1> %m, <2 x float> %passthru) { ; RV32V-LABEL: 
mgather_v2f32: ; RV32V: # %bb.0: @@ -9346,8 +9294,6 @@ define <2 x float> @mgather_v2f32(<2 x ptr> %ptrs, <2 x i1> %m, <2 x float> %pas ret <2 x float> %v } -declare <4 x float> @llvm.masked.gather.v4f32.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x float>) - define <4 x float> @mgather_v4f32(<4 x ptr> %ptrs, <4 x i1> %m, <4 x float> %passthru) { ; RV32-LABEL: mgather_v4f32: ; RV32: # %bb.0: @@ -9469,8 +9415,6 @@ define <4 x float> @mgather_falsemask_v4f32(<4 x ptr> %ptrs, <4 x float> %passth ret <4 x float> %v } -declare <8 x float> @llvm.masked.gather.v8f32.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x float>) - define <8 x float> @mgather_v8f32(<8 x ptr> %ptrs, <8 x i1> %m, <8 x float> %passthru) { ; RV32-LABEL: mgather_v8f32: ; RV32: # %bb.0: @@ -10583,8 +10527,6 @@ define <8 x float> @mgather_baseidx_v8f32(ptr %base, <8 x i32> %idxs, <8 x i1> % ret <8 x float> %v } -declare <1 x double> @llvm.masked.gather.v1f64.v1p0(<1 x ptr>, i32, <1 x i1>, <1 x double>) - define <1 x double> @mgather_v1f64(<1 x ptr> %ptrs, <1 x i1> %m, <1 x double> %passthru) { ; RV32V-LABEL: mgather_v1f64: ; RV32V: # %bb.0: @@ -10625,8 +10567,6 @@ define <1 x double> @mgather_v1f64(<1 x ptr> %ptrs, <1 x i1> %m, <1 x double> %p ret <1 x double> %v } -declare <2 x double> @llvm.masked.gather.v2f64.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x double>) - define <2 x double> @mgather_v2f64(<2 x ptr> %ptrs, <2 x i1> %m, <2 x double> %passthru) { ; RV32V-LABEL: mgather_v2f64: ; RV32V: # %bb.0: @@ -10688,8 +10628,6 @@ define <2 x double> @mgather_v2f64(<2 x ptr> %ptrs, <2 x i1> %m, <2 x double> %p ret <2 x double> %v } -declare <4 x double> @llvm.masked.gather.v4f64.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x double>) - define <4 x double> @mgather_v4f64(<4 x ptr> %ptrs, <4 x i1> %m, <4 x double> %passthru) { ; RV32V-LABEL: mgather_v4f64: ; RV32V: # %bb.0: @@ -10882,8 +10820,6 @@ define <4 x double> @mgather_falsemask_v4f64(<4 x ptr> %ptrs, <4 x double> %pass ret <4 x double> %v } -declare <8 x double> 
@llvm.masked.gather.v8f64.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x double>) - define <8 x double> @mgather_v8f64(<8 x ptr> %ptrs, <8 x i1> %m, <8 x double> %passthru) { ; RV32V-LABEL: mgather_v8f64: ; RV32V: # %bb.0: @@ -13247,8 +13183,6 @@ define <8 x double> @mgather_baseidx_v8f64(ptr %base, <8 x i64> %idxs, <8 x i1> ret <8 x double> %v } -declare <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr>, i32, <16 x i1>, <16 x i8>) - define <16 x i8> @mgather_baseidx_v16i8(ptr %base, <16 x i8> %idxs, <16 x i1> %m, <16 x i8> %passthru) { ; RV32-LABEL: mgather_baseidx_v16i8: ; RV32: # %bb.0: @@ -13470,8 +13404,6 @@ define <16 x i8> @mgather_baseidx_v16i8(ptr %base, <16 x i8> %idxs, <16 x i1> %m ret <16 x i8> %v } -declare <32 x i8> @llvm.masked.gather.v32i8.v32p0(<32 x ptr>, i32, <32 x i1>, <32 x i8>) - define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m, <32 x i8> %passthru) { ; RV32-LABEL: mgather_baseidx_v32i8: ; RV32: # %bb.0: @@ -13904,7 +13836,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m ret <32 x i8> %v } - define <4 x i32> @mgather_broadcast_load_unmasked(ptr %base) { ; CHECK-LABEL: mgather_broadcast_load_unmasked: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll index e86fae6d501e5..c5e874a6f8f91 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll @@ -17,8 +17,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+m,+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+zve32f,+zvl128b -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RV64ZVE32F,RV64ZVE32F-ZVFHMIN -declare void @llvm.masked.scatter.v1i8.v1p0(<1 x i8>, <1 x ptr>, i32, <1 x i1>) - define void @mscatter_v1i8(<1 x i8> %val, <1 x ptr> %ptrs, <1 x i1> %m) { ; RV32V-LABEL: mscatter_v1i8: ; RV32V: # %bb.0: @@ -52,8 +50,6 @@ define 
void @mscatter_v1i8(<1 x i8> %val, <1 x ptr> %ptrs, <1 x i1> %m) { ret void } -declare void @llvm.masked.scatter.v2i8.v2p0(<2 x i8>, <2 x ptr>, i32, <2 x i1>) - define void @mscatter_v2i8(<2 x i8> %val, <2 x ptr> %ptrs, <2 x i1> %m) { ; RV32V-LABEL: mscatter_v2i8: ; RV32V: # %bb.0: @@ -267,8 +263,6 @@ define void @mscatter_v2i64_truncstore_v2i8(<2 x i64> %val, <2 x ptr> %ptrs, <2 ret void } -declare void @llvm.masked.scatter.v4i8.v4p0(<4 x i8>, <4 x ptr>, i32, <4 x i1>) - define void @mscatter_v4i8(<4 x i8> %val, <4 x ptr> %ptrs, <4 x i1> %m) { ; RV32-LABEL: mscatter_v4i8: ; RV32: # %bb.0: @@ -369,8 +363,6 @@ define void @mscatter_falsemask_v4i8(<4 x i8> %val, <4 x ptr> %ptrs) { ret void } -declare void @llvm.masked.scatter.v8i8.v8p0(<8 x i8>, <8 x ptr>, i32, <8 x i1>) - define void @mscatter_v8i8(<8 x i8> %val, <8 x ptr> %ptrs, <8 x i1> %m) { ; RV32-LABEL: mscatter_v8i8: ; RV32: # %bb.0: @@ -586,8 +578,6 @@ define void @mscatter_baseidx_v8i8(<8 x i8> %val, ptr %base, <8 x i8> %idxs, <8 ret void } -declare void @llvm.masked.scatter.v1i16.v1p0(<1 x i16>, <1 x ptr>, i32, <1 x i1>) - define void @mscatter_v1i16(<1 x i16> %val, <1 x ptr> %ptrs, <1 x i1> %m) { ; RV32V-LABEL: mscatter_v1i16: ; RV32V: # %bb.0: @@ -621,8 +611,6 @@ define void @mscatter_v1i16(<1 x i16> %val, <1 x ptr> %ptrs, <1 x i1> %m) { ret void } -declare void @llvm.masked.scatter.v2i16.v2p0(<2 x i16>, <2 x ptr>, i32, <2 x i1>) - define void @mscatter_v2i16(<2 x i16> %val, <2 x ptr> %ptrs, <2 x i1> %m) { ; RV32V-LABEL: mscatter_v2i16: ; RV32V: # %bb.0: @@ -778,8 +766,6 @@ define void @mscatter_v2i64_truncstore_v2i16(<2 x i64> %val, <2 x ptr> %ptrs, <2 ret void } -declare void @llvm.masked.scatter.v4i16.v4p0(<4 x i16>, <4 x ptr>, i32, <4 x i1>) - define void @mscatter_v4i16(<4 x i16> %val, <4 x ptr> %ptrs, <4 x i1> %m) { ; RV32-LABEL: mscatter_v4i16: ; RV32: # %bb.0: @@ -880,8 +866,6 @@ define void @mscatter_falsemask_v4i16(<4 x i16> %val, <4 x ptr> %ptrs) { ret void } -declare void 
@llvm.masked.scatter.v8i16.v8p0(<8 x i16>, <8 x ptr>, i32, <8 x i1>) - define void @mscatter_v8i16(<8 x i16> %val, <8 x ptr> %ptrs, <8 x i1> %m) { ; RV32-LABEL: mscatter_v8i16: ; RV32: # %bb.0: @@ -1491,8 +1475,6 @@ define void @mscatter_baseidx_v8i16(<8 x i16> %val, ptr %base, <8 x i16> %idxs, ret void } -declare void @llvm.masked.scatter.v1i32.v1p0(<1 x i32>, <1 x ptr>, i32, <1 x i1>) - define void @mscatter_v1i32(<1 x i32> %val, <1 x ptr> %ptrs, <1 x i1> %m) { ; RV32V-LABEL: mscatter_v1i32: ; RV32V: # %bb.0: @@ -1526,8 +1508,6 @@ define void @mscatter_v1i32(<1 x i32> %val, <1 x ptr> %ptrs, <1 x i1> %m) { ret void } -declare void @llvm.masked.scatter.v2i32.v2p0(<2 x i32>, <2 x ptr>, i32, <2 x i1>) - define void @mscatter_v2i32(<2 x i32> %val, <2 x ptr> %ptrs, <2 x i1> %m) { ; RV32V-LABEL: mscatter_v2i32: ; RV32V: # %bb.0: @@ -1627,8 +1607,6 @@ define void @mscatter_v2i64_truncstore_v2i32(<2 x i64> %val, <2 x ptr> %ptrs, <2 ret void } -declare void @llvm.masked.scatter.v4i32.v4p0(<4 x i32>, <4 x ptr>, i32, <4 x i1>) - define void @mscatter_v4i32(<4 x i32> %val, <4 x ptr> %ptrs, <4 x i1> %m) { ; RV32-LABEL: mscatter_v4i32: ; RV32: # %bb.0: @@ -1729,8 +1707,6 @@ define void @mscatter_falsemask_v4i32(<4 x i32> %val, <4 x ptr> %ptrs) { ret void } -declare void @llvm.masked.scatter.v8i32.v8p0(<8 x i32>, <8 x ptr>, i32, <8 x i1>) - define void @mscatter_v8i32(<8 x i32> %val, <8 x ptr> %ptrs, <8 x i1> %m) { ; RV32-LABEL: mscatter_v8i32: ; RV32: # %bb.0: @@ -2765,8 +2741,6 @@ define void @mscatter_baseidx_v8i32(<8 x i32> %val, ptr %base, <8 x i32> %idxs, ret void } -declare void @llvm.masked.scatter.v1i64.v1p0(<1 x i64>, <1 x ptr>, i32, <1 x i1>) - define void @mscatter_v1i64(<1 x i64> %val, <1 x ptr> %ptrs, <1 x i1> %m) { ; RV32V-LABEL: mscatter_v1i64: ; RV32V: # %bb.0: @@ -2806,8 +2780,6 @@ define void @mscatter_v1i64(<1 x i64> %val, <1 x ptr> %ptrs, <1 x i1> %m) { ret void } -declare void @llvm.masked.scatter.v2i64.v2p0(<2 x i64>, <2 x ptr>, i32, <2 x i1>) - define 
void @mscatter_v2i64(<2 x i64> %val, <2 x ptr> %ptrs, <2 x i1> %m) { ; RV32V-LABEL: mscatter_v2i64: ; RV32V: # %bb.0: @@ -2873,8 +2845,6 @@ define void @mscatter_v2i64(<2 x i64> %val, <2 x ptr> %ptrs, <2 x i1> %m) { ret void } -declare void @llvm.masked.scatter.v4i64.v4p0(<4 x i64>, <4 x ptr>, i32, <4 x i1>) - define void @mscatter_v4i64(<4 x i64> %val, <4 x ptr> %ptrs, <4 x i1> %m) { ; RV32V-LABEL: mscatter_v4i64: ; RV32V: # %bb.0: @@ -3056,8 +3026,6 @@ define void @mscatter_falsemask_v4i64(<4 x i64> %val, <4 x ptr> %ptrs) { ret void } -declare void @llvm.masked.scatter.v8i64.v8p0(<8 x i64>, <8 x ptr>, i32, <8 x i1>) - define void @mscatter_v8i64(<8 x i64> %val, <8 x ptr> %ptrs, <8 x i1> %m) { ; RV32V-LABEL: mscatter_v8i64: ; RV32V: # %bb.0: @@ -5868,8 +5836,6 @@ define void @mscatter_baseidx_v8i64(<8 x i64> %val, ptr %base, <8 x i64> %idxs, ret void } -declare void @llvm.masked.scatter.v1bf16.v1p0(<1 x bfloat>, <1 x ptr>, i32, <1 x i1>) - define void @mscatter_v1bf16(<1 x bfloat> %val, <1 x ptr> %ptrs, <1 x i1> %m) { ; RV32V-LABEL: mscatter_v1bf16: ; RV32V: # %bb.0: @@ -5905,8 +5871,6 @@ define void @mscatter_v1bf16(<1 x bfloat> %val, <1 x ptr> %ptrs, <1 x i1> %m) { ret void } -declare void @llvm.masked.scatter.v2bf16.v2p0(<2 x bfloat>, <2 x ptr>, i32, <2 x i1>) - define void @mscatter_v2bf16(<2 x bfloat> %val, <2 x ptr> %ptrs, <2 x i1> %m) { ; RV32V-LABEL: mscatter_v2bf16: ; RV32V: # %bb.0: @@ -5955,8 +5919,6 @@ define void @mscatter_v2bf16(<2 x bfloat> %val, <2 x ptr> %ptrs, <2 x i1> %m) { ret void } -declare void @llvm.masked.scatter.v4bf16.v4p0(<4 x bfloat>, <4 x ptr>, i32, <4 x i1>) - define void @mscatter_v4bf16(<4 x bfloat> %val, <4 x ptr> %ptrs, <4 x i1> %m) { ; RV32-LABEL: mscatter_v4bf16: ; RV32: # %bb.0: @@ -6073,8 +6035,6 @@ define void @mscatter_falsemask_v4bf16(<4 x bfloat> %val, <4 x ptr> %ptrs) { ret void } -declare void @llvm.masked.scatter.v8bf16.v8p0(<8 x bfloat>, <8 x ptr>, i32, <8 x i1>) - define void @mscatter_v8bf16(<8 x bfloat> %val, <8 x 
ptr> %ptrs, <8 x i1> %m) { ; RV32-LABEL: mscatter_v8bf16: ; RV32: # %bb.0: @@ -6766,8 +6726,6 @@ define void @mscatter_baseidx_v8bf16(<8 x bfloat> %val, ptr %base, <8 x i16> %id ret void } -declare void @llvm.masked.scatter.v1f16.v1p0(<1 x half>, <1 x ptr>, i32, <1 x i1>) - define void @mscatter_v1f16(<1 x half> %val, <1 x ptr> %ptrs, <1 x i1> %m) { ; RV32V-LABEL: mscatter_v1f16: ; RV32V: # %bb.0: @@ -6814,8 +6772,6 @@ define void @mscatter_v1f16(<1 x half> %val, <1 x ptr> %ptrs, <1 x i1> %m) { ret void } -declare void @llvm.masked.scatter.v2f16.v2p0(<2 x half>, <2 x ptr>, i32, <2 x i1>) - define void @mscatter_v2f16(<2 x half> %val, <2 x ptr> %ptrs, <2 x i1> %m) { ; RV32V-LABEL: mscatter_v2f16: ; RV32V: # %bb.0: @@ -6886,8 +6842,6 @@ define void @mscatter_v2f16(<2 x half> %val, <2 x ptr> %ptrs, <2 x i1> %m) { ret void } -declare void @llvm.masked.scatter.v4f16.v4p0(<4 x half>, <4 x ptr>, i32, <4 x i1>) - define void @mscatter_v4f16(<4 x half> %val, <4 x ptr> %ptrs, <4 x i1> %m) { ; RV32-LABEL: mscatter_v4f16: ; RV32: # %bb.0: @@ -7064,8 +7018,6 @@ define void @mscatter_falsemask_v4f16(<4 x half> %val, <4 x ptr> %ptrs) { ret void } -declare void @llvm.masked.scatter.v8f16.v8p0(<8 x half>, <8 x ptr>, i32, <8 x i1>) - define void @mscatter_v8f16(<8 x half> %val, <8 x ptr> %ptrs, <8 x i1> %m) { ; RV32-LABEL: mscatter_v8f16: ; RV32: # %bb.0: @@ -8260,8 +8212,6 @@ define void @mscatter_baseidx_v8f16(<8 x half> %val, ptr %base, <8 x i16> %idxs, ret void } -declare void @llvm.masked.scatter.v1f32.v1p0(<1 x float>, <1 x ptr>, i32, <1 x i1>) - define void @mscatter_v1f32(<1 x float> %val, <1 x ptr> %ptrs, <1 x i1> %m) { ; RV32V-LABEL: mscatter_v1f32: ; RV32V: # %bb.0: @@ -8295,8 +8245,6 @@ define void @mscatter_v1f32(<1 x float> %val, <1 x ptr> %ptrs, <1 x i1> %m) { ret void } -declare void @llvm.masked.scatter.v2f32.v2p0(<2 x float>, <2 x ptr>, i32, <2 x i1>) - define void @mscatter_v2f32(<2 x float> %val, <2 x ptr> %ptrs, <2 x i1> %m) { ; RV32V-LABEL: mscatter_v2f32: ; 
RV32V: # %bb.0: @@ -8341,8 +8289,6 @@ define void @mscatter_v2f32(<2 x float> %val, <2 x ptr> %ptrs, <2 x i1> %m) { ret void } -declare void @llvm.masked.scatter.v4f32.v4p0(<4 x float>, <4 x ptr>, i32, <4 x i1>) - define void @mscatter_v4f32(<4 x float> %val, <4 x ptr> %ptrs, <4 x i1> %m) { ; RV32-LABEL: mscatter_v4f32: ; RV32: # %bb.0: @@ -8443,8 +8389,6 @@ define void @mscatter_falsemask_v4f32(<4 x float> %val, <4 x ptr> %ptrs) { ret void } -declare void @llvm.masked.scatter.v8f32.v8p0(<8 x float>, <8 x ptr>, i32, <8 x i1>) - define void @mscatter_v8f32(<8 x float> %val, <8 x ptr> %ptrs, <8 x i1> %m) { ; RV32-LABEL: mscatter_v8f32: ; RV32: # %bb.0: @@ -9479,8 +9423,6 @@ define void @mscatter_baseidx_v8f32(<8 x float> %val, ptr %base, <8 x i32> %idxs ret void } -declare void @llvm.masked.scatter.v1f64.v1p0(<1 x double>, <1 x ptr>, i32, <1 x i1>) - define void @mscatter_v1f64(<1 x double> %val, <1 x ptr> %ptrs, <1 x i1> %m) { ; RV32V-LABEL: mscatter_v1f64: ; RV32V: # %bb.0: @@ -9519,8 +9461,6 @@ define void @mscatter_v1f64(<1 x double> %val, <1 x ptr> %ptrs, <1 x i1> %m) { ret void } -declare void @llvm.masked.scatter.v2f64.v2p0(<2 x double>, <2 x ptr>, i32, <2 x i1>) - define void @mscatter_v2f64(<2 x double> %val, <2 x ptr> %ptrs, <2 x i1> %m) { ; RV32V-LABEL: mscatter_v2f64: ; RV32V: # %bb.0: @@ -9580,8 +9520,6 @@ define void @mscatter_v2f64(<2 x double> %val, <2 x ptr> %ptrs, <2 x i1> %m) { ret void } -declare void @llvm.masked.scatter.v4f64.v4p0(<4 x double>, <4 x ptr>, i32, <4 x i1>) - define void @mscatter_v4f64(<4 x double> %val, <4 x ptr> %ptrs, <4 x i1> %m) { ; RV32V-LABEL: mscatter_v4f64: ; RV32V: # %bb.0: @@ -9731,8 +9669,6 @@ define void @mscatter_falsemask_v4f64(<4 x double> %val, <4 x ptr> %ptrs) { ret void } -declare void @llvm.masked.scatter.v8f64.v8p0(<8 x double>, <8 x ptr>, i32, <8 x i1>) - define void @mscatter_v8f64(<8 x double> %val, <8 x ptr> %ptrs, <8 x i1> %m) { ; RV32V-LABEL: mscatter_v8f64: ; RV32V: # %bb.0: @@ -11925,8 +11861,6 @@ 
define void @mscatter_baseidx_v8f64(<8 x double> %val, ptr %base, <8 x i64> %idx ret void } -declare void @llvm.masked.scatter.v16i8.v16p0(<16 x i8>, <16 x ptr>, i32, <16 x i1>) - define void @mscatter_baseidx_v16i8(<16 x i8> %val, ptr %base, <16 x i8> %idxs, <16 x i1> %m) { ; RV32-LABEL: mscatter_baseidx_v16i8: ; RV32: # %bb.0: @@ -12128,8 +12062,6 @@ define void @mscatter_baseidx_v16i8(<16 x i8> %val, ptr %base, <16 x i8> %idxs, ret void } -declare void @llvm.masked.scatter.v32i8.v32p0(<32 x i8>, <32 x ptr>, i32, <32 x i1>) - define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs, <32 x i1> %m) { ; RV32-LABEL: mscatter_baseidx_v32i8: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll index 08da7d6bc50f7..57c94830fc606 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64 %s -declare <2 x half> @llvm.vp.nearbyint.v2f16(<2 x half>, <2 x i1>, i32) - define <2 x half> @vp_nearbyint_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_v2f16: ; CHECK: # %bb.0: @@ -48,8 +46,6 @@ define <2 x half> @vp_nearbyint_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) ret <2 x half> %v } -declare <4 x half> @llvm.vp.nearbyint.v4f16(<4 x half>, <4 x i1>, i32) - define <4 x half> @vp_nearbyint_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_v4f16: ; CHECK: # %bb.0: @@ -92,8 +88,6 @@ define <4 x half> @vp_nearbyint_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) ret <4 x half> %v } -declare <8 x half> @llvm.vp.nearbyint.v8f16(<8 x half>, <8 x i1>, i32) - define <8 x half> @vp_nearbyint_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vp_nearbyint_v8f16: ; CHECK: # %bb.0: @@ -136,8 +130,6 @@ define <8 x half> @vp_nearbyint_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) ret <8 x half> %v } -declare <16 x half> @llvm.vp.nearbyint.v16f16(<16 x half>, <16 x i1>, i32) - define <16 x half> @vp_nearbyint_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_v16f16: ; CHECK: # %bb.0: @@ -182,8 +174,6 @@ define <16 x half> @vp_nearbyint_v16f16_unmasked(<16 x half> %va, i32 zeroext %e ret <16 x half> %v } -declare <2 x float> @llvm.vp.nearbyint.v2f32(<2 x float>, <2 x i1>, i32) - define <2 x float> @vp_nearbyint_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_v2f32: ; CHECK: # %bb.0: @@ -224,8 +214,6 @@ define <2 x float> @vp_nearbyint_v2f32_unmasked(<2 x float> %va, i32 zeroext %ev ret <2 x float> %v } -declare <4 x float> @llvm.vp.nearbyint.v4f32(<4 x float>, <4 x i1>, i32) - define <4 x float> @vp_nearbyint_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_v4f32: ; CHECK: # %bb.0: @@ -266,8 +254,6 @@ define <4 x float> @vp_nearbyint_v4f32_unmasked(<4 x float> %va, i32 zeroext %ev ret <4 x float> %v } -declare <8 x float> @llvm.vp.nearbyint.v8f32(<8 x float>, <8 x i1>, i32) - define <8 x float> @vp_nearbyint_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_v8f32: ; CHECK: # %bb.0: @@ -310,8 +296,6 @@ define <8 x float> @vp_nearbyint_v8f32_unmasked(<8 x float> %va, i32 zeroext %ev ret <8 x float> %v } -declare <16 x float> @llvm.vp.nearbyint.v16f32(<16 x float>, <16 x i1>, i32) - define <16 x float> @vp_nearbyint_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_v16f32: ; CHECK: # %bb.0: @@ -354,8 +338,6 @@ define <16 x float> @vp_nearbyint_v16f32_unmasked(<16 x float> %va, i32 zeroext ret <16 x float> %v } -declare <2 x double> @llvm.vp.nearbyint.v2f64(<2 x double>, <2 x i1>, i32) - define <2 x double> @vp_nearbyint_v2f64(<2 x 
double> %va, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_nearbyint_v2f64: ; RV32: # %bb.0: @@ -430,8 +412,6 @@ define <2 x double> @vp_nearbyint_v2f64_unmasked(<2 x double> %va, i32 zeroext % ret <2 x double> %v } -declare <4 x double> @llvm.vp.nearbyint.v4f64(<4 x double>, <4 x i1>, i32) - define <4 x double> @vp_nearbyint_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_nearbyint_v4f64: ; RV32: # %bb.0: @@ -510,8 +490,6 @@ define <4 x double> @vp_nearbyint_v4f64_unmasked(<4 x double> %va, i32 zeroext % ret <4 x double> %v } -declare <8 x double> @llvm.vp.nearbyint.v8f64(<8 x double>, <8 x i1>, i32) - define <8 x double> @vp_nearbyint_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_nearbyint_v8f64: ; RV32: # %bb.0: @@ -590,8 +568,6 @@ define <8 x double> @vp_nearbyint_v8f64_unmasked(<8 x double> %va, i32 zeroext % ret <8 x double> %v } -declare <15 x double> @llvm.vp.nearbyint.v15f64(<15 x double>, <15 x i1>, i32) - define <15 x double> @vp_nearbyint_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_nearbyint_v15f64: ; RV32: # %bb.0: @@ -670,8 +646,6 @@ define <15 x double> @vp_nearbyint_v15f64_unmasked(<15 x double> %va, i32 zeroex ret <15 x double> %v } -declare <16 x double> @llvm.vp.nearbyint.v16f64(<16 x double>, <16 x i1>, i32) - define <16 x double> @vp_nearbyint_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_nearbyint_v16f64: ; RV32: # %bb.0: @@ -750,8 +724,6 @@ define <16 x double> @vp_nearbyint_v16f64_unmasked(<16 x double> %va, i32 zeroex ret <16 x double> %v } -declare <32 x double> @llvm.vp.nearbyint.v32f64(<32 x double>, <32 x i1>, i32) - define <32 x double> @vp_nearbyint_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_nearbyint_v32f64: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-peephole-vmerge-vops.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-peephole-vmerge-vops.ll index 
016be04ffc9b9..287752759a06b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-peephole-vmerge-vops.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-peephole-vmerge-vops.ll @@ -1,13 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+zvl256b | FileCheck %s -declare <8 x i16> @llvm.vp.merge.v8i16(<8 x i1>, <8 x i16>, <8 x i16>, i32) -declare <8 x i32> @llvm.vp.merge.v8i32(<8 x i1>, <8 x i32>, <8 x i32>, i32) -declare <8 x float> @llvm.vp.merge.v8f32(<8 x i1>, <8 x float>, <8 x float>, i32) -declare <8 x double> @llvm.vp.merge.v8f64(<8 x i1>, <8 x double>, <8 x double>, i32) - ; Test binary operator with vp.merge and vp.smax. -declare <8 x i32> @llvm.vp.add.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) define <8 x i32> @vpmerge_vpadd(<8 x i32> %passthru, <8 x i32> %x, <8 x i32> %y, <8 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpadd: ; CHECK: # %bb.0: @@ -20,7 +14,6 @@ define <8 x i32> @vpmerge_vpadd(<8 x i32> %passthru, <8 x i32> %x, <8 x i32> %y, } ; Test glued node of merge should not be deleted. -declare <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32>, <8 x i32>, metadata, <8 x i1>, i32) define <8 x i32> @vpmerge_vpadd2(<8 x i32> %passthru, <8 x i32> %x, <8 x i32> %y, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpadd2: ; CHECK: # %bb.0: @@ -48,7 +41,6 @@ define <8 x i32> @vpmerge_vpadd3(<8 x i32> %passthru, <8 x i32> %x, <8 x i32> %y } ; Test float binary operator with vp.merge and vp.fadd. -declare <8 x float> @llvm.vp.fadd.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32) define <8 x float> @vpmerge_vpfadd(<8 x float> %passthru, <8 x float> %x, <8 x float> %y, <8 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpfadd: ; CHECK: # %bb.0: @@ -61,7 +53,6 @@ define <8 x float> @vpmerge_vpfadd(<8 x float> %passthru, <8 x float> %x, <8 x f } ; Test conversion by fptosi. 
-declare <8 x i16> @llvm.vp.fptosi.v8i16.v8f32(<8 x float>, <8 x i1>, i32) define <8 x i16> @vpmerge_vpfptosi(<8 x i16> %passthru, <8 x float> %x, <8 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpfptosi: ; CHECK: # %bb.0: @@ -74,7 +65,6 @@ define <8 x i16> @vpmerge_vpfptosi(<8 x i16> %passthru, <8 x float> %x, <8 x i1> } ; Test conversion by sitofp. -declare <8 x float> @llvm.vp.sitofp.v8f32.v8i64(<8 x i64>, <8 x i1>, i32) define <8 x float> @vpmerge_vpsitofp(<8 x float> %passthru, <8 x i64> %x, <8 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpsitofp: ; CHECK: # %bb.0: @@ -87,7 +77,6 @@ define <8 x float> @vpmerge_vpsitofp(<8 x float> %passthru, <8 x i64> %x, <8 x i } ; Test integer extension by vp.zext. -declare <8 x i32> @llvm.vp.zext.v8i32.v8i8(<8 x i8>, <8 x i1>, i32) define <8 x i32> @vpmerge_vpzext(<8 x i32> %passthru, <8 x i8> %x, <8 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpzext: ; CHECK: # %bb.0: @@ -100,7 +89,6 @@ define <8 x i32> @vpmerge_vpzext(<8 x i32> %passthru, <8 x i8> %x, <8 x i1> %m, } ; Test integer truncation by vp.trunc. -declare <8 x i32> @llvm.vp.trunc.v8i32.v8i64(<8 x i64>, <8 x i1>, i32) define <8 x i32> @vpmerge_vptrunc(<8 x i32> %passthru, <8 x i64> %x, <8 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vptrunc: ; CHECK: # %bb.0: @@ -113,7 +101,6 @@ define <8 x i32> @vpmerge_vptrunc(<8 x i32> %passthru, <8 x i64> %x, <8 x i1> %m } ; Test integer extension by vp.fpext. -declare <8 x double> @llvm.vp.fpext.v8f64.v8f32(<8 x float>, <8 x i1>, i32) define <8 x double> @vpmerge_vpfpext(<8 x double> %passthru, <8 x float> %x, <8 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpfpext: ; CHECK: # %bb.0: @@ -126,7 +113,6 @@ define <8 x double> @vpmerge_vpfpext(<8 x double> %passthru, <8 x float> %x, <8 } ; Test integer truncation by vp.trunc. 
-declare <8 x float> @llvm.vp.fptrunc.v8f32.v8f64(<8 x double>, <8 x i1>, i32) define <8 x float> @vpmerge_vpfptrunc(<8 x float> %passthru, <8 x double> %x, <8 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpfptrunc: ; CHECK: # %bb.0: @@ -139,7 +125,6 @@ define <8 x float> @vpmerge_vpfptrunc(<8 x float> %passthru, <8 x double> %x, <8 } ; Test load operation by vp.load. -declare <8 x i32> @llvm.vp.load.v8i32.p0(ptr, <8 x i1>, i32) define <8 x i32> @vpmerge_vpload(<8 x i32> %passthru, ptr %p, <8 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpload: @@ -167,11 +152,6 @@ define <8 x i32> @vpmerge_vpload2(<8 x i32> %passthru, ptr %p, <8 x i32> %x, <8 ret <8 x i32> %b } -declare <8 x i16> @llvm.vp.select.v8i16(<8 x i1>, <8 x i16>, <8 x i16>, i32) -declare <8 x i32> @llvm.vp.select.v8i32(<8 x i1>, <8 x i32>, <8 x i32>, i32) -declare <8 x float> @llvm.vp.select.v8f32(<8 x i1>, <8 x float>, <8 x float>, i32) -declare <8 x double> @llvm.vp.select.v8f64(<8 x i1>, <8 x double>, <8 x double>, i32) - ; Test binary operator with vp.select and vp.add. 
define <8 x i32> @vpselect_vpadd(<8 x i32> %passthru, <8 x i32> %x, <8 x i32> %y, <8 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vpselect_vpadd: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-formation.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-formation.ll index 754941eb93e01..4da6e103603ce 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-formation.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-formation.ll @@ -324,7 +324,6 @@ define i32 @reduce_sum_16xi32_prefix13(ptr %p) { ret i32 %add11 } - define i32 @reduce_sum_16xi32_prefix14(ptr %p) { ; CHECK-LABEL: reduce_sum_16xi32_prefix14: ; CHECK: # %bb.0: @@ -586,11 +585,6 @@ define i32 @reduce_or_16xi32_prefix5(ptr %p) { ret i32 %or3 } -declare i32 @llvm.smax.i32(i32 %a, i32 %b) -declare i32 @llvm.smin.i32(i32 %a, i32 %b) -declare i32 @llvm.umax.i32(i32 %a, i32 %b) -declare i32 @llvm.umin.i32(i32 %a, i32 %b) - define i32 @reduce_smax_16xi32_prefix2(ptr %p) { ; CHECK-LABEL: reduce_smax_16xi32_prefix2: ; CHECK: # %bb.0: @@ -849,7 +843,6 @@ define float @reduce_fadd_2xf32_ninf_only(ptr %p) { ret float %fadd0 } - ; Negative test - last fadd is not associative define float @reduce_fadd_4xi32_non_associative(ptr %p) { ; CHECK-LABEL: reduce_fadd_4xi32_non_associative: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll index 0f5cccd8cf2e2..ca9b24e60e503 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare half @llvm.vp.reduce.fadd.v2f16(half, <2 x half>, <2 x i1>, i32) - define half @vpreduce_fadd_v2f16(half %s, <2 x half> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_v2f16: ; CHECK: # %bb.0: @@ -32,8 +30,6 @@ define half 
@vpreduce_ord_fadd_v2f16(half %s, <2 x half> %v, <2 x i1> %m, i32 ze ret half %r } -declare half @llvm.vp.reduce.fadd.v4f16(half, <4 x half>, <4 x i1>, i32) - define half @vpreduce_fadd_v4f16(half %s, <4 x half> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_v4f16: ; CHECK: # %bb.0: @@ -60,8 +56,6 @@ define half @vpreduce_ord_fadd_v4f16(half %s, <4 x half> %v, <4 x i1> %m, i32 ze ret half %r } -declare float @llvm.vp.reduce.fadd.v2f32(float, <2 x float>, <2 x i1>, i32) - define float @vpreduce_fadd_v2f32(float %s, <2 x float> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_v2f32: ; CHECK: # %bb.0: @@ -88,8 +82,6 @@ define float @vpreduce_ord_fadd_v2f32(float %s, <2 x float> %v, <2 x i1> %m, i32 ret float %r } -declare float @llvm.vp.reduce.fadd.v4f32(float, <4 x float>, <4 x i1>, i32) - define float @vpreduce_fadd_v4f32(float %s, <4 x float> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_v4f32: ; CHECK: # %bb.0: @@ -116,8 +108,6 @@ define float @vpreduce_ord_fadd_v4f32(float %s, <4 x float> %v, <4 x i1> %m, i32 ret float %r } -declare float @llvm.vp.reduce.fadd.v64f32(float, <64 x float>, <64 x i1>, i32) - define float @vpreduce_fadd_v64f32(float %s, <64 x float> %v, <64 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_v64f32: ; CHECK: # %bb.0: @@ -174,8 +164,6 @@ define float @vpreduce_ord_fadd_v64f32(float %s, <64 x float> %v, <64 x i1> %m, ret float %r } -declare double @llvm.vp.reduce.fadd.v2f64(double, <2 x double>, <2 x i1>, i32) - define double @vpreduce_fadd_v2f64(double %s, <2 x double> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_v2f64: ; CHECK: # %bb.0: @@ -202,8 +190,6 @@ define double @vpreduce_ord_fadd_v2f64(double %s, <2 x double> %v, <2 x i1> %m, ret double %r } -declare double @llvm.vp.reduce.fadd.v3f64(double, <3 x double>, <3 x i1>, i32) - define double @vpreduce_fadd_v3f64(double %s, <3 x double> %v, <3 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vpreduce_fadd_v3f64: ; CHECK: # %bb.0: @@ -230,8 +216,6 @@ define double @vpreduce_ord_fadd_v3f64(double %s, <3 x double> %v, <3 x i1> %m, ret double %r } -declare double @llvm.vp.reduce.fadd.v4f64(double, <4 x double>, <4 x i1>, i32) - define double @vpreduce_fadd_v4f64(double %s, <4 x double> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_v4f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll index eec12212d0d37..ffbf1c7a548e1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfh,+f,+d -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV32 %s ; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfh,+f,+d -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64 %s -declare half @llvm.vector.reduce.fadd.v1f16(half, <1 x half>) - define half @vreduce_fadd_v1f16(<1 x half> %v, half %s) { ; CHECK-LABEL: vreduce_fadd_v1f16: ; CHECK: # %bb.0: @@ -27,8 +25,6 @@ define half @vreduce_ord_fadd_v1f16(<1 x half> %v, half %s) { ret half %red } -declare half @llvm.vector.reduce.fadd.v2f16(half, <2 x half>) - define half @vreduce_fadd_v2f16(ptr %x, half %s) { ; CHECK-LABEL: vreduce_fadd_v2f16: ; CHECK: # %bb.0: @@ -57,8 +53,6 @@ define half @vreduce_ord_fadd_v2f16(ptr %x, half %s) { ret half %red } -declare half @llvm.vector.reduce.fadd.v4f16(half, <4 x half>) - define half @vreduce_fadd_v4f16(ptr %x, half %s) { ; CHECK-LABEL: vreduce_fadd_v4f16: ; CHECK: # %bb.0: @@ -87,8 +81,6 @@ define half @vreduce_ord_fadd_v4f16(ptr %x, half %s) { ret half %red } -declare half @llvm.vector.reduce.fadd.v7f16(half, <7 x half>) - define half @vreduce_fadd_v7f16(ptr %x, half %s) { ; CHECK-LABEL: vreduce_fadd_v7f16: ; CHECK: # %bb.0: @@ -103,8 +95,6 @@ define half 
@vreduce_fadd_v7f16(ptr %x, half %s) { ret half %red } -declare half @llvm.vector.reduce.fadd.v8f16(half, <8 x half>) - define half @vreduce_fadd_v8f16(ptr %x, half %s) { ; CHECK-LABEL: vreduce_fadd_v8f16: ; CHECK: # %bb.0: @@ -133,8 +123,6 @@ define half @vreduce_ord_fadd_v8f16(ptr %x, half %s) { ret half %red } -declare half @llvm.vector.reduce.fadd.v16f16(half, <16 x half>) - define half @vreduce_fadd_v16f16(ptr %x, half %s) { ; CHECK-LABEL: vreduce_fadd_v16f16: ; CHECK: # %bb.0: @@ -163,8 +151,6 @@ define half @vreduce_ord_fadd_v16f16(ptr %x, half %s) { ret half %red } -declare half @llvm.vector.reduce.fadd.v32f16(half, <32 x half>) - define half @vreduce_fadd_v32f16(ptr %x, half %s) { ; CHECK-LABEL: vreduce_fadd_v32f16: ; CHECK: # %bb.0: @@ -195,8 +181,6 @@ define half @vreduce_ord_fadd_v32f16(ptr %x, half %s) { ret half %red } -declare half @llvm.vector.reduce.fadd.v64f16(half, <64 x half>) - define half @vreduce_fadd_v64f16(ptr %x, half %s) { ; CHECK-LABEL: vreduce_fadd_v64f16: ; CHECK: # %bb.0: @@ -227,8 +211,6 @@ define half @vreduce_ord_fadd_v64f16(ptr %x, half %s) { ret half %red } -declare half @llvm.vector.reduce.fadd.v128f16(half, <128 x half>) - define half @vreduce_fadd_v128f16(ptr %x, half %s) { ; CHECK-LABEL: vreduce_fadd_v128f16: ; CHECK: # %bb.0: @@ -265,8 +247,6 @@ define half @vreduce_ord_fadd_v128f16(ptr %x, half %s) { ret half %red } -declare float @llvm.vector.reduce.fadd.v1f32(float, <1 x float>) - define float @vreduce_fadd_v1f32(<1 x float> %v, float %s) { ; CHECK-LABEL: vreduce_fadd_v1f32: ; CHECK: # %bb.0: @@ -319,8 +299,6 @@ define float @vreduce_ord_fwadd_v1f32(<1 x half> %v, float %s) { ret float %red } -declare float @llvm.vector.reduce.fadd.v2f32(float, <2 x float>) - define float @vreduce_fadd_v2f32(ptr %x, float %s) { ; CHECK-LABEL: vreduce_fadd_v2f32: ; CHECK: # %bb.0: @@ -383,8 +361,6 @@ define float @vreduce_ord_fwadd_v2f32(ptr %x, float %s) { ret float %red } -declare float @llvm.vector.reduce.fadd.v4f32(float, <4 x float>) 
- define float @vreduce_fadd_v4f32(ptr %x, float %s) { ; CHECK-LABEL: vreduce_fadd_v4f32: ; CHECK: # %bb.0: @@ -447,8 +423,6 @@ define float @vreduce_ord_fwadd_v4f32(ptr %x, float %s) { ret float %red } -declare float @llvm.vector.reduce.fadd.v7f32(float, <7 x float>) - define float @vreduce_fadd_v7f32(ptr %x, float %s) { ; CHECK-LABEL: vreduce_fadd_v7f32: ; CHECK: # %bb.0: @@ -521,9 +495,6 @@ define float @vreduce_fadd_v7f32_neutralstart_fast(ptr %x) { ret float %red } - -declare float @llvm.vector.reduce.fadd.v8f32(float, <8 x float>) - define float @vreduce_fadd_v8f32(ptr %x, float %s) { ; CHECK-LABEL: vreduce_fadd_v8f32: ; CHECK: # %bb.0: @@ -586,8 +557,6 @@ define float @vreduce_ord_fwadd_v8f32(ptr %x, float %s) { ret float %red } -declare float @llvm.vector.reduce.fadd.v16f32(float, <16 x float>) - define float @vreduce_fadd_v16f32(ptr %x, float %s) { ; CHECK-LABEL: vreduce_fadd_v16f32: ; CHECK: # %bb.0: @@ -650,8 +619,6 @@ define float @vreduce_ord_fwadd_v16f32(ptr %x, float %s) { ret float %red } -declare float @llvm.vector.reduce.fadd.v32f32(float, <32 x float>) - define float @vreduce_fadd_v32f32(ptr %x, float %s) { ; CHECK-LABEL: vreduce_fadd_v32f32: ; CHECK: # %bb.0: @@ -718,8 +685,6 @@ define float @vreduce_ord_fwadd_v32f32(ptr %x, float %s) { ret float %red } -declare float @llvm.vector.reduce.fadd.v64f32(float, <64 x float>) - define float @vreduce_fadd_v64f32(ptr %x, float %s) { ; CHECK-LABEL: vreduce_fadd_v64f32: ; CHECK: # %bb.0: @@ -801,8 +766,6 @@ define float @vreduce_ord_fwadd_v64f32(ptr %x, float %s) { ret float %red } -declare double @llvm.vector.reduce.fadd.v1f64(double, <1 x double>) - define double @vreduce_fadd_v1f64(<1 x double> %v, double %s) { ; CHECK-LABEL: vreduce_fadd_v1f64: ; CHECK: # %bb.0: @@ -855,8 +818,6 @@ define double @vreduce_ord_fwadd_v1f64(<1 x float> %v, double %s) { ret double %red } -declare double @llvm.vector.reduce.fadd.v2f64(double, <2 x double>) - define double @vreduce_fadd_v2f64(ptr %x, double %s) { ; 
CHECK-LABEL: vreduce_fadd_v2f64: ; CHECK: # %bb.0: @@ -919,8 +880,6 @@ define double @vreduce_ord_fwadd_v2f64(ptr %x, double %s) { ret double %red } -declare double @llvm.vector.reduce.fadd.v4f64(double, <4 x double>) - define double @vreduce_fadd_v4f64(ptr %x, double %s) { ; CHECK-LABEL: vreduce_fadd_v4f64: ; CHECK: # %bb.0: @@ -983,8 +942,6 @@ define double @vreduce_ord_fwadd_v4f64(ptr %x, double %s) { ret double %red } -declare double @llvm.vector.reduce.fadd.v8f64(double, <8 x double>) - define double @vreduce_fadd_v8f64(ptr %x, double %s) { ; CHECK-LABEL: vreduce_fadd_v8f64: ; CHECK: # %bb.0: @@ -1047,8 +1004,6 @@ define double @vreduce_ord_fwadd_v8f64(ptr %x, double %s) { ret double %red } -declare double @llvm.vector.reduce.fadd.v16f64(double, <16 x double>) - define double @vreduce_fadd_v16f64(ptr %x, double %s) { ; CHECK-LABEL: vreduce_fadd_v16f64: ; CHECK: # %bb.0: @@ -1111,8 +1066,6 @@ define double @vreduce_ord_fwadd_v16f64(ptr %x, double %s) { ret double %red } -declare double @llvm.vector.reduce.fadd.v32f64(double, <32 x double>) - define double @vreduce_fadd_v32f64(ptr %x, double %s) { ; CHECK-LABEL: vreduce_fadd_v32f64: ; CHECK: # %bb.0: @@ -1190,8 +1143,6 @@ define double @vreduce_ord_fwadd_v32f64(ptr %x, double %s) { ret double %red } -declare half @llvm.vector.reduce.fmin.v2f16(<2 x half>) - define half @vreduce_fmin_v2f16(ptr %x) { ; CHECK-LABEL: vreduce_fmin_v2f16: ; CHECK: # %bb.0: @@ -1205,8 +1156,6 @@ define half @vreduce_fmin_v2f16(ptr %x) { ret half %red } -declare half @llvm.vector.reduce.fmin.v4f16(<4 x half>) - define half @vreduce_fmin_v4f16(ptr %x) { ; CHECK-LABEL: vreduce_fmin_v4f16: ; CHECK: # %bb.0: @@ -1246,8 +1195,6 @@ define half @vreduce_fmin_v4f16_nonans_noinfs(ptr %x) { ret half %red } -declare half @llvm.vector.reduce.fmin.v128f16(<128 x half>) - define half @vreduce_fmin_v128f16(ptr %x) { ; CHECK-LABEL: vreduce_fmin_v128f16: ; CHECK: # %bb.0: @@ -1265,8 +1212,6 @@ define half @vreduce_fmin_v128f16(ptr %x) { ret half %red } 
-declare float @llvm.vector.reduce.fmin.v2f32(<2 x float>) - define float @vreduce_fmin_v2f32(ptr %x) { ; CHECK-LABEL: vreduce_fmin_v2f32: ; CHECK: # %bb.0: @@ -1280,8 +1225,6 @@ define float @vreduce_fmin_v2f32(ptr %x) { ret float %red } -declare float @llvm.vector.reduce.fmin.v4f32(<4 x float>) - define float @vreduce_fmin_v4f32(ptr %x) { ; CHECK-LABEL: vreduce_fmin_v4f32: ; CHECK: # %bb.0: @@ -1321,8 +1264,6 @@ define float @vreduce_fmin_v4f32_nonans_noinfs(ptr %x) { ret float %red } -declare float @llvm.vector.reduce.fmin.v7f32(<7 x float>) - define float @vreduce_fmin_v7f32(ptr %x) { ; CHECK-LABEL: vreduce_fmin_v7f32: ; CHECK: # %bb.0: @@ -1338,8 +1279,6 @@ define float @vreduce_fmin_v7f32(ptr %x) { ret float %red } -declare float @llvm.vector.reduce.fmin.v128f32(<128 x float>) - define float @vreduce_fmin_v128f32(ptr %x) { ; CHECK-LABEL: vreduce_fmin_v128f32: ; CHECK: # %bb.0: @@ -1363,8 +1302,6 @@ define float @vreduce_fmin_v128f32(ptr %x) { ret float %red } -declare double @llvm.vector.reduce.fmin.v2f64(<2 x double>) - define double @vreduce_fmin_v2f64(ptr %x) { ; CHECK-LABEL: vreduce_fmin_v2f64: ; CHECK: # %bb.0: @@ -1378,8 +1315,6 @@ define double @vreduce_fmin_v2f64(ptr %x) { ret double %red } -declare double @llvm.vector.reduce.fmin.v4f64(<4 x double>) - define double @vreduce_fmin_v4f64(ptr %x) { ; CHECK-LABEL: vreduce_fmin_v4f64: ; CHECK: # %bb.0: @@ -1419,8 +1354,6 @@ define double @vreduce_fmin_v4f64_nonans_noinfs(ptr %x) { ret double %red } -declare double @llvm.vector.reduce.fmin.v32f64(<32 x double>) - define double @vreduce_fmin_v32f64(ptr %x) { ; CHECK-LABEL: vreduce_fmin_v32f64: ; CHECK: # %bb.0: @@ -1437,8 +1370,6 @@ define double @vreduce_fmin_v32f64(ptr %x) { ret double %red } -declare half @llvm.vector.reduce.fmax.v2f16(<2 x half>) - define half @vreduce_fmax_v2f16(ptr %x) { ; CHECK-LABEL: vreduce_fmax_v2f16: ; CHECK: # %bb.0: @@ -1452,8 +1383,6 @@ define half @vreduce_fmax_v2f16(ptr %x) { ret half %red } -declare half 
@llvm.vector.reduce.fmax.v4f16(<4 x half>) - define half @vreduce_fmax_v4f16(ptr %x) { ; CHECK-LABEL: vreduce_fmax_v4f16: ; CHECK: # %bb.0: @@ -1493,8 +1422,6 @@ define half @vreduce_fmax_v4f16_nonans_noinfs(ptr %x) { ret half %red } -declare half @llvm.vector.reduce.fmax.v128f16(<128 x half>) - define half @vreduce_fmax_v128f16(ptr %x) { ; CHECK-LABEL: vreduce_fmax_v128f16: ; CHECK: # %bb.0: @@ -1512,8 +1439,6 @@ define half @vreduce_fmax_v128f16(ptr %x) { ret half %red } -declare float @llvm.vector.reduce.fmax.v2f32(<2 x float>) - define float @vreduce_fmax_v2f32(ptr %x) { ; CHECK-LABEL: vreduce_fmax_v2f32: ; CHECK: # %bb.0: @@ -1527,8 +1452,6 @@ define float @vreduce_fmax_v2f32(ptr %x) { ret float %red } -declare float @llvm.vector.reduce.fmax.v4f32(<4 x float>) - define float @vreduce_fmax_v4f32(ptr %x) { ; CHECK-LABEL: vreduce_fmax_v4f32: ; CHECK: # %bb.0: @@ -1568,8 +1491,6 @@ define float @vreduce_fmax_v4f32_nonans_noinfs(ptr %x) { ret float %red } -declare float @llvm.vector.reduce.fmax.v7f32(<7 x float>) - define float @vreduce_fmax_v7f32(ptr %x) { ; CHECK-LABEL: vreduce_fmax_v7f32: ; CHECK: # %bb.0: @@ -1585,8 +1506,6 @@ define float @vreduce_fmax_v7f32(ptr %x) { ret float %red } -declare float @llvm.vector.reduce.fmax.v128f32(<128 x float>) - define float @vreduce_fmax_v128f32(ptr %x) { ; CHECK-LABEL: vreduce_fmax_v128f32: ; CHECK: # %bb.0: @@ -1610,8 +1529,6 @@ define float @vreduce_fmax_v128f32(ptr %x) { ret float %red } -declare double @llvm.vector.reduce.fmax.v2f64(<2 x double>) - define double @vreduce_fmax_v2f64(ptr %x) { ; CHECK-LABEL: vreduce_fmax_v2f64: ; CHECK: # %bb.0: @@ -1625,8 +1542,6 @@ define double @vreduce_fmax_v2f64(ptr %x) { ret double %red } -declare double @llvm.vector.reduce.fmax.v4f64(<4 x double>) - define double @vreduce_fmax_v4f64(ptr %x) { ; CHECK-LABEL: vreduce_fmax_v4f64: ; CHECK: # %bb.0: @@ -1666,8 +1581,6 @@ define double @vreduce_fmax_v4f64_nonans_noinfs(ptr %x) { ret double %red } -declare double 
@llvm.vector.reduce.fmax.v32f64(<32 x double>) - define double @vreduce_fmax_v32f64(ptr %x) { ; CHECK-LABEL: vreduce_fmax_v32f64: ; CHECK: # %bb.0: @@ -1698,8 +1611,6 @@ define float @vreduce_nsz_fadd_v4f32(ptr %x, float %s) { ret float %red } -declare float @llvm.vector.reduce.fminimum.v2f32(<2 x float>) - define float @vreduce_fminimum_v2f32(ptr %x) { ; CHECK-LABEL: vreduce_fminimum_v2f32: ; CHECK: # %bb.0: @@ -1734,8 +1645,6 @@ define float @vreduce_fminimum_v2f32_nonans(ptr %x) { ret float %red } -declare float @llvm.vector.reduce.fminimum.v4f32(<4 x float>) - define float @vreduce_fminimum_v4f32(ptr %x) { ; CHECK-LABEL: vreduce_fminimum_v4f32: ; CHECK: # %bb.0: @@ -1770,8 +1679,6 @@ define float @vreduce_fminimum_v4f32_nonans(ptr %x) { ret float %red } -declare float @llvm.vector.reduce.fminimum.v7f32(<7 x float>) - define float @vreduce_fminimum_v7f32(ptr %x) { ; CHECK-LABEL: vreduce_fminimum_v7f32: ; CHECK: # %bb.0: @@ -1810,8 +1717,6 @@ define float @vreduce_fminimum_v7f32_nonans(ptr %x) { ret float %red } -declare float @llvm.vector.reduce.fminimum.v8f32(<8 x float>) - define float @vreduce_fminimum_v8f32(ptr %x) { ; CHECK-LABEL: vreduce_fminimum_v8f32: ; CHECK: # %bb.0: @@ -1846,8 +1751,6 @@ define float @vreduce_fminimum_v8f32_nonans(ptr %x) { ret float %red } -declare float @llvm.vector.reduce.fminimum.v16f32(<16 x float>) - define float @vreduce_fminimum_v16f32(ptr %x) { ; CHECK-LABEL: vreduce_fminimum_v16f32: ; CHECK: # %bb.0: @@ -1882,8 +1785,6 @@ define float @vreduce_fminimum_v16f32_nonans(ptr %x) { ret float %red } -declare float @llvm.vector.reduce.fminimum.v32f32(<32 x float>) - define float @vreduce_fminimum_v32f32(ptr %x) { ; CHECK-LABEL: vreduce_fminimum_v32f32: ; CHECK: # %bb.0: @@ -1920,8 +1821,6 @@ define float @vreduce_fminimum_v32f32_nonans(ptr %x) { ret float %red } -declare float @llvm.vector.reduce.fminimum.v64f32(<64 x float>) - define float @vreduce_fminimum_v64f32(ptr %x) { ; CHECK-LABEL: vreduce_fminimum_v64f32: ; CHECK: # %bb.0: 
@@ -1969,8 +1868,6 @@ define float @vreduce_fminimum_v64f32_nonans(ptr %x) { ret float %red } -declare float @llvm.vector.reduce.fminimum.v128f32(<128 x float>) - define float @vreduce_fminimum_v128f32(ptr %x) { ; CHECK-LABEL: vreduce_fminimum_v128f32: ; CHECK: # %bb.0: @@ -2080,8 +1977,6 @@ define float @vreduce_fminimum_v128f32_nonans(ptr %x) { ret float %red } -declare double @llvm.vector.reduce.fminimum.v2f64(<2 x double>) - define double @vreduce_fminimum_v2f64(ptr %x) { ; RV32-LABEL: vreduce_fminimum_v2f64: ; RV32: # %bb.0: @@ -2133,8 +2028,6 @@ define double @vreduce_fminimum_v2f64_nonans(ptr %x) { ret double %red } -declare double @llvm.vector.reduce.fminimum.v4f64(<4 x double>) - define double @vreduce_fminimum_v4f64(ptr %x) { ; RV32-LABEL: vreduce_fminimum_v4f64: ; RV32: # %bb.0: @@ -2186,8 +2079,6 @@ define double @vreduce_fminimum_v4f64_nonans(ptr %x) { ret double %red } -declare double @llvm.vector.reduce.fminimum.v8f64(<8 x double>) - define double @vreduce_fminimum_v8f64(ptr %x) { ; RV32-LABEL: vreduce_fminimum_v8f64: ; RV32: # %bb.0: @@ -2239,8 +2130,6 @@ define double @vreduce_fminimum_v8f64_nonans(ptr %x) { ret double %red } -declare double @llvm.vector.reduce.fminimum.v16f64(<16 x double>) - define double @vreduce_fminimum_v16f64(ptr %x) { ; RV32-LABEL: vreduce_fminimum_v16f64: ; RV32: # %bb.0: @@ -2292,8 +2181,6 @@ define double @vreduce_fminimum_v16f64_nonans(ptr %x) { ret double %red } -declare double @llvm.vector.reduce.fminimum.v32f64(<32 x double>) - define double @vreduce_fminimum_v32f64(ptr %x) { ; RV32-LABEL: vreduce_fminimum_v32f64: ; RV32: # %bb.0: @@ -2364,8 +2251,6 @@ define double @vreduce_fminimum_v32f64_nonans(ptr %x) { ret double %red } -declare double @llvm.vector.reduce.fminimum.v64f64(<64 x double>) - define double @vreduce_fminimum_v64f64(ptr %x) { ; RV32-LABEL: vreduce_fminimum_v64f64: ; RV32: # %bb.0: @@ -2554,8 +2439,6 @@ define double @vreduce_fminimum_v64f64_nonans(ptr %x) { ret double %red } -declare float 
@llvm.vector.reduce.fmaximum.v2f32(<2 x float>) - define float @vreduce_fmaximum_v2f32(ptr %x) { ; CHECK-LABEL: vreduce_fmaximum_v2f32: ; CHECK: # %bb.0: @@ -2590,8 +2473,6 @@ define float @vreduce_fmaximum_v2f32_nonans(ptr %x) { ret float %red } -declare float @llvm.vector.reduce.fmaximum.v4f32(<4 x float>) - define float @vreduce_fmaximum_v4f32(ptr %x) { ; CHECK-LABEL: vreduce_fmaximum_v4f32: ; CHECK: # %bb.0: @@ -2626,8 +2507,6 @@ define float @vreduce_fmaximum_v4f32_nonans(ptr %x) { ret float %red } -declare float @llvm.vector.reduce.fmaximum.v7f32(<7 x float>) - define float @vreduce_fmaximum_v7f32(ptr %x) { ; CHECK-LABEL: vreduce_fmaximum_v7f32: ; CHECK: # %bb.0: @@ -2666,8 +2545,6 @@ define float @vreduce_fmaximum_v7f32_nonans(ptr %x) { ret float %red } -declare float @llvm.vector.reduce.fmaximum.v8f32(<8 x float>) - define float @vreduce_fmaximum_v8f32(ptr %x) { ; CHECK-LABEL: vreduce_fmaximum_v8f32: ; CHECK: # %bb.0: @@ -2702,8 +2579,6 @@ define float @vreduce_fmaximum_v8f32_nonans(ptr %x) { ret float %red } -declare float @llvm.vector.reduce.fmaximum.v16f32(<16 x float>) - define float @vreduce_fmaximum_v16f32(ptr %x) { ; CHECK-LABEL: vreduce_fmaximum_v16f32: ; CHECK: # %bb.0: @@ -2738,8 +2613,6 @@ define float @vreduce_fmaximum_v16f32_nonans(ptr %x) { ret float %red } -declare float @llvm.vector.reduce.fmaximum.v32f32(<32 x float>) - define float @vreduce_fmaximum_v32f32(ptr %x) { ; CHECK-LABEL: vreduce_fmaximum_v32f32: ; CHECK: # %bb.0: @@ -2776,8 +2649,6 @@ define float @vreduce_fmaximum_v32f32_nonans(ptr %x) { ret float %red } -declare float @llvm.vector.reduce.fmaximum.v64f32(<64 x float>) - define float @vreduce_fmaximum_v64f32(ptr %x) { ; CHECK-LABEL: vreduce_fmaximum_v64f32: ; CHECK: # %bb.0: @@ -2825,8 +2696,6 @@ define float @vreduce_fmaximum_v64f32_nonans(ptr %x) { ret float %red } -declare float @llvm.vector.reduce.fmaximum.v128f32(<128 x float>) - define float @vreduce_fmaximum_v128f32(ptr %x) { ; CHECK-LABEL: vreduce_fmaximum_v128f32: ; 
CHECK: # %bb.0: @@ -2936,8 +2805,6 @@ define float @vreduce_fmaximum_v128f32_nonans(ptr %x) { ret float %red } -declare double @llvm.vector.reduce.fmaximum.v2f64(<2 x double>) - define double @vreduce_fmaximum_v2f64(ptr %x) { ; RV32-LABEL: vreduce_fmaximum_v2f64: ; RV32: # %bb.0: @@ -2989,8 +2856,6 @@ define double @vreduce_fmaximum_v2f64_nonans(ptr %x) { ret double %red } -declare double @llvm.vector.reduce.fmaximum.v4f64(<4 x double>) - define double @vreduce_fmaximum_v4f64(ptr %x) { ; RV32-LABEL: vreduce_fmaximum_v4f64: ; RV32: # %bb.0: @@ -3042,8 +2907,6 @@ define double @vreduce_fmaximum_v4f64_nonans(ptr %x) { ret double %red } -declare double @llvm.vector.reduce.fmaximum.v8f64(<8 x double>) - define double @vreduce_fmaximum_v8f64(ptr %x) { ; RV32-LABEL: vreduce_fmaximum_v8f64: ; RV32: # %bb.0: @@ -3095,8 +2958,6 @@ define double @vreduce_fmaximum_v8f64_nonans(ptr %x) { ret double %red } -declare double @llvm.vector.reduce.fmaximum.v16f64(<16 x double>) - define double @vreduce_fmaximum_v16f64(ptr %x) { ; RV32-LABEL: vreduce_fmaximum_v16f64: ; RV32: # %bb.0: @@ -3148,8 +3009,6 @@ define double @vreduce_fmaximum_v16f64_nonans(ptr %x) { ret double %red } -declare double @llvm.vector.reduce.fmaximum.v32f64(<32 x double>) - define double @vreduce_fmaximum_v32f64(ptr %x) { ; RV32-LABEL: vreduce_fmaximum_v32f64: ; RV32: # %bb.0: @@ -3220,8 +3079,6 @@ define double @vreduce_fmaximum_v32f64_nonans(ptr %x) { ret double %red } -declare double @llvm.vector.reduce.fmaximum.v64f64(<64 x double>) - define double @vreduce_fmaximum_v64f64(ptr %x) { ; RV32-LABEL: vreduce_fmaximum_v64f64: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll index dfe8f358b7782..3e77020ed0213 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v 
-verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare i8 @llvm.vp.reduce.add.v2i8(i8, <2 x i8>, <2 x i1>, i32) - define signext i8 @vpreduce_add_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_v2i8: ; CHECK: # %bb.0: @@ -19,8 +17,6 @@ define signext i8 @vpreduce_add_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i3 ret i8 %r } -declare i8 @llvm.vp.reduce.umax.v2i8(i8, <2 x i8>, <2 x i1>, i32) - define signext i8 @vpreduce_umax_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_v2i8: ; CHECK: # %bb.0: @@ -34,8 +30,6 @@ define signext i8 @vpreduce_umax_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i ret i8 %r } -declare i8 @llvm.vp.reduce.smax.v2i8(i8, <2 x i8>, <2 x i1>, i32) - define signext i8 @vpreduce_smax_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_v2i8: ; CHECK: # %bb.0: @@ -49,8 +43,6 @@ define signext i8 @vpreduce_smax_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i ret i8 %r } -declare i8 @llvm.vp.reduce.umin.v2i8(i8, <2 x i8>, <2 x i1>, i32) - define signext i8 @vpreduce_umin_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_v2i8: ; CHECK: # %bb.0: @@ -64,8 +56,6 @@ define signext i8 @vpreduce_umin_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i ret i8 %r } -declare i8 @llvm.vp.reduce.smin.v2i8(i8, <2 x i8>, <2 x i1>, i32) - define signext i8 @vpreduce_smin_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_v2i8: ; CHECK: # %bb.0: @@ -79,8 +69,6 @@ define signext i8 @vpreduce_smin_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i ret i8 %r } -declare i8 @llvm.vp.reduce.and.v2i8(i8, <2 x i8>, <2 x i1>, i32) - define signext i8 @vpreduce_and_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_v2i8: ; CHECK: # %bb.0: @@ -94,8 +82,6 @@ define signext i8 
@vpreduce_and_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i3 ret i8 %r } -declare i8 @llvm.vp.reduce.or.v2i8(i8, <2 x i8>, <2 x i1>, i32) - define signext i8 @vpreduce_or_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_v2i8: ; CHECK: # %bb.0: @@ -109,8 +95,6 @@ define signext i8 @vpreduce_or_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 ret i8 %r } -declare i8 @llvm.vp.reduce.xor.v2i8(i8, <2 x i8>, <2 x i1>, i32) - define signext i8 @vpreduce_xor_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_v2i8: ; CHECK: # %bb.0: @@ -124,8 +108,6 @@ define signext i8 @vpreduce_xor_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i3 ret i8 %r } -declare i8 @llvm.vp.reduce.umin.v3i8(i8, <3 x i8>, <3 x i1>, i32) - define signext i8 @vpreduce_umin_v3i8(i8 signext %s, <3 x i8> %v, <3 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_v3i8: ; CHECK: # %bb.0: @@ -139,8 +121,6 @@ define signext i8 @vpreduce_umin_v3i8(i8 signext %s, <3 x i8> %v, <3 x i1> %m, i ret i8 %r } -declare i8 @llvm.vp.reduce.add.v4i8(i8, <4 x i8>, <4 x i1>, i32) - define signext i8 @vpreduce_add_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_v4i8: ; CHECK: # %bb.0: @@ -154,8 +134,6 @@ define signext i8 @vpreduce_add_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i3 ret i8 %r } -declare i8 @llvm.vp.reduce.umax.v4i8(i8, <4 x i8>, <4 x i1>, i32) - define signext i8 @vpreduce_umax_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_v4i8: ; CHECK: # %bb.0: @@ -169,8 +147,6 @@ define signext i8 @vpreduce_umax_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i ret i8 %r } -declare i8 @llvm.vp.reduce.smax.v4i8(i8, <4 x i8>, <4 x i1>, i32) - define signext i8 @vpreduce_smax_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_v4i8: ; CHECK: # %bb.0: @@ -184,8 +160,6 @@ define signext i8 
@vpreduce_smax_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i ret i8 %r } -declare i8 @llvm.vp.reduce.umin.v4i8(i8, <4 x i8>, <4 x i1>, i32) - define signext i8 @vpreduce_umin_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_v4i8: ; CHECK: # %bb.0: @@ -199,8 +173,6 @@ define signext i8 @vpreduce_umin_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i ret i8 %r } -declare i8 @llvm.vp.reduce.smin.v4i8(i8, <4 x i8>, <4 x i1>, i32) - define signext i8 @vpreduce_smin_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_v4i8: ; CHECK: # %bb.0: @@ -214,8 +186,6 @@ define signext i8 @vpreduce_smin_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i ret i8 %r } -declare i8 @llvm.vp.reduce.and.v4i8(i8, <4 x i8>, <4 x i1>, i32) - define signext i8 @vpreduce_and_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_v4i8: ; CHECK: # %bb.0: @@ -229,8 +199,6 @@ define signext i8 @vpreduce_and_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i3 ret i8 %r } -declare i8 @llvm.vp.reduce.or.v4i8(i8, <4 x i8>, <4 x i1>, i32) - define signext i8 @vpreduce_or_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_v4i8: ; CHECK: # %bb.0: @@ -244,8 +212,6 @@ define signext i8 @vpreduce_or_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 ret i8 %r } -declare i8 @llvm.vp.reduce.xor.v4i8(i8, <4 x i8>, <4 x i1>, i32) - define signext i8 @vpreduce_xor_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_v4i8: ; CHECK: # %bb.0: @@ -259,8 +225,6 @@ define signext i8 @vpreduce_xor_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i3 ret i8 %r } -declare i16 @llvm.vp.reduce.add.v2i16(i16, <2 x i16>, <2 x i1>, i32) - define signext i16 @vpreduce_add_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_v2i16: ; CHECK: # %bb.0: @@ -274,8 +238,6 @@ define signext i16 
@vpreduce_add_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m ret i16 %r } -declare i16 @llvm.vp.reduce.umax.v2i16(i16, <2 x i16>, <2 x i1>, i32) - define signext i16 @vpreduce_umax_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_v2i16: ; CHECK: # %bb.0: @@ -289,8 +251,6 @@ define signext i16 @vpreduce_umax_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> % ret i16 %r } -declare i16 @llvm.vp.reduce.smax.v2i16(i16, <2 x i16>, <2 x i1>, i32) - define signext i16 @vpreduce_smax_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_v2i16: ; CHECK: # %bb.0: @@ -304,8 +264,6 @@ define signext i16 @vpreduce_smax_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> % ret i16 %r } -declare i16 @llvm.vp.reduce.umin.v2i16(i16, <2 x i16>, <2 x i1>, i32) - define signext i16 @vpreduce_umin_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_v2i16: ; CHECK: # %bb.0: @@ -319,8 +277,6 @@ define signext i16 @vpreduce_umin_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> % ret i16 %r } -declare i16 @llvm.vp.reduce.smin.v2i16(i16, <2 x i16>, <2 x i1>, i32) - define signext i16 @vpreduce_smin_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_v2i16: ; CHECK: # %bb.0: @@ -334,8 +290,6 @@ define signext i16 @vpreduce_smin_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> % ret i16 %r } -declare i16 @llvm.vp.reduce.and.v2i16(i16, <2 x i16>, <2 x i1>, i32) - define signext i16 @vpreduce_and_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_v2i16: ; CHECK: # %bb.0: @@ -349,8 +303,6 @@ define signext i16 @vpreduce_and_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m ret i16 %r } -declare i16 @llvm.vp.reduce.or.v2i16(i16, <2 x i16>, <2 x i1>, i32) - define signext i16 @vpreduce_or_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_v2i16: ; 
CHECK: # %bb.0: @@ -364,8 +316,6 @@ define signext i16 @vpreduce_or_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m, ret i16 %r } -declare i16 @llvm.vp.reduce.xor.v2i16(i16, <2 x i16>, <2 x i1>, i32) - define signext i16 @vpreduce_xor_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_v2i16: ; CHECK: # %bb.0: @@ -379,8 +329,6 @@ define signext i16 @vpreduce_xor_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m ret i16 %r } -declare i16 @llvm.vp.reduce.add.v4i16(i16, <4 x i16>, <4 x i1>, i32) - define signext i16 @vpreduce_add_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_v4i16: ; CHECK: # %bb.0: @@ -394,8 +342,6 @@ define signext i16 @vpreduce_add_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m ret i16 %r } -declare i16 @llvm.vp.reduce.umax.v4i16(i16, <4 x i16>, <4 x i1>, i32) - define signext i16 @vpreduce_umax_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_v4i16: ; CHECK: # %bb.0: @@ -409,8 +355,6 @@ define signext i16 @vpreduce_umax_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> % ret i16 %r } -declare i16 @llvm.vp.reduce.smax.v4i16(i16, <4 x i16>, <4 x i1>, i32) - define signext i16 @vpreduce_smax_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_v4i16: ; CHECK: # %bb.0: @@ -424,8 +368,6 @@ define signext i16 @vpreduce_smax_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> % ret i16 %r } -declare i16 @llvm.vp.reduce.umin.v4i16(i16, <4 x i16>, <4 x i1>, i32) - define signext i16 @vpreduce_umin_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_v4i16: ; CHECK: # %bb.0: @@ -439,8 +381,6 @@ define signext i16 @vpreduce_umin_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> % ret i16 %r } -declare i16 @llvm.vp.reduce.smin.v4i16(i16, <4 x i16>, <4 x i1>, i32) - define signext i16 @vpreduce_smin_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, 
i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_v4i16: ; CHECK: # %bb.0: @@ -454,8 +394,6 @@ define signext i16 @vpreduce_smin_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> % ret i16 %r } -declare i16 @llvm.vp.reduce.and.v4i16(i16, <4 x i16>, <4 x i1>, i32) - define signext i16 @vpreduce_and_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_v4i16: ; CHECK: # %bb.0: @@ -469,8 +407,6 @@ define signext i16 @vpreduce_and_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m ret i16 %r } -declare i16 @llvm.vp.reduce.or.v4i16(i16, <4 x i16>, <4 x i1>, i32) - define signext i16 @vpreduce_or_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_v4i16: ; CHECK: # %bb.0: @@ -484,8 +420,6 @@ define signext i16 @vpreduce_or_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, ret i16 %r } -declare i16 @llvm.vp.reduce.xor.v4i16(i16, <4 x i16>, <4 x i1>, i32) - define signext i16 @vpreduce_xor_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_v4i16: ; CHECK: # %bb.0: @@ -499,8 +433,6 @@ define signext i16 @vpreduce_xor_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m ret i16 %r } -declare i32 @llvm.vp.reduce.add.v2i32(i32, <2 x i32>, <2 x i1>, i32) - define signext i32 @vpreduce_add_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_v2i32: ; CHECK: # %bb.0: @@ -514,8 +446,6 @@ define signext i32 @vpreduce_add_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m ret i32 %r } -declare i32 @llvm.vp.reduce.umax.v2i32(i32, <2 x i32>, <2 x i1>, i32) - define signext i32 @vpreduce_umax_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_v2i32: ; CHECK: # %bb.0: @@ -529,8 +459,6 @@ define signext i32 @vpreduce_umax_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> % ret i32 %r } -declare i32 @llvm.vp.reduce.smax.v2i32(i32, <2 x i32>, <2 x i1>, i32) - define signext i32 
@vpreduce_smax_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_v2i32: ; CHECK: # %bb.0: @@ -544,8 +472,6 @@ define signext i32 @vpreduce_smax_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> % ret i32 %r } -declare i32 @llvm.vp.reduce.umin.v2i32(i32, <2 x i32>, <2 x i1>, i32) - define signext i32 @vpreduce_umin_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_v2i32: ; CHECK: # %bb.0: @@ -559,8 +485,6 @@ define signext i32 @vpreduce_umin_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> % ret i32 %r } -declare i32 @llvm.vp.reduce.smin.v2i32(i32, <2 x i32>, <2 x i1>, i32) - define signext i32 @vpreduce_smin_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_v2i32: ; CHECK: # %bb.0: @@ -574,8 +498,6 @@ define signext i32 @vpreduce_smin_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> % ret i32 %r } -declare i32 @llvm.vp.reduce.and.v2i32(i32, <2 x i32>, <2 x i1>, i32) - define signext i32 @vpreduce_and_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_v2i32: ; CHECK: # %bb.0: @@ -589,8 +511,6 @@ define signext i32 @vpreduce_and_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m ret i32 %r } -declare i32 @llvm.vp.reduce.or.v2i32(i32, <2 x i32>, <2 x i1>, i32) - define signext i32 @vpreduce_or_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_v2i32: ; CHECK: # %bb.0: @@ -604,8 +524,6 @@ define signext i32 @vpreduce_or_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, ret i32 %r } -declare i32 @llvm.vp.reduce.xor.v2i32(i32, <2 x i32>, <2 x i1>, i32) - define signext i32 @vpreduce_xor_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_v2i32: ; CHECK: # %bb.0: @@ -619,8 +537,6 @@ define signext i32 @vpreduce_xor_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m ret i32 %r } -declare i32 
@llvm.vp.reduce.add.v4i32(i32, <4 x i32>, <4 x i1>, i32) - define signext i32 @vpreduce_add_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_v4i32: ; CHECK: # %bb.0: @@ -634,8 +550,6 @@ define signext i32 @vpreduce_add_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m ret i32 %r } -declare i32 @llvm.vp.reduce.umax.v4i32(i32, <4 x i32>, <4 x i1>, i32) - define signext i32 @vpreduce_umax_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_v4i32: ; CHECK: # %bb.0: @@ -649,8 +563,6 @@ define signext i32 @vpreduce_umax_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> % ret i32 %r } -declare i32 @llvm.vp.reduce.smax.v4i32(i32, <4 x i32>, <4 x i1>, i32) - define signext i32 @vpreduce_smax_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_v4i32: ; CHECK: # %bb.0: @@ -664,8 +576,6 @@ define signext i32 @vpreduce_smax_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> % ret i32 %r } -declare i32 @llvm.vp.reduce.umin.v4i32(i32, <4 x i32>, <4 x i1>, i32) - define signext i32 @vpreduce_umin_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_v4i32: ; CHECK: # %bb.0: @@ -679,8 +589,6 @@ define signext i32 @vpreduce_umin_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> % ret i32 %r } -declare i32 @llvm.vp.reduce.smin.v4i32(i32, <4 x i32>, <4 x i1>, i32) - define signext i32 @vpreduce_smin_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_v4i32: ; CHECK: # %bb.0: @@ -694,8 +602,6 @@ define signext i32 @vpreduce_smin_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> % ret i32 %r } -declare i32 @llvm.vp.reduce.and.v4i32(i32, <4 x i32>, <4 x i1>, i32) - define signext i32 @vpreduce_and_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_v4i32: ; CHECK: # %bb.0: @@ -709,8 +615,6 @@ define signext i32 @vpreduce_and_v4i32(i32 
signext %s, <4 x i32> %v, <4 x i1> %m ret i32 %r } -declare i32 @llvm.vp.reduce.or.v4i32(i32, <4 x i32>, <4 x i1>, i32) - define signext i32 @vpreduce_or_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_v4i32: ; CHECK: # %bb.0: @@ -724,8 +628,6 @@ define signext i32 @vpreduce_or_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, ret i32 %r } -declare i32 @llvm.vp.reduce.xor.v4i32(i32, <4 x i32>, <4 x i1>, i32) - define signext i32 @vpreduce_xor_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_v4i32: ; CHECK: # %bb.0: @@ -739,8 +641,6 @@ define signext i32 @vpreduce_xor_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m ret i32 %r } -declare i32 @llvm.vp.reduce.xor.v64i32(i32, <64 x i32>, <64 x i1>, i32) - define signext i32 @vpreduce_xor_v64i32(i32 signext %s, <64 x i32> %v, <64 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_v64i32: ; CHECK: # %bb.0: @@ -769,8 +669,6 @@ define signext i32 @vpreduce_xor_v64i32(i32 signext %s, <64 x i32> %v, <64 x i1> ret i32 %r } -declare i64 @llvm.vp.reduce.add.v2i64(i64, <2 x i64>, <2 x i1>, i32) - define signext i64 @vpreduce_add_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_add_v2i64: ; RV32: # %bb.0: @@ -804,8 +702,6 @@ define signext i64 @vpreduce_add_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m ret i64 %r } -declare i64 @llvm.vp.reduce.umax.v2i64(i64, <2 x i64>, <2 x i1>, i32) - define signext i64 @vpreduce_umax_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_umax_v2i64: ; RV32: # %bb.0: @@ -839,8 +735,6 @@ define signext i64 @vpreduce_umax_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> % ret i64 %r } -declare i64 @llvm.vp.reduce.smax.v2i64(i64, <2 x i64>, <2 x i1>, i32) - define signext i64 @vpreduce_smax_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_smax_v2i64: ; RV32: # %bb.0: @@ -874,8 
+768,6 @@ define signext i64 @vpreduce_smax_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> % ret i64 %r } -declare i64 @llvm.vp.reduce.umin.v2i64(i64, <2 x i64>, <2 x i1>, i32) - define signext i64 @vpreduce_umin_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_umin_v2i64: ; RV32: # %bb.0: @@ -909,8 +801,6 @@ define signext i64 @vpreduce_umin_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> % ret i64 %r } -declare i64 @llvm.vp.reduce.smin.v2i64(i64, <2 x i64>, <2 x i1>, i32) - define signext i64 @vpreduce_smin_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_smin_v2i64: ; RV32: # %bb.0: @@ -944,8 +834,6 @@ define signext i64 @vpreduce_smin_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> % ret i64 %r } -declare i64 @llvm.vp.reduce.and.v2i64(i64, <2 x i64>, <2 x i1>, i32) - define signext i64 @vpreduce_and_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_and_v2i64: ; RV32: # %bb.0: @@ -979,8 +867,6 @@ define signext i64 @vpreduce_and_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m ret i64 %r } -declare i64 @llvm.vp.reduce.or.v2i64(i64, <2 x i64>, <2 x i1>, i32) - define signext i64 @vpreduce_or_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_or_v2i64: ; RV32: # %bb.0: @@ -1014,8 +900,6 @@ define signext i64 @vpreduce_or_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m, ret i64 %r } -declare i64 @llvm.vp.reduce.xor.v2i64(i64, <2 x i64>, <2 x i1>, i32) - define signext i64 @vpreduce_xor_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_xor_v2i64: ; RV32: # %bb.0: @@ -1049,8 +933,6 @@ define signext i64 @vpreduce_xor_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m ret i64 %r } -declare i64 @llvm.vp.reduce.add.v4i64(i64, <4 x i64>, <4 x i1>, i32) - define signext i64 @vpreduce_add_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: 
vpreduce_add_v4i64: ; RV32: # %bb.0: @@ -1084,8 +966,6 @@ define signext i64 @vpreduce_add_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m ret i64 %r } -declare i64 @llvm.vp.reduce.umax.v4i64(i64, <4 x i64>, <4 x i1>, i32) - define signext i64 @vpreduce_umax_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_umax_v4i64: ; RV32: # %bb.0: @@ -1119,8 +999,6 @@ define signext i64 @vpreduce_umax_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> % ret i64 %r } -declare i64 @llvm.vp.reduce.smax.v4i64(i64, <4 x i64>, <4 x i1>, i32) - define signext i64 @vpreduce_smax_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_smax_v4i64: ; RV32: # %bb.0: @@ -1154,8 +1032,6 @@ define signext i64 @vpreduce_smax_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> % ret i64 %r } -declare i64 @llvm.vp.reduce.umin.v4i64(i64, <4 x i64>, <4 x i1>, i32) - define signext i64 @vpreduce_umin_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_umin_v4i64: ; RV32: # %bb.0: @@ -1189,8 +1065,6 @@ define signext i64 @vpreduce_umin_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> % ret i64 %r } -declare i64 @llvm.vp.reduce.smin.v4i64(i64, <4 x i64>, <4 x i1>, i32) - define signext i64 @vpreduce_smin_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_smin_v4i64: ; RV32: # %bb.0: @@ -1224,8 +1098,6 @@ define signext i64 @vpreduce_smin_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> % ret i64 %r } -declare i64 @llvm.vp.reduce.and.v4i64(i64, <4 x i64>, <4 x i1>, i32) - define signext i64 @vpreduce_and_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_and_v4i64: ; RV32: # %bb.0: @@ -1259,8 +1131,6 @@ define signext i64 @vpreduce_and_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m ret i64 %r } -declare i64 @llvm.vp.reduce.or.v4i64(i64, <4 x i64>, <4 x i1>, i32) - define signext i64 @vpreduce_or_v4i64(i64 signext %s, <4 x 
i64> %v, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_or_v4i64: ; RV32: # %bb.0: @@ -1294,8 +1164,6 @@ define signext i64 @vpreduce_or_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m, ret i64 %r } -declare i64 @llvm.vp.reduce.xor.v4i64(i64, <4 x i64>, <4 x i1>, i32) - define signext i64 @vpreduce_xor_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_xor_v4i64: ; RV32: # %bb.0: @@ -1329,8 +1197,6 @@ define signext i64 @vpreduce_xor_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m ret i64 %r } -declare i8 @llvm.vp.reduce.mul.v1i8(i8, <1 x i8>, <1 x i1>, i32) - define i8 @vpreduce_mul_v1i8(i8 %s, <1 x i8> %v, <1 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_mul_v1i8: ; RV32: # %bb.0: @@ -1381,8 +1247,6 @@ define i8 @vpreduce_mul_v1i8(i8 %s, <1 x i8> %v, <1 x i1> %m, i32 zeroext %evl) ret i8 %r } -declare i8 @llvm.vp.reduce.mul.v2i8(i8, <2 x i8>, <2 x i1>, i32) - define signext i8 @vpreduce_mul_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_mul_v2i8: ; RV32: # %bb.0: @@ -1441,8 +1305,6 @@ define signext i8 @vpreduce_mul_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i3 ret i8 %r } -declare i8 @llvm.vp.reduce.mul.v4i8(i8, <4 x i8>, <4 x i1>, i32) - define signext i8 @vpreduce_mul_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_mul_v4i8: ; RV32: # %bb.0: @@ -1505,8 +1367,6 @@ define signext i8 @vpreduce_mul_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i3 ret i8 %r } -declare i8 @llvm.vp.reduce.mul.v8i8(i8, <8 x i8>, <8 x i1>, i32) - define signext i8 @vpreduce_mul_v8i8(i8 signext %s, <8 x i8> %v, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_mul_v8i8: ; RV32: # %bb.0: @@ -1573,8 +1433,6 @@ define signext i8 @vpreduce_mul_v8i8(i8 signext %s, <8 x i8> %v, <8 x i1> %m, i3 ret i8 %r } -declare i8 @llvm.vp.reduce.mul.v16i8(i8, <16 x i8>, <16 x i1>, i32) - define signext i8 @vpreduce_mul_v16i8(i8 signext %s, <16 x i8> %v, <16 x 
i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_mul_v16i8: ; RV32: # %bb.0: @@ -1645,8 +1503,6 @@ define signext i8 @vpreduce_mul_v16i8(i8 signext %s, <16 x i8> %v, <16 x i1> %m, ret i8 %r } -declare i8 @llvm.vp.reduce.mul.v32i8(i8, <32 x i8>, <32 x i1>, i32) - define signext i8 @vpreduce_mul_v32i8(i8 signext %s, <32 x i8> %v, <32 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_mul_v32i8: ; RV32: # %bb.0: @@ -1723,8 +1579,6 @@ define signext i8 @vpreduce_mul_v32i8(i8 signext %s, <32 x i8> %v, <32 x i1> %m, ret i8 %r } -declare i8 @llvm.vp.reduce.mul.v64i8(i8, <64 x i8>, <64 x i1>, i32) - define signext i8 @vpreduce_mul_v64i8(i8 signext %s, <64 x i8> %v, <64 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_mul_v64i8: ; RV32: # %bb.0: @@ -1830,7 +1684,6 @@ define zeroext i8 @front_ele_v4i8(<4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) { } ; Test start value is the first element of a vector which longer than M1. -declare i8 @llvm.vp.reduce.and.v32i8(i8, <32 x i8>, <32 x i1>, i32) define zeroext i8 @front_ele_v32i8(<32 x i8> %v, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: front_ele_v32i8: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll index d3a36525115c8..9725bb37c679b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32 ; RUN: llc -mtriple=riscv64 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 -declare i8 @llvm.vector.reduce.add.v1i8(<1 x i8>) - define i8 @vreduce_add_v1i8(<1 x i8> %v) { ; CHECK-LABEL: vreduce_add_v1i8: ; CHECK: # %bb.0: @@ -14,8 +12,6 @@ define i8 @vreduce_add_v1i8(<1 x i8> %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.add.v2i8(<2 x i8>) - define i8 @vreduce_add_v2i8(ptr %x) { ; 
CHECK-LABEL: vreduce_add_v2i8: ; CHECK: # %bb.0: @@ -30,8 +26,6 @@ define i8 @vreduce_add_v2i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.add.v3i8(<3 x i8>) - define i8 @vreduce_add_v3i8(ptr %x) { ; CHECK-LABEL: vreduce_add_v3i8: ; CHECK: # %bb.0: @@ -46,8 +40,6 @@ define i8 @vreduce_add_v3i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.add.v4i8(<4 x i8>) - define i8 @vreduce_add_v4i8(ptr %x) { ; CHECK-LABEL: vreduce_add_v4i8: ; CHECK: # %bb.0: @@ -62,8 +54,6 @@ define i8 @vreduce_add_v4i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.add.v8i8(<8 x i8>) - define i8 @vreduce_add_v8i8(ptr %x) { ; CHECK-LABEL: vreduce_add_v8i8: ; CHECK: # %bb.0: @@ -78,8 +68,6 @@ define i8 @vreduce_add_v8i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.add.v16i8(<16 x i8>) - define i8 @vreduce_add_v16i8(ptr %x) { ; CHECK-LABEL: vreduce_add_v16i8: ; CHECK: # %bb.0: @@ -94,8 +82,6 @@ define i8 @vreduce_add_v16i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.add.v32i8(<32 x i8>) - define i8 @vreduce_add_v32i8(ptr %x) { ; CHECK-LABEL: vreduce_add_v32i8: ; CHECK: # %bb.0: @@ -111,8 +97,6 @@ define i8 @vreduce_add_v32i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.add.v64i8(<64 x i8>) - define i8 @vreduce_add_v64i8(ptr %x) { ; CHECK-LABEL: vreduce_add_v64i8: ; CHECK: # %bb.0: @@ -128,8 +112,6 @@ define i8 @vreduce_add_v64i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.add.v128i8(<128 x i8>) - define i8 @vreduce_add_v128i8(ptr %x) { ; CHECK-LABEL: vreduce_add_v128i8: ; CHECK: # %bb.0: @@ -145,8 +127,6 @@ define i8 @vreduce_add_v128i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.add.v256i8(<256 x i8>) - define i8 @vreduce_add_v256i8(ptr %x) { ; CHECK-LABEL: vreduce_add_v256i8: ; CHECK: # %bb.0: @@ -165,8 +145,6 @@ define i8 @vreduce_add_v256i8(ptr %x) { ret i8 %red } -declare i16 @llvm.vector.reduce.add.v1i16(<1 x i16>) - define i16 @vreduce_add_v1i16(<1 x i16> %v) { ; CHECK-LABEL: vreduce_add_v1i16: ; 
CHECK: # %bb.0: @@ -201,8 +179,6 @@ define i16 @vwreduce_uadd_v1i16(<1 x i8> %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.add.v2i16(<2 x i16>) - define i16 @vreduce_add_v2i16(ptr %x) { ; CHECK-LABEL: vreduce_add_v2i16: ; CHECK: # %bb.0: @@ -251,8 +227,6 @@ define i16 @vwreduce_uadd_v2i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.add.v4i16(<4 x i16>) - define i16 @vreduce_add_v4i16(ptr %x) { ; CHECK-LABEL: vreduce_add_v4i16: ; CHECK: # %bb.0: @@ -301,8 +275,6 @@ define i16 @vwreduce_uadd_v4i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.add.v8i16(<8 x i16>) - define i16 @vreduce_add_v8i16(ptr %x) { ; CHECK-LABEL: vreduce_add_v8i16: ; CHECK: # %bb.0: @@ -351,8 +323,6 @@ define i16 @vwreduce_uadd_v8i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.add.v16i16(<16 x i16>) - define i16 @vreduce_add_v16i16(ptr %x) { ; CHECK-LABEL: vreduce_add_v16i16: ; CHECK: # %bb.0: @@ -401,8 +371,6 @@ define i16 @vwreduce_uadd_v16i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.add.v32i16(<32 x i16>) - define i16 @vreduce_add_v32i16(ptr %x) { ; CHECK-LABEL: vreduce_add_v32i16: ; CHECK: # %bb.0: @@ -454,8 +422,6 @@ define i16 @vwreduce_uadd_v32i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.add.v64i16(<64 x i16>) - define i16 @vreduce_add_v64i16(ptr %x) { ; CHECK-LABEL: vreduce_add_v64i16: ; CHECK: # %bb.0: @@ -507,8 +473,6 @@ define i16 @vwreduce_uadd_v64i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.add.v128i16(<128 x i16>) - define i16 @vreduce_add_v128i16(ptr %x) { ; CHECK-LABEL: vreduce_add_v128i16: ; CHECK: # %bb.0: @@ -571,8 +535,6 @@ define i16 @vwreduce_uadd_v128i16(ptr %x) { ret i16 %red } -declare i32 @llvm.vector.reduce.add.v1i32(<1 x i32>) - define i32 @vreduce_add_v1i32(<1 x i32> %v) { ; CHECK-LABEL: vreduce_add_v1i32: ; CHECK: # %bb.0: @@ -607,8 +569,6 @@ define i32 @vwreduce_uadd_v1i32(<1 x i16> %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.add.v2i32(<2 x i32>) - define 
i32 @vreduce_add_v2i32(ptr %x) { ; CHECK-LABEL: vreduce_add_v2i32: ; CHECK: # %bb.0: @@ -657,8 +617,6 @@ define i32 @vwreduce_uadd_v2i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>) - define i32 @vreduce_add_v4i32(ptr %x) { ; CHECK-LABEL: vreduce_add_v4i32: ; CHECK: # %bb.0: @@ -707,8 +665,6 @@ define i32 @vwreduce_uadd_v4i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.add.v8i32(<8 x i32>) - define i32 @vreduce_add_v8i32(ptr %x) { ; CHECK-LABEL: vreduce_add_v8i32: ; CHECK: # %bb.0: @@ -757,8 +713,6 @@ define i32 @vwreduce_uadd_v8i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.add.v16i32(<16 x i32>) - define i32 @vreduce_add_v16i32(ptr %x) { ; CHECK-LABEL: vreduce_add_v16i32: ; CHECK: # %bb.0: @@ -807,8 +761,6 @@ define i32 @vwreduce_uadd_v16i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.add.v32i32(<32 x i32>) - define i32 @vreduce_add_v32i32(ptr %x) { ; CHECK-LABEL: vreduce_add_v32i32: ; CHECK: # %bb.0: @@ -860,8 +812,6 @@ define i32 @vwreduce_uadd_v32i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.add.v64i32(<64 x i32>) - define i32 @vreduce_add_v64i32(ptr %x) { ; CHECK-LABEL: vreduce_add_v64i32: ; CHECK: # %bb.0: @@ -924,8 +874,6 @@ define i32 @vwreduce_uadd_v64i32(ptr %x) { ret i32 %red } -declare i64 @llvm.vector.reduce.add.v1i64(<1 x i64>) - define i64 @vreduce_add_v1i64(<1 x i64> %v) { ; RV32-LABEL: vreduce_add_v1i64: ; RV32: # %bb.0: @@ -989,8 +937,6 @@ define i64 @vwreduce_uadd_v1i64(<1 x i32> %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.add.v2i64(<2 x i64>) - define i64 @vreduce_add_v2i64(ptr %x) { ; RV32-LABEL: vreduce_add_v2i64: ; RV32: # %bb.0: @@ -1080,8 +1026,6 @@ define i64 @vwreduce_uadd_v2i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.add.v4i64(<4 x i64>) - define i64 @vreduce_add_v4i64(ptr %x) { ; RV32-LABEL: vreduce_add_v4i64: ; RV32: # %bb.0: @@ -1171,8 +1115,6 @@ define i64 @vwreduce_uadd_v4i64(ptr %x) { ret i64 %red } -declare i64 
@llvm.vector.reduce.add.v8i64(<8 x i64>) - define i64 @vreduce_add_v8i64(ptr %x) { ; RV32-LABEL: vreduce_add_v8i64: ; RV32: # %bb.0: @@ -1262,8 +1204,6 @@ define i64 @vwreduce_uadd_v8i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.add.v16i64(<16 x i64>) - define i64 @vreduce_add_v16i64(ptr %x) { ; RV32-LABEL: vreduce_add_v16i64: ; RV32: # %bb.0: @@ -1353,8 +1293,6 @@ define i64 @vwreduce_uadd_v16i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.add.v32i64(<32 x i64>) - define i64 @vreduce_add_v32i64(ptr %x) { ; RV32-LABEL: vreduce_add_v32i64: ; RV32: # %bb.0: @@ -1466,8 +1404,6 @@ define i64 @vwreduce_uadd_v32i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.add.v64i64(<64 x i64>) - define i64 @vreduce_add_v64i64(ptr %x) nounwind { ; RV32-LABEL: vreduce_add_v64i64: ; RV32: # %bb.0: @@ -1645,8 +1581,6 @@ define i64 @vwreduce_uadd_v64i64(ptr %x) { ret i64 %red } -declare i8 @llvm.vector.reduce.and.v1i8(<1 x i8>) - define i8 @vreduce_and_v1i8(<1 x i8> %v) { ; CHECK-LABEL: vreduce_and_v1i8: ; CHECK: # %bb.0: @@ -1657,8 +1591,6 @@ define i8 @vreduce_and_v1i8(<1 x i8> %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.and.v2i8(<2 x i8>) - define i8 @vreduce_and_v2i8(ptr %x) { ; CHECK-LABEL: vreduce_and_v2i8: ; CHECK: # %bb.0: @@ -1672,8 +1604,6 @@ define i8 @vreduce_and_v2i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.and.v3i8(<3 x i8>) - define i8 @vreduce_and_v3i8(ptr %x) { ; CHECK-LABEL: vreduce_and_v3i8: ; CHECK: # %bb.0: @@ -1689,9 +1619,6 @@ define i8 @vreduce_and_v3i8(ptr %x) { ret i8 %red } - -declare i8 @llvm.vector.reduce.and.v4i8(<4 x i8>) - define i8 @vreduce_and_v4i8(ptr %x) { ; CHECK-LABEL: vreduce_and_v4i8: ; CHECK: # %bb.0: @@ -1705,8 +1632,6 @@ define i8 @vreduce_and_v4i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.and.v8i8(<8 x i8>) - define i8 @vreduce_and_v8i8(ptr %x) { ; CHECK-LABEL: vreduce_and_v8i8: ; CHECK: # %bb.0: @@ -1720,8 +1645,6 @@ define i8 @vreduce_and_v8i8(ptr %x) { ret i8 
%red } -declare i8 @llvm.vector.reduce.and.v16i8(<16 x i8>) - define i8 @vreduce_and_v16i8(ptr %x) { ; CHECK-LABEL: vreduce_and_v16i8: ; CHECK: # %bb.0: @@ -1735,8 +1658,6 @@ define i8 @vreduce_and_v16i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.and.v32i8(<32 x i8>) - define i8 @vreduce_and_v32i8(ptr %x) { ; CHECK-LABEL: vreduce_and_v32i8: ; CHECK: # %bb.0: @@ -1751,8 +1672,6 @@ define i8 @vreduce_and_v32i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.and.v64i8(<64 x i8>) - define i8 @vreduce_and_v64i8(ptr %x) { ; CHECK-LABEL: vreduce_and_v64i8: ; CHECK: # %bb.0: @@ -1767,8 +1686,6 @@ define i8 @vreduce_and_v64i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.and.v128i8(<128 x i8>) - define i8 @vreduce_and_v128i8(ptr %x) { ; CHECK-LABEL: vreduce_and_v128i8: ; CHECK: # %bb.0: @@ -1783,8 +1700,6 @@ define i8 @vreduce_and_v128i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.and.v256i8(<256 x i8>) - define i8 @vreduce_and_v256i8(ptr %x) { ; CHECK-LABEL: vreduce_and_v256i8: ; CHECK: # %bb.0: @@ -1802,8 +1717,6 @@ define i8 @vreduce_and_v256i8(ptr %x) { ret i8 %red } -declare i16 @llvm.vector.reduce.and.v1i16(<1 x i16>) - define i16 @vreduce_and_v1i16(<1 x i16> %v) { ; CHECK-LABEL: vreduce_and_v1i16: ; CHECK: # %bb.0: @@ -1814,8 +1727,6 @@ define i16 @vreduce_and_v1i16(<1 x i16> %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.and.v2i16(<2 x i16>) - define i16 @vreduce_and_v2i16(ptr %x) { ; CHECK-LABEL: vreduce_and_v2i16: ; CHECK: # %bb.0: @@ -1829,8 +1740,6 @@ define i16 @vreduce_and_v2i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.and.v4i16(<4 x i16>) - define i16 @vreduce_and_v4i16(ptr %x) { ; CHECK-LABEL: vreduce_and_v4i16: ; CHECK: # %bb.0: @@ -1844,8 +1753,6 @@ define i16 @vreduce_and_v4i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.and.v8i16(<8 x i16>) - define i16 @vreduce_and_v8i16(ptr %x) { ; CHECK-LABEL: vreduce_and_v8i16: ; CHECK: # %bb.0: @@ -1859,8 +1766,6 @@ define i16 
@vreduce_and_v8i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.and.v16i16(<16 x i16>) - define i16 @vreduce_and_v16i16(ptr %x) { ; CHECK-LABEL: vreduce_and_v16i16: ; CHECK: # %bb.0: @@ -1874,8 +1779,6 @@ define i16 @vreduce_and_v16i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.and.v32i16(<32 x i16>) - define i16 @vreduce_and_v32i16(ptr %x) { ; CHECK-LABEL: vreduce_and_v32i16: ; CHECK: # %bb.0: @@ -1890,8 +1793,6 @@ define i16 @vreduce_and_v32i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.and.v64i16(<64 x i16>) - define i16 @vreduce_and_v64i16(ptr %x) { ; CHECK-LABEL: vreduce_and_v64i16: ; CHECK: # %bb.0: @@ -1906,8 +1807,6 @@ define i16 @vreduce_and_v64i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.and.v128i16(<128 x i16>) - define i16 @vreduce_and_v128i16(ptr %x) { ; CHECK-LABEL: vreduce_and_v128i16: ; CHECK: # %bb.0: @@ -1925,8 +1824,6 @@ define i16 @vreduce_and_v128i16(ptr %x) { ret i16 %red } -declare i32 @llvm.vector.reduce.and.v1i32(<1 x i32>) - define i32 @vreduce_and_v1i32(<1 x i32> %v) { ; CHECK-LABEL: vreduce_and_v1i32: ; CHECK: # %bb.0: @@ -1937,8 +1834,6 @@ define i32 @vreduce_and_v1i32(<1 x i32> %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.and.v2i32(<2 x i32>) - define i32 @vreduce_and_v2i32(ptr %x) { ; CHECK-LABEL: vreduce_and_v2i32: ; CHECK: # %bb.0: @@ -1952,8 +1847,6 @@ define i32 @vreduce_and_v2i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.and.v4i32(<4 x i32>) - define i32 @vreduce_and_v4i32(ptr %x) { ; CHECK-LABEL: vreduce_and_v4i32: ; CHECK: # %bb.0: @@ -1967,8 +1860,6 @@ define i32 @vreduce_and_v4i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.and.v8i32(<8 x i32>) - define i32 @vreduce_and_v8i32(ptr %x) { ; CHECK-LABEL: vreduce_and_v8i32: ; CHECK: # %bb.0: @@ -1982,8 +1873,6 @@ define i32 @vreduce_and_v8i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.and.v16i32(<16 x i32>) - define i32 @vreduce_and_v16i32(ptr %x) { ; CHECK-LABEL: 
vreduce_and_v16i32: ; CHECK: # %bb.0: @@ -1997,8 +1886,6 @@ define i32 @vreduce_and_v16i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.and.v32i32(<32 x i32>) - define i32 @vreduce_and_v32i32(ptr %x) { ; CHECK-LABEL: vreduce_and_v32i32: ; CHECK: # %bb.0: @@ -2013,8 +1900,6 @@ define i32 @vreduce_and_v32i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.and.v64i32(<64 x i32>) - define i32 @vreduce_and_v64i32(ptr %x) { ; CHECK-LABEL: vreduce_and_v64i32: ; CHECK: # %bb.0: @@ -2032,8 +1917,6 @@ define i32 @vreduce_and_v64i32(ptr %x) { ret i32 %red } -declare i64 @llvm.vector.reduce.and.v1i64(<1 x i64>) - define i64 @vreduce_and_v1i64(<1 x i64> %v) { ; RV32-LABEL: vreduce_and_v1i64: ; RV32: # %bb.0: @@ -2053,8 +1936,6 @@ define i64 @vreduce_and_v1i64(<1 x i64> %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.and.v2i64(<2 x i64>) - define i64 @vreduce_and_v2i64(ptr %x) { ; RV32-LABEL: vreduce_and_v2i64: ; RV32: # %bb.0: @@ -2080,8 +1961,6 @@ define i64 @vreduce_and_v2i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.and.v4i64(<4 x i64>) - define i64 @vreduce_and_v4i64(ptr %x) { ; RV32-LABEL: vreduce_and_v4i64: ; RV32: # %bb.0: @@ -2107,8 +1986,6 @@ define i64 @vreduce_and_v4i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.and.v8i64(<8 x i64>) - define i64 @vreduce_and_v8i64(ptr %x) { ; RV32-LABEL: vreduce_and_v8i64: ; RV32: # %bb.0: @@ -2134,8 +2011,6 @@ define i64 @vreduce_and_v8i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.and.v16i64(<16 x i64>) - define i64 @vreduce_and_v16i64(ptr %x) { ; RV32-LABEL: vreduce_and_v16i64: ; RV32: # %bb.0: @@ -2161,8 +2036,6 @@ define i64 @vreduce_and_v16i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.and.v32i64(<32 x i64>) - define i64 @vreduce_and_v32i64(ptr %x) { ; RV32-LABEL: vreduce_and_v32i64: ; RV32: # %bb.0: @@ -2194,8 +2067,6 @@ define i64 @vreduce_and_v32i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.and.v64i64(<64 x i64>) - 
define i64 @vreduce_and_v64i64(ptr %x) nounwind { ; RV32-LABEL: vreduce_and_v64i64: ; RV32: # %bb.0: @@ -2239,8 +2110,6 @@ define i64 @vreduce_and_v64i64(ptr %x) nounwind { ret i64 %red } -declare i8 @llvm.vector.reduce.or.v1i8(<1 x i8>) - define i8 @vreduce_or_v1i8(<1 x i8> %v) { ; CHECK-LABEL: vreduce_or_v1i8: ; CHECK: # %bb.0: @@ -2251,8 +2120,6 @@ define i8 @vreduce_or_v1i8(<1 x i8> %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.or.v2i8(<2 x i8>) - define i8 @vreduce_or_v2i8(ptr %x) { ; CHECK-LABEL: vreduce_or_v2i8: ; CHECK: # %bb.0: @@ -2266,8 +2133,6 @@ define i8 @vreduce_or_v2i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.or.v3i8(<3 x i8>) - define i8 @vreduce_or_v3i8(ptr %x) { ; CHECK-LABEL: vreduce_or_v3i8: ; CHECK: # %bb.0: @@ -2282,8 +2147,6 @@ define i8 @vreduce_or_v3i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.or.v4i8(<4 x i8>) - define i8 @vreduce_or_v4i8(ptr %x) { ; CHECK-LABEL: vreduce_or_v4i8: ; CHECK: # %bb.0: @@ -2297,8 +2160,6 @@ define i8 @vreduce_or_v4i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.or.v8i8(<8 x i8>) - define i8 @vreduce_or_v8i8(ptr %x) { ; CHECK-LABEL: vreduce_or_v8i8: ; CHECK: # %bb.0: @@ -2312,8 +2173,6 @@ define i8 @vreduce_or_v8i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.or.v16i8(<16 x i8>) - define i8 @vreduce_or_v16i8(ptr %x) { ; CHECK-LABEL: vreduce_or_v16i8: ; CHECK: # %bb.0: @@ -2327,8 +2186,6 @@ define i8 @vreduce_or_v16i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.or.v32i8(<32 x i8>) - define i8 @vreduce_or_v32i8(ptr %x) { ; CHECK-LABEL: vreduce_or_v32i8: ; CHECK: # %bb.0: @@ -2343,8 +2200,6 @@ define i8 @vreduce_or_v32i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.or.v64i8(<64 x i8>) - define i8 @vreduce_or_v64i8(ptr %x) { ; CHECK-LABEL: vreduce_or_v64i8: ; CHECK: # %bb.0: @@ -2359,8 +2214,6 @@ define i8 @vreduce_or_v64i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.or.v128i8(<128 x i8>) - define i8 @vreduce_or_v128i8(ptr 
%x) { ; CHECK-LABEL: vreduce_or_v128i8: ; CHECK: # %bb.0: @@ -2375,8 +2228,6 @@ define i8 @vreduce_or_v128i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.or.v256i8(<256 x i8>) - define i8 @vreduce_or_v256i8(ptr %x) { ; CHECK-LABEL: vreduce_or_v256i8: ; CHECK: # %bb.0: @@ -2394,8 +2245,6 @@ define i8 @vreduce_or_v256i8(ptr %x) { ret i8 %red } -declare i16 @llvm.vector.reduce.or.v1i16(<1 x i16>) - define i16 @vreduce_or_v1i16(<1 x i16> %v) { ; CHECK-LABEL: vreduce_or_v1i16: ; CHECK: # %bb.0: @@ -2406,8 +2255,6 @@ define i16 @vreduce_or_v1i16(<1 x i16> %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.or.v2i16(<2 x i16>) - define i16 @vreduce_or_v2i16(ptr %x) { ; CHECK-LABEL: vreduce_or_v2i16: ; CHECK: # %bb.0: @@ -2421,8 +2268,6 @@ define i16 @vreduce_or_v2i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.or.v4i16(<4 x i16>) - define i16 @vreduce_or_v4i16(ptr %x) { ; CHECK-LABEL: vreduce_or_v4i16: ; CHECK: # %bb.0: @@ -2436,8 +2281,6 @@ define i16 @vreduce_or_v4i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.or.v8i16(<8 x i16>) - define i16 @vreduce_or_v8i16(ptr %x) { ; CHECK-LABEL: vreduce_or_v8i16: ; CHECK: # %bb.0: @@ -2451,8 +2294,6 @@ define i16 @vreduce_or_v8i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.or.v16i16(<16 x i16>) - define i16 @vreduce_or_v16i16(ptr %x) { ; CHECK-LABEL: vreduce_or_v16i16: ; CHECK: # %bb.0: @@ -2466,8 +2307,6 @@ define i16 @vreduce_or_v16i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.or.v32i16(<32 x i16>) - define i16 @vreduce_or_v32i16(ptr %x) { ; CHECK-LABEL: vreduce_or_v32i16: ; CHECK: # %bb.0: @@ -2482,8 +2321,6 @@ define i16 @vreduce_or_v32i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.or.v64i16(<64 x i16>) - define i16 @vreduce_or_v64i16(ptr %x) { ; CHECK-LABEL: vreduce_or_v64i16: ; CHECK: # %bb.0: @@ -2498,8 +2335,6 @@ define i16 @vreduce_or_v64i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.or.v128i16(<128 x i16>) - define i16 
@vreduce_or_v128i16(ptr %x) { ; CHECK-LABEL: vreduce_or_v128i16: ; CHECK: # %bb.0: @@ -2517,8 +2352,6 @@ define i16 @vreduce_or_v128i16(ptr %x) { ret i16 %red } -declare i32 @llvm.vector.reduce.or.v1i32(<1 x i32>) - define i32 @vreduce_or_v1i32(<1 x i32> %v) { ; CHECK-LABEL: vreduce_or_v1i32: ; CHECK: # %bb.0: @@ -2529,8 +2362,6 @@ define i32 @vreduce_or_v1i32(<1 x i32> %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.or.v2i32(<2 x i32>) - define i32 @vreduce_or_v2i32(ptr %x) { ; CHECK-LABEL: vreduce_or_v2i32: ; CHECK: # %bb.0: @@ -2544,8 +2375,6 @@ define i32 @vreduce_or_v2i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.or.v4i32(<4 x i32>) - define i32 @vreduce_or_v4i32(ptr %x) { ; CHECK-LABEL: vreduce_or_v4i32: ; CHECK: # %bb.0: @@ -2559,8 +2388,6 @@ define i32 @vreduce_or_v4i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.or.v8i32(<8 x i32>) - define i32 @vreduce_or_v8i32(ptr %x) { ; CHECK-LABEL: vreduce_or_v8i32: ; CHECK: # %bb.0: @@ -2574,8 +2401,6 @@ define i32 @vreduce_or_v8i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.or.v16i32(<16 x i32>) - define i32 @vreduce_or_v16i32(ptr %x) { ; CHECK-LABEL: vreduce_or_v16i32: ; CHECK: # %bb.0: @@ -2589,8 +2414,6 @@ define i32 @vreduce_or_v16i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.or.v32i32(<32 x i32>) - define i32 @vreduce_or_v32i32(ptr %x) { ; CHECK-LABEL: vreduce_or_v32i32: ; CHECK: # %bb.0: @@ -2605,8 +2428,6 @@ define i32 @vreduce_or_v32i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.or.v64i32(<64 x i32>) - define i32 @vreduce_or_v64i32(ptr %x) { ; CHECK-LABEL: vreduce_or_v64i32: ; CHECK: # %bb.0: @@ -2624,8 +2445,6 @@ define i32 @vreduce_or_v64i32(ptr %x) { ret i32 %red } -declare i64 @llvm.vector.reduce.or.v1i64(<1 x i64>) - define i64 @vreduce_or_v1i64(<1 x i64> %v) { ; RV32-LABEL: vreduce_or_v1i64: ; RV32: # %bb.0: @@ -2645,8 +2464,6 @@ define i64 @vreduce_or_v1i64(<1 x i64> %v) { ret i64 %red } -declare i64 
@llvm.vector.reduce.or.v2i64(<2 x i64>) - define i64 @vreduce_or_v2i64(ptr %x) { ; RV32-LABEL: vreduce_or_v2i64: ; RV32: # %bb.0: @@ -2672,8 +2489,6 @@ define i64 @vreduce_or_v2i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.or.v4i64(<4 x i64>) - define i64 @vreduce_or_v4i64(ptr %x) { ; RV32-LABEL: vreduce_or_v4i64: ; RV32: # %bb.0: @@ -2699,8 +2514,6 @@ define i64 @vreduce_or_v4i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.or.v8i64(<8 x i64>) - define i64 @vreduce_or_v8i64(ptr %x) { ; RV32-LABEL: vreduce_or_v8i64: ; RV32: # %bb.0: @@ -2726,8 +2539,6 @@ define i64 @vreduce_or_v8i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.or.v16i64(<16 x i64>) - define i64 @vreduce_or_v16i64(ptr %x) { ; RV32-LABEL: vreduce_or_v16i64: ; RV32: # %bb.0: @@ -2753,8 +2564,6 @@ define i64 @vreduce_or_v16i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.or.v32i64(<32 x i64>) - define i64 @vreduce_or_v32i64(ptr %x) { ; RV32-LABEL: vreduce_or_v32i64: ; RV32: # %bb.0: @@ -2786,8 +2595,6 @@ define i64 @vreduce_or_v32i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.or.v64i64(<64 x i64>) - define i64 @vreduce_or_v64i64(ptr %x) nounwind { ; RV32-LABEL: vreduce_or_v64i64: ; RV32: # %bb.0: @@ -2831,8 +2638,6 @@ define i64 @vreduce_or_v64i64(ptr %x) nounwind { ret i64 %red } -declare i8 @llvm.vector.reduce.xor.v1i8(<1 x i8>) - define i8 @vreduce_xor_v1i8(<1 x i8> %v) { ; CHECK-LABEL: vreduce_xor_v1i8: ; CHECK: # %bb.0: @@ -2843,8 +2648,6 @@ define i8 @vreduce_xor_v1i8(<1 x i8> %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.xor.v2i8(<2 x i8>) - define i8 @vreduce_xor_v2i8(ptr %x) { ; CHECK-LABEL: vreduce_xor_v2i8: ; CHECK: # %bb.0: @@ -2859,8 +2662,6 @@ define i8 @vreduce_xor_v2i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.xor.v3i8(<3 x i8>) - define i8 @vreduce_xor_v3i8(ptr %x) { ; CHECK-LABEL: vreduce_xor_v3i8: ; CHECK: # %bb.0: @@ -2875,8 +2676,6 @@ define i8 @vreduce_xor_v3i8(ptr %x) { ret i8 %red } 
-declare i8 @llvm.vector.reduce.xor.v4i8(<4 x i8>) - define i8 @vreduce_xor_v4i8(ptr %x) { ; CHECK-LABEL: vreduce_xor_v4i8: ; CHECK: # %bb.0: @@ -2891,8 +2690,6 @@ define i8 @vreduce_xor_v4i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.xor.v8i8(<8 x i8>) - define i8 @vreduce_xor_v8i8(ptr %x) { ; CHECK-LABEL: vreduce_xor_v8i8: ; CHECK: # %bb.0: @@ -2907,8 +2704,6 @@ define i8 @vreduce_xor_v8i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.xor.v16i8(<16 x i8>) - define i8 @vreduce_xor_v16i8(ptr %x) { ; CHECK-LABEL: vreduce_xor_v16i8: ; CHECK: # %bb.0: @@ -2923,8 +2718,6 @@ define i8 @vreduce_xor_v16i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.xor.v32i8(<32 x i8>) - define i8 @vreduce_xor_v32i8(ptr %x) { ; CHECK-LABEL: vreduce_xor_v32i8: ; CHECK: # %bb.0: @@ -2940,8 +2733,6 @@ define i8 @vreduce_xor_v32i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.xor.v64i8(<64 x i8>) - define i8 @vreduce_xor_v64i8(ptr %x) { ; CHECK-LABEL: vreduce_xor_v64i8: ; CHECK: # %bb.0: @@ -2957,8 +2748,6 @@ define i8 @vreduce_xor_v64i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.xor.v128i8(<128 x i8>) - define i8 @vreduce_xor_v128i8(ptr %x) { ; CHECK-LABEL: vreduce_xor_v128i8: ; CHECK: # %bb.0: @@ -2974,8 +2763,6 @@ define i8 @vreduce_xor_v128i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.xor.v256i8(<256 x i8>) - define i8 @vreduce_xor_v256i8(ptr %x) { ; CHECK-LABEL: vreduce_xor_v256i8: ; CHECK: # %bb.0: @@ -2994,8 +2781,6 @@ define i8 @vreduce_xor_v256i8(ptr %x) { ret i8 %red } -declare i16 @llvm.vector.reduce.xor.v1i16(<1 x i16>) - define i16 @vreduce_xor_v1i16(<1 x i16> %v) { ; CHECK-LABEL: vreduce_xor_v1i16: ; CHECK: # %bb.0: @@ -3006,8 +2791,6 @@ define i16 @vreduce_xor_v1i16(<1 x i16> %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.xor.v2i16(<2 x i16>) - define i16 @vreduce_xor_v2i16(ptr %x) { ; CHECK-LABEL: vreduce_xor_v2i16: ; CHECK: # %bb.0: @@ -3022,8 +2805,6 @@ define i16 @vreduce_xor_v2i16(ptr %x) { ret 
i16 %red } -declare i16 @llvm.vector.reduce.xor.v4i16(<4 x i16>) - define i16 @vreduce_xor_v4i16(ptr %x) { ; CHECK-LABEL: vreduce_xor_v4i16: ; CHECK: # %bb.0: @@ -3038,8 +2819,6 @@ define i16 @vreduce_xor_v4i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.xor.v8i16(<8 x i16>) - define i16 @vreduce_xor_v8i16(ptr %x) { ; CHECK-LABEL: vreduce_xor_v8i16: ; CHECK: # %bb.0: @@ -3054,8 +2833,6 @@ define i16 @vreduce_xor_v8i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.xor.v16i16(<16 x i16>) - define i16 @vreduce_xor_v16i16(ptr %x) { ; CHECK-LABEL: vreduce_xor_v16i16: ; CHECK: # %bb.0: @@ -3070,8 +2847,6 @@ define i16 @vreduce_xor_v16i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.xor.v32i16(<32 x i16>) - define i16 @vreduce_xor_v32i16(ptr %x) { ; CHECK-LABEL: vreduce_xor_v32i16: ; CHECK: # %bb.0: @@ -3087,8 +2862,6 @@ define i16 @vreduce_xor_v32i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.xor.v64i16(<64 x i16>) - define i16 @vreduce_xor_v64i16(ptr %x) { ; CHECK-LABEL: vreduce_xor_v64i16: ; CHECK: # %bb.0: @@ -3104,8 +2877,6 @@ define i16 @vreduce_xor_v64i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.xor.v128i16(<128 x i16>) - define i16 @vreduce_xor_v128i16(ptr %x) { ; CHECK-LABEL: vreduce_xor_v128i16: ; CHECK: # %bb.0: @@ -3124,8 +2895,6 @@ define i16 @vreduce_xor_v128i16(ptr %x) { ret i16 %red } -declare i32 @llvm.vector.reduce.xor.v1i32(<1 x i32>) - define i32 @vreduce_xor_v1i32(<1 x i32> %v) { ; CHECK-LABEL: vreduce_xor_v1i32: ; CHECK: # %bb.0: @@ -3136,8 +2905,6 @@ define i32 @vreduce_xor_v1i32(<1 x i32> %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.xor.v2i32(<2 x i32>) - define i32 @vreduce_xor_v2i32(ptr %x) { ; CHECK-LABEL: vreduce_xor_v2i32: ; CHECK: # %bb.0: @@ -3152,8 +2919,6 @@ define i32 @vreduce_xor_v2i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.xor.v4i32(<4 x i32>) - define i32 @vreduce_xor_v4i32(ptr %x) { ; CHECK-LABEL: vreduce_xor_v4i32: ; CHECK: # %bb.0: @@ 
-3168,8 +2933,6 @@ define i32 @vreduce_xor_v4i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.xor.v8i32(<8 x i32>) - define i32 @vreduce_xor_v8i32(ptr %x) { ; CHECK-LABEL: vreduce_xor_v8i32: ; CHECK: # %bb.0: @@ -3184,8 +2947,6 @@ define i32 @vreduce_xor_v8i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.xor.v16i32(<16 x i32>) - define i32 @vreduce_xor_v16i32(ptr %x) { ; CHECK-LABEL: vreduce_xor_v16i32: ; CHECK: # %bb.0: @@ -3200,8 +2961,6 @@ define i32 @vreduce_xor_v16i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.xor.v32i32(<32 x i32>) - define i32 @vreduce_xor_v32i32(ptr %x) { ; CHECK-LABEL: vreduce_xor_v32i32: ; CHECK: # %bb.0: @@ -3217,8 +2976,6 @@ define i32 @vreduce_xor_v32i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.xor.v64i32(<64 x i32>) - define i32 @vreduce_xor_v64i32(ptr %x) { ; CHECK-LABEL: vreduce_xor_v64i32: ; CHECK: # %bb.0: @@ -3237,8 +2994,6 @@ define i32 @vreduce_xor_v64i32(ptr %x) { ret i32 %red } -declare i64 @llvm.vector.reduce.xor.v1i64(<1 x i64>) - define i64 @vreduce_xor_v1i64(<1 x i64> %v) { ; RV32-LABEL: vreduce_xor_v1i64: ; RV32: # %bb.0: @@ -3258,8 +3013,6 @@ define i64 @vreduce_xor_v1i64(<1 x i64> %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.xor.v2i64(<2 x i64>) - define i64 @vreduce_xor_v2i64(ptr %x) { ; RV32-LABEL: vreduce_xor_v2i64: ; RV32: # %bb.0: @@ -3287,8 +3040,6 @@ define i64 @vreduce_xor_v2i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.xor.v4i64(<4 x i64>) - define i64 @vreduce_xor_v4i64(ptr %x) { ; RV32-LABEL: vreduce_xor_v4i64: ; RV32: # %bb.0: @@ -3316,8 +3067,6 @@ define i64 @vreduce_xor_v4i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.xor.v8i64(<8 x i64>) - define i64 @vreduce_xor_v8i64(ptr %x) { ; RV32-LABEL: vreduce_xor_v8i64: ; RV32: # %bb.0: @@ -3345,8 +3094,6 @@ define i64 @vreduce_xor_v8i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.xor.v16i64(<16 x i64>) - define i64 @vreduce_xor_v16i64(ptr %x) { ; 
RV32-LABEL: vreduce_xor_v16i64: ; RV32: # %bb.0: @@ -3374,8 +3121,6 @@ define i64 @vreduce_xor_v16i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.xor.v32i64(<32 x i64>) - define i64 @vreduce_xor_v32i64(ptr %x) { ; RV32-LABEL: vreduce_xor_v32i64: ; RV32: # %bb.0: @@ -3409,8 +3154,6 @@ define i64 @vreduce_xor_v32i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.xor.v64i64(<64 x i64>) - define i64 @vreduce_xor_v64i64(ptr %x) nounwind { ; RV32-LABEL: vreduce_xor_v64i64: ; RV32: # %bb.0: @@ -3456,8 +3199,6 @@ define i64 @vreduce_xor_v64i64(ptr %x) nounwind { ret i64 %red } -declare i8 @llvm.vector.reduce.smin.v1i8(<1 x i8>) - define i8 @vreduce_smin_v1i8(<1 x i8> %v) { ; CHECK-LABEL: vreduce_smin_v1i8: ; CHECK: # %bb.0: @@ -3468,8 +3209,6 @@ define i8 @vreduce_smin_v1i8(<1 x i8> %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.smin.v2i8(<2 x i8>) - define i8 @vreduce_smin_v2i8(ptr %x) { ; CHECK-LABEL: vreduce_smin_v2i8: ; CHECK: # %bb.0: @@ -3483,8 +3222,6 @@ define i8 @vreduce_smin_v2i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.smin.v3i8(<3 x i8>) - define i8 @vreduce_smin_v3i8(ptr %x) { ; CHECK-LABEL: vreduce_smin_v3i8: ; CHECK: # %bb.0: @@ -3500,8 +3237,6 @@ define i8 @vreduce_smin_v3i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.smin.v4i8(<4 x i8>) - define i8 @vreduce_smin_v4i8(ptr %x) { ; CHECK-LABEL: vreduce_smin_v4i8: ; CHECK: # %bb.0: @@ -3515,8 +3250,6 @@ define i8 @vreduce_smin_v4i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.smin.v8i8(<8 x i8>) - define i8 @vreduce_smin_v8i8(ptr %x) { ; CHECK-LABEL: vreduce_smin_v8i8: ; CHECK: # %bb.0: @@ -3530,8 +3263,6 @@ define i8 @vreduce_smin_v8i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.smin.v16i8(<16 x i8>) - define i8 @vreduce_smin_v16i8(ptr %x) { ; CHECK-LABEL: vreduce_smin_v16i8: ; CHECK: # %bb.0: @@ -3545,8 +3276,6 @@ define i8 @vreduce_smin_v16i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.smin.v32i8(<32 x i8>) - define 
i8 @vreduce_smin_v32i8(ptr %x) { ; CHECK-LABEL: vreduce_smin_v32i8: ; CHECK: # %bb.0: @@ -3561,8 +3290,6 @@ define i8 @vreduce_smin_v32i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.smin.v64i8(<64 x i8>) - define i8 @vreduce_smin_v64i8(ptr %x) { ; CHECK-LABEL: vreduce_smin_v64i8: ; CHECK: # %bb.0: @@ -3577,8 +3304,6 @@ define i8 @vreduce_smin_v64i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.smin.v128i8(<128 x i8>) - define i8 @vreduce_smin_v128i8(ptr %x) { ; CHECK-LABEL: vreduce_smin_v128i8: ; CHECK: # %bb.0: @@ -3593,8 +3318,6 @@ define i8 @vreduce_smin_v128i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.smin.v256i8(<256 x i8>) - define i8 @vreduce_smin_v256i8(ptr %x) { ; CHECK-LABEL: vreduce_smin_v256i8: ; CHECK: # %bb.0: @@ -3612,8 +3335,6 @@ define i8 @vreduce_smin_v256i8(ptr %x) { ret i8 %red } -declare i16 @llvm.vector.reduce.smin.v1i16(<1 x i16>) - define i16 @vreduce_smin_v1i16(<1 x i16> %v) { ; CHECK-LABEL: vreduce_smin_v1i16: ; CHECK: # %bb.0: @@ -3624,8 +3345,6 @@ define i16 @vreduce_smin_v1i16(<1 x i16> %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.smin.v2i16(<2 x i16>) - define i16 @vreduce_smin_v2i16(ptr %x) { ; CHECK-LABEL: vreduce_smin_v2i16: ; CHECK: # %bb.0: @@ -3639,8 +3358,6 @@ define i16 @vreduce_smin_v2i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.smin.v4i16(<4 x i16>) - define i16 @vreduce_smin_v4i16(ptr %x) { ; CHECK-LABEL: vreduce_smin_v4i16: ; CHECK: # %bb.0: @@ -3654,8 +3371,6 @@ define i16 @vreduce_smin_v4i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.smin.v8i16(<8 x i16>) - define i16 @vreduce_smin_v8i16(ptr %x) { ; CHECK-LABEL: vreduce_smin_v8i16: ; CHECK: # %bb.0: @@ -3669,8 +3384,6 @@ define i16 @vreduce_smin_v8i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.smin.v16i16(<16 x i16>) - define i16 @vreduce_smin_v16i16(ptr %x) { ; CHECK-LABEL: vreduce_smin_v16i16: ; CHECK: # %bb.0: @@ -3684,8 +3397,6 @@ define i16 @vreduce_smin_v16i16(ptr %x) { ret 
i16 %red } -declare i16 @llvm.vector.reduce.smin.v32i16(<32 x i16>) - define i16 @vreduce_smin_v32i16(ptr %x) { ; CHECK-LABEL: vreduce_smin_v32i16: ; CHECK: # %bb.0: @@ -3700,8 +3411,6 @@ define i16 @vreduce_smin_v32i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.smin.v64i16(<64 x i16>) - define i16 @vreduce_smin_v64i16(ptr %x) { ; CHECK-LABEL: vreduce_smin_v64i16: ; CHECK: # %bb.0: @@ -3716,8 +3425,6 @@ define i16 @vreduce_smin_v64i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.smin.v128i16(<128 x i16>) - define i16 @vreduce_smin_v128i16(ptr %x) { ; CHECK-LABEL: vreduce_smin_v128i16: ; CHECK: # %bb.0: @@ -3735,8 +3442,6 @@ define i16 @vreduce_smin_v128i16(ptr %x) { ret i16 %red } -declare i32 @llvm.vector.reduce.smin.v1i32(<1 x i32>) - define i32 @vreduce_smin_v1i32(<1 x i32> %v) { ; CHECK-LABEL: vreduce_smin_v1i32: ; CHECK: # %bb.0: @@ -3747,8 +3452,6 @@ define i32 @vreduce_smin_v1i32(<1 x i32> %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.smin.v2i32(<2 x i32>) - define i32 @vreduce_smin_v2i32(ptr %x) { ; CHECK-LABEL: vreduce_smin_v2i32: ; CHECK: # %bb.0: @@ -3762,8 +3465,6 @@ define i32 @vreduce_smin_v2i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.smin.v4i32(<4 x i32>) - define i32 @vreduce_smin_v4i32(ptr %x) { ; CHECK-LABEL: vreduce_smin_v4i32: ; CHECK: # %bb.0: @@ -3777,8 +3478,6 @@ define i32 @vreduce_smin_v4i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.smin.v8i32(<8 x i32>) - define i32 @vreduce_smin_v8i32(ptr %x) { ; CHECK-LABEL: vreduce_smin_v8i32: ; CHECK: # %bb.0: @@ -3792,8 +3491,6 @@ define i32 @vreduce_smin_v8i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.smin.v16i32(<16 x i32>) - define i32 @vreduce_smin_v16i32(ptr %x) { ; CHECK-LABEL: vreduce_smin_v16i32: ; CHECK: # %bb.0: @@ -3807,8 +3504,6 @@ define i32 @vreduce_smin_v16i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.smin.v32i32(<32 x i32>) - define i32 @vreduce_smin_v32i32(ptr %x) { ; CHECK-LABEL: 
vreduce_smin_v32i32: ; CHECK: # %bb.0: @@ -3823,8 +3518,6 @@ define i32 @vreduce_smin_v32i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.smin.v64i32(<64 x i32>) - define i32 @vreduce_smin_v64i32(ptr %x) { ; CHECK-LABEL: vreduce_smin_v64i32: ; CHECK: # %bb.0: @@ -3842,8 +3535,6 @@ define i32 @vreduce_smin_v64i32(ptr %x) { ret i32 %red } -declare i64 @llvm.vector.reduce.smin.v1i64(<1 x i64>) - define i64 @vreduce_smin_v1i64(<1 x i64> %v) { ; RV32-LABEL: vreduce_smin_v1i64: ; RV32: # %bb.0: @@ -3863,8 +3554,6 @@ define i64 @vreduce_smin_v1i64(<1 x i64> %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.smin.v2i64(<2 x i64>) - define i64 @vreduce_smin_v2i64(ptr %x) { ; RV32-LABEL: vreduce_smin_v2i64: ; RV32: # %bb.0: @@ -3890,8 +3579,6 @@ define i64 @vreduce_smin_v2i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.smin.v4i64(<4 x i64>) - define i64 @vreduce_smin_v4i64(ptr %x) { ; RV32-LABEL: vreduce_smin_v4i64: ; RV32: # %bb.0: @@ -3917,8 +3604,6 @@ define i64 @vreduce_smin_v4i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.smin.v8i64(<8 x i64>) - define i64 @vreduce_smin_v8i64(ptr %x) { ; RV32-LABEL: vreduce_smin_v8i64: ; RV32: # %bb.0: @@ -3944,8 +3629,6 @@ define i64 @vreduce_smin_v8i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.smin.v16i64(<16 x i64>) - define i64 @vreduce_smin_v16i64(ptr %x) { ; RV32-LABEL: vreduce_smin_v16i64: ; RV32: # %bb.0: @@ -3971,8 +3654,6 @@ define i64 @vreduce_smin_v16i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.smin.v32i64(<32 x i64>) - define i64 @vreduce_smin_v32i64(ptr %x) { ; RV32-LABEL: vreduce_smin_v32i64: ; RV32: # %bb.0: @@ -4004,8 +3685,6 @@ define i64 @vreduce_smin_v32i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.smin.v64i64(<64 x i64>) - define i64 @vreduce_smin_v64i64(ptr %x) nounwind { ; RV32-LABEL: vreduce_smin_v64i64: ; RV32: # %bb.0: @@ -4049,8 +3728,6 @@ define i64 @vreduce_smin_v64i64(ptr %x) nounwind { ret i64 %red } -declare i8 
@llvm.vector.reduce.smax.v1i8(<1 x i8>) - define i8 @vreduce_smax_v1i8(<1 x i8> %v) { ; CHECK-LABEL: vreduce_smax_v1i8: ; CHECK: # %bb.0: @@ -4061,8 +3738,6 @@ define i8 @vreduce_smax_v1i8(<1 x i8> %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.smax.v2i8(<2 x i8>) - define i8 @vreduce_smax_v2i8(ptr %x) { ; CHECK-LABEL: vreduce_smax_v2i8: ; CHECK: # %bb.0: @@ -4076,8 +3751,6 @@ define i8 @vreduce_smax_v2i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.smax.v3i8(<3 x i8>) - define i8 @vreduce_smax_v3i8(ptr %x) { ; CHECK-LABEL: vreduce_smax_v3i8: ; CHECK: # %bb.0: @@ -4093,8 +3766,6 @@ define i8 @vreduce_smax_v3i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.smax.v4i8(<4 x i8>) - define i8 @vreduce_smax_v4i8(ptr %x) { ; CHECK-LABEL: vreduce_smax_v4i8: ; CHECK: # %bb.0: @@ -4108,8 +3779,6 @@ define i8 @vreduce_smax_v4i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.smax.v8i8(<8 x i8>) - define i8 @vreduce_smax_v8i8(ptr %x) { ; CHECK-LABEL: vreduce_smax_v8i8: ; CHECK: # %bb.0: @@ -4123,8 +3792,6 @@ define i8 @vreduce_smax_v8i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.smax.v16i8(<16 x i8>) - define i8 @vreduce_smax_v16i8(ptr %x) { ; CHECK-LABEL: vreduce_smax_v16i8: ; CHECK: # %bb.0: @@ -4138,8 +3805,6 @@ define i8 @vreduce_smax_v16i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.smax.v32i8(<32 x i8>) - define i8 @vreduce_smax_v32i8(ptr %x) { ; CHECK-LABEL: vreduce_smax_v32i8: ; CHECK: # %bb.0: @@ -4154,8 +3819,6 @@ define i8 @vreduce_smax_v32i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.smax.v64i8(<64 x i8>) - define i8 @vreduce_smax_v64i8(ptr %x) { ; CHECK-LABEL: vreduce_smax_v64i8: ; CHECK: # %bb.0: @@ -4170,8 +3833,6 @@ define i8 @vreduce_smax_v64i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.smax.v128i8(<128 x i8>) - define i8 @vreduce_smax_v128i8(ptr %x) { ; CHECK-LABEL: vreduce_smax_v128i8: ; CHECK: # %bb.0: @@ -4186,8 +3847,6 @@ define i8 @vreduce_smax_v128i8(ptr %x) { ret i8 
%red } -declare i8 @llvm.vector.reduce.smax.v256i8(<256 x i8>) - define i8 @vreduce_smax_v256i8(ptr %x) { ; CHECK-LABEL: vreduce_smax_v256i8: ; CHECK: # %bb.0: @@ -4205,8 +3864,6 @@ define i8 @vreduce_smax_v256i8(ptr %x) { ret i8 %red } -declare i16 @llvm.vector.reduce.smax.v1i16(<1 x i16>) - define i16 @vreduce_smax_v1i16(<1 x i16> %v) { ; CHECK-LABEL: vreduce_smax_v1i16: ; CHECK: # %bb.0: @@ -4217,8 +3874,6 @@ define i16 @vreduce_smax_v1i16(<1 x i16> %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.smax.v2i16(<2 x i16>) - define i16 @vreduce_smax_v2i16(ptr %x) { ; CHECK-LABEL: vreduce_smax_v2i16: ; CHECK: # %bb.0: @@ -4232,8 +3887,6 @@ define i16 @vreduce_smax_v2i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.smax.v4i16(<4 x i16>) - define i16 @vreduce_smax_v4i16(ptr %x) { ; CHECK-LABEL: vreduce_smax_v4i16: ; CHECK: # %bb.0: @@ -4247,8 +3900,6 @@ define i16 @vreduce_smax_v4i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.smax.v8i16(<8 x i16>) - define i16 @vreduce_smax_v8i16(ptr %x) { ; CHECK-LABEL: vreduce_smax_v8i16: ; CHECK: # %bb.0: @@ -4262,8 +3913,6 @@ define i16 @vreduce_smax_v8i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.smax.v16i16(<16 x i16>) - define i16 @vreduce_smax_v16i16(ptr %x) { ; CHECK-LABEL: vreduce_smax_v16i16: ; CHECK: # %bb.0: @@ -4277,8 +3926,6 @@ define i16 @vreduce_smax_v16i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.smax.v32i16(<32 x i16>) - define i16 @vreduce_smax_v32i16(ptr %x) { ; CHECK-LABEL: vreduce_smax_v32i16: ; CHECK: # %bb.0: @@ -4293,8 +3940,6 @@ define i16 @vreduce_smax_v32i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.smax.v64i16(<64 x i16>) - define i16 @vreduce_smax_v64i16(ptr %x) { ; CHECK-LABEL: vreduce_smax_v64i16: ; CHECK: # %bb.0: @@ -4309,8 +3954,6 @@ define i16 @vreduce_smax_v64i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.smax.v128i16(<128 x i16>) - define i16 @vreduce_smax_v128i16(ptr %x) { ; CHECK-LABEL: 
vreduce_smax_v128i16: ; CHECK: # %bb.0: @@ -4328,8 +3971,6 @@ define i16 @vreduce_smax_v128i16(ptr %x) { ret i16 %red } -declare i32 @llvm.vector.reduce.smax.v1i32(<1 x i32>) - define i32 @vreduce_smax_v1i32(<1 x i32> %v) { ; CHECK-LABEL: vreduce_smax_v1i32: ; CHECK: # %bb.0: @@ -4340,8 +3981,6 @@ define i32 @vreduce_smax_v1i32(<1 x i32> %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.smax.v2i32(<2 x i32>) - define i32 @vreduce_smax_v2i32(ptr %x) { ; CHECK-LABEL: vreduce_smax_v2i32: ; CHECK: # %bb.0: @@ -4355,8 +3994,6 @@ define i32 @vreduce_smax_v2i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.smax.v4i32(<4 x i32>) - define i32 @vreduce_smax_v4i32(ptr %x) { ; CHECK-LABEL: vreduce_smax_v4i32: ; CHECK: # %bb.0: @@ -4370,8 +4007,6 @@ define i32 @vreduce_smax_v4i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.smax.v8i32(<8 x i32>) - define i32 @vreduce_smax_v8i32(ptr %x) { ; CHECK-LABEL: vreduce_smax_v8i32: ; CHECK: # %bb.0: @@ -4385,8 +4020,6 @@ define i32 @vreduce_smax_v8i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.smax.v16i32(<16 x i32>) - define i32 @vreduce_smax_v16i32(ptr %x) { ; CHECK-LABEL: vreduce_smax_v16i32: ; CHECK: # %bb.0: @@ -4400,8 +4033,6 @@ define i32 @vreduce_smax_v16i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.smax.v32i32(<32 x i32>) - define i32 @vreduce_smax_v32i32(ptr %x) { ; CHECK-LABEL: vreduce_smax_v32i32: ; CHECK: # %bb.0: @@ -4416,8 +4047,6 @@ define i32 @vreduce_smax_v32i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.smax.v64i32(<64 x i32>) - define i32 @vreduce_smax_v64i32(ptr %x) { ; CHECK-LABEL: vreduce_smax_v64i32: ; CHECK: # %bb.0: @@ -4435,8 +4064,6 @@ define i32 @vreduce_smax_v64i32(ptr %x) { ret i32 %red } -declare i64 @llvm.vector.reduce.smax.v1i64(<1 x i64>) - define i64 @vreduce_smax_v1i64(<1 x i64> %v) { ; RV32-LABEL: vreduce_smax_v1i64: ; RV32: # %bb.0: @@ -4456,8 +4083,6 @@ define i64 @vreduce_smax_v1i64(<1 x i64> %v) { ret i64 %red } -declare 
i64 @llvm.vector.reduce.smax.v2i64(<2 x i64>) - define i64 @vreduce_smax_v2i64(ptr %x) { ; RV32-LABEL: vreduce_smax_v2i64: ; RV32: # %bb.0: @@ -4483,8 +4108,6 @@ define i64 @vreduce_smax_v2i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.smax.v4i64(<4 x i64>) - define i64 @vreduce_smax_v4i64(ptr %x) { ; RV32-LABEL: vreduce_smax_v4i64: ; RV32: # %bb.0: @@ -4510,8 +4133,6 @@ define i64 @vreduce_smax_v4i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.smax.v8i64(<8 x i64>) - define i64 @vreduce_smax_v8i64(ptr %x) { ; RV32-LABEL: vreduce_smax_v8i64: ; RV32: # %bb.0: @@ -4537,8 +4158,6 @@ define i64 @vreduce_smax_v8i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.smax.v16i64(<16 x i64>) - define i64 @vreduce_smax_v16i64(ptr %x) { ; RV32-LABEL: vreduce_smax_v16i64: ; RV32: # %bb.0: @@ -4564,8 +4183,6 @@ define i64 @vreduce_smax_v16i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.smax.v32i64(<32 x i64>) - define i64 @vreduce_smax_v32i64(ptr %x) { ; RV32-LABEL: vreduce_smax_v32i64: ; RV32: # %bb.0: @@ -4597,8 +4214,6 @@ define i64 @vreduce_smax_v32i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.smax.v64i64(<64 x i64>) - define i64 @vreduce_smax_v64i64(ptr %x) nounwind { ; RV32-LABEL: vreduce_smax_v64i64: ; RV32: # %bb.0: @@ -4642,8 +4257,6 @@ define i64 @vreduce_smax_v64i64(ptr %x) nounwind { ret i64 %red } -declare i8 @llvm.vector.reduce.umin.v1i8(<1 x i8>) - define i8 @vreduce_umin_v1i8(<1 x i8> %v) { ; CHECK-LABEL: vreduce_umin_v1i8: ; CHECK: # %bb.0: @@ -4654,8 +4267,6 @@ define i8 @vreduce_umin_v1i8(<1 x i8> %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.umin.v2i8(<2 x i8>) - define i8 @vreduce_umin_v2i8(ptr %x) { ; CHECK-LABEL: vreduce_umin_v2i8: ; CHECK: # %bb.0: @@ -4669,8 +4280,6 @@ define i8 @vreduce_umin_v2i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.umin.v3i8(<3 x i8>) - define i8 @vreduce_umin_v3i8(ptr %x) { ; CHECK-LABEL: vreduce_umin_v3i8: ; CHECK: # %bb.0: @@ -4686,8 +4295,6 
@@ define i8 @vreduce_umin_v3i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.umin.v4i8(<4 x i8>) - define i8 @vreduce_umin_v4i8(ptr %x) { ; CHECK-LABEL: vreduce_umin_v4i8: ; CHECK: # %bb.0: @@ -4701,8 +4308,6 @@ define i8 @vreduce_umin_v4i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.umin.v8i8(<8 x i8>) - define i8 @vreduce_umin_v8i8(ptr %x) { ; CHECK-LABEL: vreduce_umin_v8i8: ; CHECK: # %bb.0: @@ -4716,8 +4321,6 @@ define i8 @vreduce_umin_v8i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.umin.v16i8(<16 x i8>) - define i8 @vreduce_umin_v16i8(ptr %x) { ; CHECK-LABEL: vreduce_umin_v16i8: ; CHECK: # %bb.0: @@ -4731,8 +4334,6 @@ define i8 @vreduce_umin_v16i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.umin.v32i8(<32 x i8>) - define i8 @vreduce_umin_v32i8(ptr %x) { ; CHECK-LABEL: vreduce_umin_v32i8: ; CHECK: # %bb.0: @@ -4747,8 +4348,6 @@ define i8 @vreduce_umin_v32i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.umin.v64i8(<64 x i8>) - define i8 @vreduce_umin_v64i8(ptr %x) { ; CHECK-LABEL: vreduce_umin_v64i8: ; CHECK: # %bb.0: @@ -4763,8 +4362,6 @@ define i8 @vreduce_umin_v64i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.umin.v128i8(<128 x i8>) - define i8 @vreduce_umin_v128i8(ptr %x) { ; CHECK-LABEL: vreduce_umin_v128i8: ; CHECK: # %bb.0: @@ -4779,8 +4376,6 @@ define i8 @vreduce_umin_v128i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.umin.v256i8(<256 x i8>) - define i8 @vreduce_umin_v256i8(ptr %x) { ; CHECK-LABEL: vreduce_umin_v256i8: ; CHECK: # %bb.0: @@ -4798,8 +4393,6 @@ define i8 @vreduce_umin_v256i8(ptr %x) { ret i8 %red } -declare i16 @llvm.vector.reduce.umin.v1i16(<1 x i16>) - define i16 @vreduce_umin_v1i16(<1 x i16> %v) { ; CHECK-LABEL: vreduce_umin_v1i16: ; CHECK: # %bb.0: @@ -4810,8 +4403,6 @@ define i16 @vreduce_umin_v1i16(<1 x i16> %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.umin.v2i16(<2 x i16>) - define i16 @vreduce_umin_v2i16(ptr %x) { ; CHECK-LABEL: 
vreduce_umin_v2i16: ; CHECK: # %bb.0: @@ -4825,8 +4416,6 @@ define i16 @vreduce_umin_v2i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.umin.v4i16(<4 x i16>) - define i16 @vreduce_umin_v4i16(ptr %x) { ; CHECK-LABEL: vreduce_umin_v4i16: ; CHECK: # %bb.0: @@ -4840,8 +4429,6 @@ define i16 @vreduce_umin_v4i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.umin.v8i16(<8 x i16>) - define i16 @vreduce_umin_v8i16(ptr %x) { ; CHECK-LABEL: vreduce_umin_v8i16: ; CHECK: # %bb.0: @@ -4855,8 +4442,6 @@ define i16 @vreduce_umin_v8i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.umin.v16i16(<16 x i16>) - define i16 @vreduce_umin_v16i16(ptr %x) { ; CHECK-LABEL: vreduce_umin_v16i16: ; CHECK: # %bb.0: @@ -4870,8 +4455,6 @@ define i16 @vreduce_umin_v16i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.umin.v32i16(<32 x i16>) - define i16 @vreduce_umin_v32i16(ptr %x) { ; CHECK-LABEL: vreduce_umin_v32i16: ; CHECK: # %bb.0: @@ -4886,8 +4469,6 @@ define i16 @vreduce_umin_v32i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.umin.v64i16(<64 x i16>) - define i16 @vreduce_umin_v64i16(ptr %x) { ; CHECK-LABEL: vreduce_umin_v64i16: ; CHECK: # %bb.0: @@ -4902,8 +4483,6 @@ define i16 @vreduce_umin_v64i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.umin.v128i16(<128 x i16>) - define i16 @vreduce_umin_v128i16(ptr %x) { ; CHECK-LABEL: vreduce_umin_v128i16: ; CHECK: # %bb.0: @@ -4921,8 +4500,6 @@ define i16 @vreduce_umin_v128i16(ptr %x) { ret i16 %red } -declare i32 @llvm.vector.reduce.umin.v1i32(<1 x i32>) - define i32 @vreduce_umin_v1i32(<1 x i32> %v) { ; CHECK-LABEL: vreduce_umin_v1i32: ; CHECK: # %bb.0: @@ -4933,8 +4510,6 @@ define i32 @vreduce_umin_v1i32(<1 x i32> %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.umin.v2i32(<2 x i32>) - define i32 @vreduce_umin_v2i32(ptr %x) { ; CHECK-LABEL: vreduce_umin_v2i32: ; CHECK: # %bb.0: @@ -4948,8 +4523,6 @@ define i32 @vreduce_umin_v2i32(ptr %x) { ret i32 %red } -declare i32 
@llvm.vector.reduce.umin.v4i32(<4 x i32>) - define i32 @vreduce_umin_v4i32(ptr %x) { ; CHECK-LABEL: vreduce_umin_v4i32: ; CHECK: # %bb.0: @@ -4963,8 +4536,6 @@ define i32 @vreduce_umin_v4i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.umin.v8i32(<8 x i32>) - define i32 @vreduce_umin_v8i32(ptr %x) { ; CHECK-LABEL: vreduce_umin_v8i32: ; CHECK: # %bb.0: @@ -4978,8 +4549,6 @@ define i32 @vreduce_umin_v8i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.umin.v16i32(<16 x i32>) - define i32 @vreduce_umin_v16i32(ptr %x) { ; CHECK-LABEL: vreduce_umin_v16i32: ; CHECK: # %bb.0: @@ -4993,8 +4562,6 @@ define i32 @vreduce_umin_v16i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.umin.v32i32(<32 x i32>) - define i32 @vreduce_umin_v32i32(ptr %x) { ; CHECK-LABEL: vreduce_umin_v32i32: ; CHECK: # %bb.0: @@ -5009,8 +4576,6 @@ define i32 @vreduce_umin_v32i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.umin.v64i32(<64 x i32>) - define i32 @vreduce_umin_v64i32(ptr %x) { ; CHECK-LABEL: vreduce_umin_v64i32: ; CHECK: # %bb.0: @@ -5028,8 +4593,6 @@ define i32 @vreduce_umin_v64i32(ptr %x) { ret i32 %red } -declare i64 @llvm.vector.reduce.umin.v1i64(<1 x i64>) - define i64 @vreduce_umin_v1i64(<1 x i64> %v) { ; RV32-LABEL: vreduce_umin_v1i64: ; RV32: # %bb.0: @@ -5049,8 +4612,6 @@ define i64 @vreduce_umin_v1i64(<1 x i64> %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.umin.v2i64(<2 x i64>) - define i64 @vreduce_umin_v2i64(ptr %x) { ; RV32-LABEL: vreduce_umin_v2i64: ; RV32: # %bb.0: @@ -5076,8 +4637,6 @@ define i64 @vreduce_umin_v2i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.umin.v4i64(<4 x i64>) - define i64 @vreduce_umin_v4i64(ptr %x) { ; RV32-LABEL: vreduce_umin_v4i64: ; RV32: # %bb.0: @@ -5103,8 +4662,6 @@ define i64 @vreduce_umin_v4i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.umin.v8i64(<8 x i64>) - define i64 @vreduce_umin_v8i64(ptr %x) { ; RV32-LABEL: vreduce_umin_v8i64: ; RV32: # %bb.0: @@ -5130,8 
+4687,6 @@ define i64 @vreduce_umin_v8i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.umin.v16i64(<16 x i64>) - define i64 @vreduce_umin_v16i64(ptr %x) { ; RV32-LABEL: vreduce_umin_v16i64: ; RV32: # %bb.0: @@ -5157,8 +4712,6 @@ define i64 @vreduce_umin_v16i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.umin.v32i64(<32 x i64>) - define i64 @vreduce_umin_v32i64(ptr %x) { ; RV32-LABEL: vreduce_umin_v32i64: ; RV32: # %bb.0: @@ -5190,8 +4743,6 @@ define i64 @vreduce_umin_v32i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.umin.v64i64(<64 x i64>) - define i64 @vreduce_umin_v64i64(ptr %x) nounwind { ; RV32-LABEL: vreduce_umin_v64i64: ; RV32: # %bb.0: @@ -5235,8 +4786,6 @@ define i64 @vreduce_umin_v64i64(ptr %x) nounwind { ret i64 %red } -declare i8 @llvm.vector.reduce.umax.v1i8(<1 x i8>) - define i8 @vreduce_umax_v1i8(<1 x i8> %v) { ; CHECK-LABEL: vreduce_umax_v1i8: ; CHECK: # %bb.0: @@ -5247,8 +4796,6 @@ define i8 @vreduce_umax_v1i8(<1 x i8> %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.umax.v2i8(<2 x i8>) - define i8 @vreduce_umax_v2i8(ptr %x) { ; CHECK-LABEL: vreduce_umax_v2i8: ; CHECK: # %bb.0: @@ -5262,8 +4809,6 @@ define i8 @vreduce_umax_v2i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.umax.v3i8(<3 x i8>) - define i8 @vreduce_umax_v3i8(ptr %x) { ; CHECK-LABEL: vreduce_umax_v3i8: ; CHECK: # %bb.0: @@ -5278,8 +4823,6 @@ define i8 @vreduce_umax_v3i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.umax.v4i8(<4 x i8>) - define i8 @vreduce_umax_v4i8(ptr %x) { ; CHECK-LABEL: vreduce_umax_v4i8: ; CHECK: # %bb.0: @@ -5293,8 +4836,6 @@ define i8 @vreduce_umax_v4i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.umax.v8i8(<8 x i8>) - define i8 @vreduce_umax_v8i8(ptr %x) { ; CHECK-LABEL: vreduce_umax_v8i8: ; CHECK: # %bb.0: @@ -5308,8 +4849,6 @@ define i8 @vreduce_umax_v8i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.umax.v16i8(<16 x i8>) - define i8 @vreduce_umax_v16i8(ptr %x) { ; 
CHECK-LABEL: vreduce_umax_v16i8: ; CHECK: # %bb.0: @@ -5323,8 +4862,6 @@ define i8 @vreduce_umax_v16i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.umax.v32i8(<32 x i8>) - define i8 @vreduce_umax_v32i8(ptr %x) { ; CHECK-LABEL: vreduce_umax_v32i8: ; CHECK: # %bb.0: @@ -5339,8 +4876,6 @@ define i8 @vreduce_umax_v32i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.umax.v64i8(<64 x i8>) - define i8 @vreduce_umax_v64i8(ptr %x) { ; CHECK-LABEL: vreduce_umax_v64i8: ; CHECK: # %bb.0: @@ -5355,8 +4890,6 @@ define i8 @vreduce_umax_v64i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.umax.v128i8(<128 x i8>) - define i8 @vreduce_umax_v128i8(ptr %x) { ; CHECK-LABEL: vreduce_umax_v128i8: ; CHECK: # %bb.0: @@ -5371,8 +4904,6 @@ define i8 @vreduce_umax_v128i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.umax.v256i8(<256 x i8>) - define i8 @vreduce_umax_v256i8(ptr %x) { ; CHECK-LABEL: vreduce_umax_v256i8: ; CHECK: # %bb.0: @@ -5390,8 +4921,6 @@ define i8 @vreduce_umax_v256i8(ptr %x) { ret i8 %red } -declare i16 @llvm.vector.reduce.umax.v1i16(<1 x i16>) - define i16 @vreduce_umax_v1i16(<1 x i16> %v) { ; CHECK-LABEL: vreduce_umax_v1i16: ; CHECK: # %bb.0: @@ -5402,8 +4931,6 @@ define i16 @vreduce_umax_v1i16(<1 x i16> %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.umax.v2i16(<2 x i16>) - define i16 @vreduce_umax_v2i16(ptr %x) { ; CHECK-LABEL: vreduce_umax_v2i16: ; CHECK: # %bb.0: @@ -5417,8 +4944,6 @@ define i16 @vreduce_umax_v2i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.umax.v4i16(<4 x i16>) - define i16 @vreduce_umax_v4i16(ptr %x) { ; CHECK-LABEL: vreduce_umax_v4i16: ; CHECK: # %bb.0: @@ -5432,8 +4957,6 @@ define i16 @vreduce_umax_v4i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.umax.v8i16(<8 x i16>) - define i16 @vreduce_umax_v8i16(ptr %x) { ; CHECK-LABEL: vreduce_umax_v8i16: ; CHECK: # %bb.0: @@ -5447,8 +4970,6 @@ define i16 @vreduce_umax_v8i16(ptr %x) { ret i16 %red } -declare i16 
@llvm.vector.reduce.umax.v16i16(<16 x i16>) - define i16 @vreduce_umax_v16i16(ptr %x) { ; CHECK-LABEL: vreduce_umax_v16i16: ; CHECK: # %bb.0: @@ -5462,8 +4983,6 @@ define i16 @vreduce_umax_v16i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.umax.v32i16(<32 x i16>) - define i16 @vreduce_umax_v32i16(ptr %x) { ; CHECK-LABEL: vreduce_umax_v32i16: ; CHECK: # %bb.0: @@ -5478,8 +4997,6 @@ define i16 @vreduce_umax_v32i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.umax.v64i16(<64 x i16>) - define i16 @vreduce_umax_v64i16(ptr %x) { ; CHECK-LABEL: vreduce_umax_v64i16: ; CHECK: # %bb.0: @@ -5494,8 +5011,6 @@ define i16 @vreduce_umax_v64i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.umax.v128i16(<128 x i16>) - define i16 @vreduce_umax_v128i16(ptr %x) { ; CHECK-LABEL: vreduce_umax_v128i16: ; CHECK: # %bb.0: @@ -5513,8 +5028,6 @@ define i16 @vreduce_umax_v128i16(ptr %x) { ret i16 %red } -declare i32 @llvm.vector.reduce.umax.v1i32(<1 x i32>) - define i32 @vreduce_umax_v1i32(<1 x i32> %v) { ; CHECK-LABEL: vreduce_umax_v1i32: ; CHECK: # %bb.0: @@ -5525,8 +5038,6 @@ define i32 @vreduce_umax_v1i32(<1 x i32> %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.umax.v2i32(<2 x i32>) - define i32 @vreduce_umax_v2i32(ptr %x) { ; CHECK-LABEL: vreduce_umax_v2i32: ; CHECK: # %bb.0: @@ -5540,8 +5051,6 @@ define i32 @vreduce_umax_v2i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.umax.v4i32(<4 x i32>) - define i32 @vreduce_umax_v4i32(ptr %x) { ; CHECK-LABEL: vreduce_umax_v4i32: ; CHECK: # %bb.0: @@ -5555,8 +5064,6 @@ define i32 @vreduce_umax_v4i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.umax.v8i32(<8 x i32>) - define i32 @vreduce_umax_v8i32(ptr %x) { ; CHECK-LABEL: vreduce_umax_v8i32: ; CHECK: # %bb.0: @@ -5570,8 +5077,6 @@ define i32 @vreduce_umax_v8i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.umax.v16i32(<16 x i32>) - define i32 @vreduce_umax_v16i32(ptr %x) { ; CHECK-LABEL: vreduce_umax_v16i32: ; 
CHECK: # %bb.0: @@ -5585,8 +5090,6 @@ define i32 @vreduce_umax_v16i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.umax.v32i32(<32 x i32>) - define i32 @vreduce_umax_v32i32(ptr %x) { ; CHECK-LABEL: vreduce_umax_v32i32: ; CHECK: # %bb.0: @@ -5601,8 +5104,6 @@ define i32 @vreduce_umax_v32i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.umax.v64i32(<64 x i32>) - define i32 @vreduce_umax_v64i32(ptr %x) { ; CHECK-LABEL: vreduce_umax_v64i32: ; CHECK: # %bb.0: @@ -5620,8 +5121,6 @@ define i32 @vreduce_umax_v64i32(ptr %x) { ret i32 %red } -declare i64 @llvm.vector.reduce.umax.v1i64(<1 x i64>) - define i64 @vreduce_umax_v1i64(<1 x i64> %v) { ; RV32-LABEL: vreduce_umax_v1i64: ; RV32: # %bb.0: @@ -5641,8 +5140,6 @@ define i64 @vreduce_umax_v1i64(<1 x i64> %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.umax.v2i64(<2 x i64>) - define i64 @vreduce_umax_v2i64(ptr %x) { ; RV32-LABEL: vreduce_umax_v2i64: ; RV32: # %bb.0: @@ -5668,8 +5165,6 @@ define i64 @vreduce_umax_v2i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.umax.v4i64(<4 x i64>) - define i64 @vreduce_umax_v4i64(ptr %x) { ; RV32-LABEL: vreduce_umax_v4i64: ; RV32: # %bb.0: @@ -5695,8 +5190,6 @@ define i64 @vreduce_umax_v4i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.umax.v8i64(<8 x i64>) - define i64 @vreduce_umax_v8i64(ptr %x) { ; RV32-LABEL: vreduce_umax_v8i64: ; RV32: # %bb.0: @@ -5722,8 +5215,6 @@ define i64 @vreduce_umax_v8i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.umax.v16i64(<16 x i64>) - define i64 @vreduce_umax_v16i64(ptr %x) { ; RV32-LABEL: vreduce_umax_v16i64: ; RV32: # %bb.0: @@ -5749,8 +5240,6 @@ define i64 @vreduce_umax_v16i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.umax.v32i64(<32 x i64>) - define i64 @vreduce_umax_v32i64(ptr %x) { ; RV32-LABEL: vreduce_umax_v32i64: ; RV32: # %bb.0: @@ -5782,8 +5271,6 @@ define i64 @vreduce_umax_v32i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.umax.v64i64(<64 x 
i64>) - define i64 @vreduce_umax_v64i64(ptr %x) nounwind { ; RV32-LABEL: vreduce_umax_v64i64: ; RV32: # %bb.0: @@ -5827,8 +5314,6 @@ define i64 @vreduce_umax_v64i64(ptr %x) nounwind { ret i64 %red } -declare i8 @llvm.vector.reduce.mul.v1i8(<1 x i8>) - define i8 @vreduce_mul_v1i8(<1 x i8> %v) { ; CHECK-LABEL: vreduce_mul_v1i8: ; CHECK: # %bb.0: @@ -5839,8 +5324,6 @@ define i8 @vreduce_mul_v1i8(<1 x i8> %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.mul.v2i8(<2 x i8>) - define i8 @vreduce_mul_v2i8(ptr %x) { ; CHECK-LABEL: vreduce_mul_v2i8: ; CHECK: # %bb.0: @@ -5855,8 +5338,6 @@ define i8 @vreduce_mul_v2i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.mul.v3i8(<3 x i8>) - define i8 @vreduce_mul_v3i8(ptr %x) { ; CHECK-LABEL: vreduce_mul_v3i8: ; CHECK: # %bb.0: @@ -5879,8 +5360,6 @@ define i8 @vreduce_mul_v3i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.mul.v4i8(<4 x i8>) - define i8 @vreduce_mul_v4i8(ptr %x) { ; CHECK-LABEL: vreduce_mul_v4i8: ; CHECK: # %bb.0: @@ -5897,8 +5376,6 @@ define i8 @vreduce_mul_v4i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.mul.v8i8(<8 x i8>) - define i8 @vreduce_mul_v8i8(ptr %x) { ; CHECK-LABEL: vreduce_mul_v8i8: ; CHECK: # %bb.0: @@ -5917,8 +5394,6 @@ define i8 @vreduce_mul_v8i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.mul.v16i8(<16 x i8>) - define i8 @vreduce_mul_v16i8(ptr %x) { ; CHECK-LABEL: vreduce_mul_v16i8: ; CHECK: # %bb.0: @@ -5939,8 +5414,6 @@ define i8 @vreduce_mul_v16i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.mul.v32i8(<32 x i8>) - define i8 @vreduce_mul_v32i8(ptr %x) { ; CHECK-LABEL: vreduce_mul_v32i8: ; CHECK: # %bb.0: @@ -5964,8 +5437,6 @@ define i8 @vreduce_mul_v32i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.mul.v64i8(<64 x i8>) - define i8 @vreduce_mul_v64i8(ptr %x) { ; CHECK-LABEL: vreduce_mul_v64i8: ; CHECK: # %bb.0: @@ -5992,8 +5463,6 @@ define i8 @vreduce_mul_v64i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.mul.v128i8(<128 
x i8>) - define i8 @vreduce_mul_v128i8(ptr %x) { ; CHECK-LABEL: vreduce_mul_v128i8: ; CHECK: # %bb.0: @@ -6023,8 +5492,6 @@ define i8 @vreduce_mul_v128i8(ptr %x) { ret i8 %red } -declare i8 @llvm.vector.reduce.mul.v256i8(<256 x i8>) - define i8 @vreduce_mul_v256i8(ptr %x) { ; CHECK-LABEL: vreduce_mul_v256i8: ; CHECK: # %bb.0: @@ -6057,8 +5524,6 @@ define i8 @vreduce_mul_v256i8(ptr %x) { ret i8 %red } -declare i16 @llvm.vector.reduce.mul.v1i16(<1 x i16>) - define i16 @vreduce_mul_v1i16(<1 x i16> %v) { ; CHECK-LABEL: vreduce_mul_v1i16: ; CHECK: # %bb.0: @@ -6069,8 +5534,6 @@ define i16 @vreduce_mul_v1i16(<1 x i16> %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.mul.v2i16(<2 x i16>) - define i16 @vreduce_mul_v2i16(ptr %x) { ; CHECK-LABEL: vreduce_mul_v2i16: ; CHECK: # %bb.0: @@ -6085,8 +5548,6 @@ define i16 @vreduce_mul_v2i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.mul.v4i16(<4 x i16>) - define i16 @vreduce_mul_v4i16(ptr %x) { ; CHECK-LABEL: vreduce_mul_v4i16: ; CHECK: # %bb.0: @@ -6103,8 +5564,6 @@ define i16 @vreduce_mul_v4i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.mul.v8i16(<8 x i16>) - define i16 @vreduce_mul_v8i16(ptr %x) { ; CHECK-LABEL: vreduce_mul_v8i16: ; CHECK: # %bb.0: @@ -6123,8 +5582,6 @@ define i16 @vreduce_mul_v8i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.mul.v16i16(<16 x i16>) - define i16 @vreduce_mul_v16i16(ptr %x) { ; CHECK-LABEL: vreduce_mul_v16i16: ; CHECK: # %bb.0: @@ -6145,8 +5602,6 @@ define i16 @vreduce_mul_v16i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.mul.v32i16(<32 x i16>) - define i16 @vreduce_mul_v32i16(ptr %x) { ; CHECK-LABEL: vreduce_mul_v32i16: ; CHECK: # %bb.0: @@ -6170,8 +5625,6 @@ define i16 @vreduce_mul_v32i16(ptr %x) { ret i16 %red } -declare i16 @llvm.vector.reduce.mul.v64i16(<64 x i16>) - define i16 @vreduce_mul_v64i16(ptr %x) { ; CHECK-LABEL: vreduce_mul_v64i16: ; CHECK: # %bb.0: @@ -6198,8 +5651,6 @@ define i16 @vreduce_mul_v64i16(ptr %x) { ret i16 
%red } -declare i16 @llvm.vector.reduce.mul.v128i16(<128 x i16>) - define i16 @vreduce_mul_v128i16(ptr %x) { ; CHECK-LABEL: vreduce_mul_v128i16: ; CHECK: # %bb.0: @@ -6229,8 +5680,6 @@ define i16 @vreduce_mul_v128i16(ptr %x) { ret i16 %red } -declare i32 @llvm.vector.reduce.mul.v1i32(<1 x i32>) - define i32 @vreduce_mul_v1i32(<1 x i32> %v) { ; CHECK-LABEL: vreduce_mul_v1i32: ; CHECK: # %bb.0: @@ -6241,8 +5690,6 @@ define i32 @vreduce_mul_v1i32(<1 x i32> %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.mul.v2i32(<2 x i32>) - define i32 @vreduce_mul_v2i32(ptr %x) { ; CHECK-LABEL: vreduce_mul_v2i32: ; CHECK: # %bb.0: @@ -6257,8 +5704,6 @@ define i32 @vreduce_mul_v2i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.mul.v4i32(<4 x i32>) - define i32 @vreduce_mul_v4i32(ptr %x) { ; CHECK-LABEL: vreduce_mul_v4i32: ; CHECK: # %bb.0: @@ -6275,8 +5720,6 @@ define i32 @vreduce_mul_v4i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.mul.v8i32(<8 x i32>) - define i32 @vreduce_mul_v8i32(ptr %x) { ; CHECK-LABEL: vreduce_mul_v8i32: ; CHECK: # %bb.0: @@ -6295,8 +5738,6 @@ define i32 @vreduce_mul_v8i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.mul.v16i32(<16 x i32>) - define i32 @vreduce_mul_v16i32(ptr %x) { ; CHECK-LABEL: vreduce_mul_v16i32: ; CHECK: # %bb.0: @@ -6317,8 +5758,6 @@ define i32 @vreduce_mul_v16i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.mul.v32i32(<32 x i32>) - define i32 @vreduce_mul_v32i32(ptr %x) { ; CHECK-LABEL: vreduce_mul_v32i32: ; CHECK: # %bb.0: @@ -6342,8 +5781,6 @@ define i32 @vreduce_mul_v32i32(ptr %x) { ret i32 %red } -declare i32 @llvm.vector.reduce.mul.v64i32(<64 x i32>) - define i32 @vreduce_mul_v64i32(ptr %x) { ; CHECK-LABEL: vreduce_mul_v64i32: ; CHECK: # %bb.0: @@ -6370,8 +5807,6 @@ define i32 @vreduce_mul_v64i32(ptr %x) { ret i32 %red } -declare i64 @llvm.vector.reduce.mul.v1i64(<1 x i64>) - define i64 @vreduce_mul_v1i64(<1 x i64> %v) { ; RV32-LABEL: vreduce_mul_v1i64: ; RV32: # %bb.0: @@ 
-6391,8 +5826,6 @@ define i64 @vreduce_mul_v1i64(<1 x i64> %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.mul.v2i64(<2 x i64>) - define i64 @vreduce_mul_v2i64(ptr %x) { ; RV32-LABEL: vreduce_mul_v2i64: ; RV32: # %bb.0: @@ -6421,8 +5854,6 @@ define i64 @vreduce_mul_v2i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.mul.v4i64(<4 x i64>) - define i64 @vreduce_mul_v4i64(ptr %x) { ; RV32-LABEL: vreduce_mul_v4i64: ; RV32: # %bb.0: @@ -6454,8 +5885,6 @@ define i64 @vreduce_mul_v4i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.mul.v8i64(<8 x i64>) - define i64 @vreduce_mul_v8i64(ptr %x) { ; RV32-LABEL: vreduce_mul_v8i64: ; RV32: # %bb.0: @@ -6491,8 +5920,6 @@ define i64 @vreduce_mul_v8i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.mul.v16i64(<16 x i64>) - define i64 @vreduce_mul_v16i64(ptr %x) { ; RV32-LABEL: vreduce_mul_v16i64: ; RV32: # %bb.0: @@ -6532,8 +5959,6 @@ define i64 @vreduce_mul_v16i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.mul.v32i64(<32 x i64>) - define i64 @vreduce_mul_v32i64(ptr %x) { ; RV32-LABEL: vreduce_mul_v32i64: ; RV32: # %bb.0: @@ -6578,8 +6003,6 @@ define i64 @vreduce_mul_v32i64(ptr %x) { ret i64 %red } -declare i64 @llvm.vector.reduce.mul.v64i64(<64 x i64>) - define i64 @vreduce_mul_v64i64(ptr %x) nounwind { ; RV32-LABEL: vreduce_mul_v64i64: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll index 276f6b077931b..8523ca957a8f5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s -declare i1 @llvm.vp.reduce.and.v1i1(i1, <1 x i1>, <1 x i1>, i32) - define zeroext i1 @vpreduce_and_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x 
i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_v1i1: ; CHECK: # %bb.0: @@ -18,8 +16,6 @@ define zeroext i1 @vpreduce_and_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i3 ret i1 %r } -declare i1 @llvm.vp.reduce.or.v1i1(i1, <1 x i1>, <1 x i1>, i32) - define zeroext i1 @vpreduce_or_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_v1i1: ; CHECK: # %bb.0: @@ -34,8 +30,6 @@ define zeroext i1 @vpreduce_or_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i32 ret i1 %r } -declare i1 @llvm.vp.reduce.xor.v1i1(i1, <1 x i1>, <1 x i1>, i32) - define zeroext i1 @vpreduce_xor_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_v1i1: ; CHECK: # %bb.0: @@ -50,8 +44,6 @@ define zeroext i1 @vpreduce_xor_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i3 ret i1 %r } -declare i1 @llvm.vp.reduce.and.v2i1(i1, <2 x i1>, <2 x i1>, i32) - define zeroext i1 @vpreduce_and_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_v2i1: ; CHECK: # %bb.0: @@ -66,8 +58,6 @@ define zeroext i1 @vpreduce_and_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i3 ret i1 %r } -declare i1 @llvm.vp.reduce.or.v2i1(i1, <2 x i1>, <2 x i1>, i32) - define zeroext i1 @vpreduce_or_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_v2i1: ; CHECK: # %bb.0: @@ -82,8 +72,6 @@ define zeroext i1 @vpreduce_or_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i32 ret i1 %r } -declare i1 @llvm.vp.reduce.xor.v2i1(i1, <2 x i1>, <2 x i1>, i32) - define zeroext i1 @vpreduce_xor_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_v2i1: ; CHECK: # %bb.0: @@ -98,8 +86,6 @@ define zeroext i1 @vpreduce_xor_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i3 ret i1 %r } -declare i1 @llvm.vp.reduce.and.v4i1(i1, <4 x i1>, <4 x i1>, i32) - define zeroext i1 @vpreduce_and_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) { ; 
CHECK-LABEL: vpreduce_and_v4i1: ; CHECK: # %bb.0: @@ -114,8 +100,6 @@ define zeroext i1 @vpreduce_and_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i3 ret i1 %r } -declare i1 @llvm.vp.reduce.or.v4i1(i1, <4 x i1>, <4 x i1>, i32) - define zeroext i1 @vpreduce_or_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_v4i1: ; CHECK: # %bb.0: @@ -130,8 +114,6 @@ define zeroext i1 @vpreduce_or_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32 ret i1 %r } -declare i1 @llvm.vp.reduce.xor.v4i1(i1, <4 x i1>, <4 x i1>, i32) - define zeroext i1 @vpreduce_xor_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_v4i1: ; CHECK: # %bb.0: @@ -146,8 +128,6 @@ define zeroext i1 @vpreduce_xor_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i3 ret i1 %r } -declare i1 @llvm.vp.reduce.and.v8i1(i1, <8 x i1>, <8 x i1>, i32) - define zeroext i1 @vpreduce_and_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_v8i1: ; CHECK: # %bb.0: @@ -162,8 +142,6 @@ define zeroext i1 @vpreduce_and_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i3 ret i1 %r } -declare i1 @llvm.vp.reduce.or.v8i1(i1, <8 x i1>, <8 x i1>, i32) - define zeroext i1 @vpreduce_or_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_v8i1: ; CHECK: # %bb.0: @@ -178,8 +156,6 @@ define zeroext i1 @vpreduce_or_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32 ret i1 %r } -declare i1 @llvm.vp.reduce.xor.v8i1(i1, <8 x i1>, <8 x i1>, i32) - define zeroext i1 @vpreduce_xor_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_v8i1: ; CHECK: # %bb.0: @@ -194,8 +170,6 @@ define zeroext i1 @vpreduce_xor_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i3 ret i1 %r } -declare i1 @llvm.vp.reduce.and.v10i1(i1, <10 x i1>, <10 x i1>, i32) - define zeroext i1 @vpreduce_and_v10i1(i1 zeroext %s, <10 x i1> %v, <10 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vpreduce_and_v10i1: ; CHECK: # %bb.0: @@ -210,8 +184,6 @@ define zeroext i1 @vpreduce_and_v10i1(i1 zeroext %s, <10 x i1> %v, <10 x i1> %m, ret i1 %r } -declare i1 @llvm.vp.reduce.and.v16i1(i1, <16 x i1>, <16 x i1>, i32) - define zeroext i1 @vpreduce_and_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_v16i1: ; CHECK: # %bb.0: @@ -226,8 +198,6 @@ define zeroext i1 @vpreduce_and_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, ret i1 %r } -declare i1 @llvm.vp.reduce.and.v256i1(i1, <256 x i1>, <256 x i1>, i32) - define zeroext i1 @vpreduce_and_v256i1(i1 zeroext %s, <256 x i1> %v, <256 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_v256i1: ; CHECK: # %bb.0: @@ -261,8 +231,6 @@ define zeroext i1 @vpreduce_and_v256i1(i1 zeroext %s, <256 x i1> %v, <256 x i1> ret i1 %r } -declare i1 @llvm.vp.reduce.or.v16i1(i1, <16 x i1>, <16 x i1>, i32) - define zeroext i1 @vpreduce_or_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_v16i1: ; CHECK: # %bb.0: @@ -277,8 +245,6 @@ define zeroext i1 @vpreduce_or_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, ret i1 %r } -declare i1 @llvm.vp.reduce.xor.v16i1(i1, <16 x i1>, <16 x i1>, i32) - define zeroext i1 @vpreduce_xor_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_v16i1: ; CHECK: # %bb.0: @@ -293,8 +259,6 @@ define zeroext i1 @vpreduce_xor_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, ret i1 %r } -declare i1 @llvm.vp.reduce.add.v1i1(i1, <1 x i1>, <1 x i1>, i32) - define zeroext i1 @vpreduce_add_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_v1i1: ; CHECK: # %bb.0: @@ -309,8 +273,6 @@ define zeroext i1 @vpreduce_add_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i3 ret i1 %r } -declare i1 @llvm.vp.reduce.add.v2i1(i1, <2 x i1>, <2 x i1>, i32) - define zeroext i1 @vpreduce_add_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) 
{ ; CHECK-LABEL: vpreduce_add_v2i1: ; CHECK: # %bb.0: @@ -325,8 +287,6 @@ define zeroext i1 @vpreduce_add_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i3 ret i1 %r } -declare i1 @llvm.vp.reduce.add.v4i1(i1, <4 x i1>, <4 x i1>, i32) - define zeroext i1 @vpreduce_add_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_v4i1: ; CHECK: # %bb.0: @@ -341,8 +301,6 @@ define zeroext i1 @vpreduce_add_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i3 ret i1 %r } -declare i1 @llvm.vp.reduce.add.v8i1(i1, <8 x i1>, <8 x i1>, i32) - define zeroext i1 @vpreduce_add_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_v8i1: ; CHECK: # %bb.0: @@ -357,8 +315,6 @@ define zeroext i1 @vpreduce_add_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i3 ret i1 %r } -declare i1 @llvm.vp.reduce.add.v16i1(i1, <16 x i1>, <16 x i1>, i32) - define zeroext i1 @vpreduce_add_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_v16i1: ; CHECK: # %bb.0: @@ -373,8 +329,6 @@ define zeroext i1 @vpreduce_add_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, ret i1 %r } -declare i1 @llvm.vp.reduce.smax.v1i1(i1, <1 x i1>, <1 x i1>, i32) - define zeroext i1 @vpreduce_smax_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_v1i1: ; CHECK: # %bb.0: @@ -389,8 +343,6 @@ define zeroext i1 @vpreduce_smax_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i ret i1 %r } -declare i1 @llvm.vp.reduce.smax.v2i1(i1, <2 x i1>, <2 x i1>, i32) - define zeroext i1 @vpreduce_smax_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_v2i1: ; CHECK: # %bb.0: @@ -405,8 +357,6 @@ define zeroext i1 @vpreduce_smax_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i ret i1 %r } -declare i1 @llvm.vp.reduce.smax.v4i1(i1, <4 x i1>, <4 x i1>, i32) - define zeroext i1 @vpreduce_smax_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext 
%evl) { ; CHECK-LABEL: vpreduce_smax_v4i1: ; CHECK: # %bb.0: @@ -421,8 +371,6 @@ define zeroext i1 @vpreduce_smax_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i ret i1 %r } -declare i1 @llvm.vp.reduce.smax.v8i1(i1, <8 x i1>, <8 x i1>, i32) - define zeroext i1 @vpreduce_smax_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_v8i1: ; CHECK: # %bb.0: @@ -437,8 +385,6 @@ define zeroext i1 @vpreduce_smax_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i ret i1 %r } -declare i1 @llvm.vp.reduce.smax.v16i1(i1, <16 x i1>, <16 x i1>, i32) - define zeroext i1 @vpreduce_smax_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_v16i1: ; CHECK: # %bb.0: @@ -453,8 +399,6 @@ define zeroext i1 @vpreduce_smax_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m ret i1 %r } -declare i1 @llvm.vp.reduce.smax.v32i1(i1, <32 x i1>, <32 x i1>, i32) - define zeroext i1 @vpreduce_smax_v32i1(i1 zeroext %s, <32 x i1> %v, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_v32i1: ; CHECK: # %bb.0: @@ -469,8 +413,6 @@ define zeroext i1 @vpreduce_smax_v32i1(i1 zeroext %s, <32 x i1> %v, <32 x i1> %m ret i1 %r } -declare i1 @llvm.vp.reduce.smax.v64i1(i1, <64 x i1>, <64 x i1>, i32) - define zeroext i1 @vpreduce_smax_v64i1(i1 zeroext %s, <64 x i1> %v, <64 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_v64i1: ; CHECK: # %bb.0: @@ -485,8 +427,6 @@ define zeroext i1 @vpreduce_smax_v64i1(i1 zeroext %s, <64 x i1> %v, <64 x i1> %m ret i1 %r } -declare i1 @llvm.vp.reduce.smin.v1i1(i1, <1 x i1>, <1 x i1>, i32) - define zeroext i1 @vpreduce_smin_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_v1i1: ; CHECK: # %bb.0: @@ -501,8 +441,6 @@ define zeroext i1 @vpreduce_smin_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i ret i1 %r } -declare i1 @llvm.vp.reduce.smin.v2i1(i1, <2 x i1>, <2 x i1>, i32) - define zeroext i1 @vpreduce_smin_v2i1(i1 zeroext %s, <2 x i1> %v, 
<2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_v2i1: ; CHECK: # %bb.0: @@ -517,8 +455,6 @@ define zeroext i1 @vpreduce_smin_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i ret i1 %r } -declare i1 @llvm.vp.reduce.smin.v4i1(i1, <4 x i1>, <4 x i1>, i32) - define zeroext i1 @vpreduce_smin_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_v4i1: ; CHECK: # %bb.0: @@ -533,8 +469,6 @@ define zeroext i1 @vpreduce_smin_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i ret i1 %r } -declare i1 @llvm.vp.reduce.smin.v8i1(i1, <8 x i1>, <8 x i1>, i32) - define zeroext i1 @vpreduce_smin_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_v8i1: ; CHECK: # %bb.0: @@ -549,8 +483,6 @@ define zeroext i1 @vpreduce_smin_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i ret i1 %r } -declare i1 @llvm.vp.reduce.smin.v16i1(i1, <16 x i1>, <16 x i1>, i32) - define zeroext i1 @vpreduce_smin_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_v16i1: ; CHECK: # %bb.0: @@ -565,8 +497,6 @@ define zeroext i1 @vpreduce_smin_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m ret i1 %r } -declare i1 @llvm.vp.reduce.smin.v32i1(i1, <32 x i1>, <32 x i1>, i32) - define zeroext i1 @vpreduce_smin_v32i1(i1 zeroext %s, <32 x i1> %v, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_v32i1: ; CHECK: # %bb.0: @@ -581,8 +511,6 @@ define zeroext i1 @vpreduce_smin_v32i1(i1 zeroext %s, <32 x i1> %v, <32 x i1> %m ret i1 %r } -declare i1 @llvm.vp.reduce.smin.v64i1(i1, <64 x i1>, <64 x i1>, i32) - define zeroext i1 @vpreduce_smin_v64i1(i1 zeroext %s, <64 x i1> %v, <64 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_v64i1: ; CHECK: # %bb.0: @@ -597,8 +525,6 @@ define zeroext i1 @vpreduce_smin_v64i1(i1 zeroext %s, <64 x i1> %v, <64 x i1> %m ret i1 %r } -declare i1 @llvm.vp.reduce.umax.v1i1(i1, <1 x i1>, <1 x i1>, i32) - define zeroext i1 @vpreduce_umax_v1i1(i1 
zeroext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_v1i1: ; CHECK: # %bb.0: @@ -613,8 +539,6 @@ define zeroext i1 @vpreduce_umax_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i ret i1 %r } -declare i1 @llvm.vp.reduce.umax.v2i1(i1, <2 x i1>, <2 x i1>, i32) - define zeroext i1 @vpreduce_umax_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_v2i1: ; CHECK: # %bb.0: @@ -629,8 +553,6 @@ define zeroext i1 @vpreduce_umax_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i ret i1 %r } -declare i1 @llvm.vp.reduce.umax.v4i1(i1, <4 x i1>, <4 x i1>, i32) - define zeroext i1 @vpreduce_umax_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_v4i1: ; CHECK: # %bb.0: @@ -645,8 +567,6 @@ define zeroext i1 @vpreduce_umax_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i ret i1 %r } -declare i1 @llvm.vp.reduce.umax.v8i1(i1, <8 x i1>, <8 x i1>, i32) - define zeroext i1 @vpreduce_umax_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_v8i1: ; CHECK: # %bb.0: @@ -661,8 +581,6 @@ define zeroext i1 @vpreduce_umax_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i ret i1 %r } -declare i1 @llvm.vp.reduce.umax.v16i1(i1, <16 x i1>, <16 x i1>, i32) - define zeroext i1 @vpreduce_umax_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_v16i1: ; CHECK: # %bb.0: @@ -677,8 +595,6 @@ define zeroext i1 @vpreduce_umax_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m ret i1 %r } -declare i1 @llvm.vp.reduce.umax.v32i1(i1, <32 x i1>, <32 x i1>, i32) - define zeroext i1 @vpreduce_umax_v32i1(i1 zeroext %s, <32 x i1> %v, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_v32i1: ; CHECK: # %bb.0: @@ -693,8 +609,6 @@ define zeroext i1 @vpreduce_umax_v32i1(i1 zeroext %s, <32 x i1> %v, <32 x i1> %m ret i1 %r } -declare i1 @llvm.vp.reduce.umax.v64i1(i1, <64 x i1>, <64 x i1>, i32) - define zeroext i1 
@vpreduce_umax_v64i1(i1 zeroext %s, <64 x i1> %v, <64 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_v64i1: ; CHECK: # %bb.0: @@ -709,8 +623,6 @@ define zeroext i1 @vpreduce_umax_v64i1(i1 zeroext %s, <64 x i1> %v, <64 x i1> %m ret i1 %r } -declare i1 @llvm.vp.reduce.umin.v1i1(i1, <1 x i1>, <1 x i1>, i32) - define zeroext i1 @vpreduce_umin_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_v1i1: ; CHECK: # %bb.0: @@ -725,8 +637,6 @@ define zeroext i1 @vpreduce_umin_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i ret i1 %r } -declare i1 @llvm.vp.reduce.umin.v2i1(i1, <2 x i1>, <2 x i1>, i32) - define zeroext i1 @vpreduce_umin_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_v2i1: ; CHECK: # %bb.0: @@ -741,8 +651,6 @@ define zeroext i1 @vpreduce_umin_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i ret i1 %r } -declare i1 @llvm.vp.reduce.umin.v4i1(i1, <4 x i1>, <4 x i1>, i32) - define zeroext i1 @vpreduce_umin_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_v4i1: ; CHECK: # %bb.0: @@ -757,8 +665,6 @@ define zeroext i1 @vpreduce_umin_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i ret i1 %r } -declare i1 @llvm.vp.reduce.umin.v8i1(i1, <8 x i1>, <8 x i1>, i32) - define zeroext i1 @vpreduce_umin_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_v8i1: ; CHECK: # %bb.0: @@ -773,8 +679,6 @@ define zeroext i1 @vpreduce_umin_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i ret i1 %r } -declare i1 @llvm.vp.reduce.umin.v16i1(i1, <16 x i1>, <16 x i1>, i32) - define zeroext i1 @vpreduce_umin_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_v16i1: ; CHECK: # %bb.0: @@ -789,8 +693,6 @@ define zeroext i1 @vpreduce_umin_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m ret i1 %r } -declare i1 @llvm.vp.reduce.umin.v32i1(i1, <32 x i1>, <32 x i1>, i32) - 
define zeroext i1 @vpreduce_umin_v32i1(i1 zeroext %s, <32 x i1> %v, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_v32i1: ; CHECK: # %bb.0: @@ -805,8 +707,6 @@ define zeroext i1 @vpreduce_umin_v32i1(i1 zeroext %s, <32 x i1> %v, <32 x i1> %m ret i1 %r } -declare i1 @llvm.vp.reduce.umin.v64i1(i1, <64 x i1>, <64 x i1>, i32) - define zeroext i1 @vpreduce_umin_v64i1(i1 zeroext %s, <64 x i1> %v, <64 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_v64i1: ; CHECK: # %bb.0: @@ -821,8 +721,6 @@ define zeroext i1 @vpreduce_umin_v64i1(i1 zeroext %s, <64 x i1> %v, <64 x i1> %m ret i1 %r } -declare i1 @llvm.vp.reduce.mul.v1i1(i1, <1 x i1>, <1 x i1>, i32) - define i1 @vpreduce_mul_v1i1(i1 %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_mul_v1i1: ; CHECK: # %bb.0: @@ -837,8 +735,6 @@ define i1 @vpreduce_mul_v1i1(i1 %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) ret i1 %r } -declare i1 @llvm.vp.reduce.mul.v2i1(i1, <2 x i1>, <2 x i1>, i32) - define zeroext i1 @vpreduce_mul_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_mul_v2i1: ; CHECK: # %bb.0: @@ -853,8 +749,6 @@ define zeroext i1 @vpreduce_mul_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i3 ret i1 %r } -declare i1 @llvm.vp.reduce.mul.v4i1(i1, <4 x i1>, <4 x i1>, i32) - define zeroext i1 @vpreduce_mul_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_mul_v4i1: ; CHECK: # %bb.0: @@ -869,8 +763,6 @@ define zeroext i1 @vpreduce_mul_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i3 ret i1 %r } -declare i1 @llvm.vp.reduce.mul.v8i1(i1, <8 x i1>, <8 x i1>, i32) - define zeroext i1 @vpreduce_mul_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_mul_v8i1: ; CHECK: # %bb.0: @@ -885,8 +777,6 @@ define zeroext i1 @vpreduce_mul_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i3 ret i1 %r } -declare i1 @llvm.vp.reduce.mul.v16i1(i1, <16 x i1>, <16 x i1>, i32) - define 
zeroext i1 @vpreduce_mul_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_mul_v16i1: ; CHECK: # %bb.0: @@ -901,8 +791,6 @@ define zeroext i1 @vpreduce_mul_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, ret i1 %r } -declare i1 @llvm.vp.reduce.mul.v32i1(i1, <32 x i1>, <32 x i1>, i32) - define zeroext i1 @vpreduce_mul_v32i1(i1 zeroext %s, <32 x i1> %v, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_mul_v32i1: ; CHECK: # %bb.0: @@ -917,8 +805,6 @@ define zeroext i1 @vpreduce_mul_v32i1(i1 zeroext %s, <32 x i1> %v, <32 x i1> %m, ret i1 %r } -declare i1 @llvm.vp.reduce.mul.v64i1(i1, <64 x i1>, <64 x i1>, i32) - define zeroext i1 @vpreduce_mul_v64i1(i1 zeroext %s, <64 x i1> %v, <64 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_mul_v64i1: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll index 97cf7e6902e32..7540495c0d3b5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64 %s -declare <2 x half> @llvm.vp.rint.v2f16(<2 x half>, <2 x i1>, i32) - define <2 x half> @vp_rint_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_v2f16: ; CHECK: # %bb.0: @@ -44,8 +42,6 @@ define <2 x half> @vp_rint_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) { ret <2 x half> %v } -declare <4 x half> @llvm.vp.rint.v4f16(<4 x half>, <4 x i1>, i32) - define <4 x half> @vp_rint_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_v4f16: ; CHECK: # %bb.0: @@ -84,8 +80,6 @@ define <4 x half> @vp_rint_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) { ret <4 x half> %v } -declare <8 x half> @llvm.vp.rint.v8f16(<8 x half>, <8 x i1>, i32) - define <8 x half> @vp_rint_v8f16(<8 
x half> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_v8f16: ; CHECK: # %bb.0: @@ -124,8 +118,6 @@ define <8 x half> @vp_rint_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) { ret <8 x half> %v } -declare <16 x half> @llvm.vp.rint.v16f16(<16 x half>, <16 x i1>, i32) - define <16 x half> @vp_rint_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_v16f16: ; CHECK: # %bb.0: @@ -166,8 +158,6 @@ define <16 x half> @vp_rint_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) { ret <16 x half> %v } -declare <2 x float> @llvm.vp.rint.v2f32(<2 x float>, <2 x i1>, i32) - define <2 x float> @vp_rint_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_v2f32: ; CHECK: # %bb.0: @@ -204,8 +194,6 @@ define <2 x float> @vp_rint_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) { ret <2 x float> %v } -declare <4 x float> @llvm.vp.rint.v4f32(<4 x float>, <4 x i1>, i32) - define <4 x float> @vp_rint_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_v4f32: ; CHECK: # %bb.0: @@ -242,8 +230,6 @@ define <4 x float> @vp_rint_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) { ret <4 x float> %v } -declare <8 x float> @llvm.vp.rint.v8f32(<8 x float>, <8 x i1>, i32) - define <8 x float> @vp_rint_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_v8f32: ; CHECK: # %bb.0: @@ -282,8 +268,6 @@ define <8 x float> @vp_rint_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) { ret <8 x float> %v } -declare <16 x float> @llvm.vp.rint.v16f32(<16 x float>, <16 x i1>, i32) - define <16 x float> @vp_rint_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_v16f32: ; CHECK: # %bb.0: @@ -322,8 +306,6 @@ define <16 x float> @vp_rint_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl) ret <16 x float> %v } -declare <2 x double> @llvm.vp.rint.v2f64(<2 x double>, <2 x i1>, i32) - define <2 x double> @vp_rint_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext 
%evl) { ; RV32-LABEL: vp_rint_v2f64: ; RV32: # %bb.0: @@ -390,8 +372,6 @@ define <2 x double> @vp_rint_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl) ret <2 x double> %v } -declare <4 x double> @llvm.vp.rint.v4f64(<4 x double>, <4 x i1>, i32) - define <4 x double> @vp_rint_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_rint_v4f64: ; RV32: # %bb.0: @@ -462,8 +442,6 @@ define <4 x double> @vp_rint_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) ret <4 x double> %v } -declare <8 x double> @llvm.vp.rint.v8f64(<8 x double>, <8 x i1>, i32) - define <8 x double> @vp_rint_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_rint_v8f64: ; RV32: # %bb.0: @@ -534,8 +512,6 @@ define <8 x double> @vp_rint_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl) ret <8 x double> %v } -declare <15 x double> @llvm.vp.rint.v15f64(<15 x double>, <15 x i1>, i32) - define <15 x double> @vp_rint_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_rint_v15f64: ; RV32: # %bb.0: @@ -606,8 +582,6 @@ define <15 x double> @vp_rint_v15f64_unmasked(<15 x double> %va, i32 zeroext %ev ret <15 x double> %v } -declare <16 x double> @llvm.vp.rint.v16f64(<16 x double>, <16 x i1>, i32) - define <16 x double> @vp_rint_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_rint_v16f64: ; RV32: # %bb.0: @@ -678,8 +652,6 @@ define <16 x double> @vp_rint_v16f64_unmasked(<16 x double> %va, i32 zeroext %ev ret <16 x double> %v } -declare <32 x double> @llvm.vp.rint.v32f64(<32 x double>, <32 x i1>, i32) - define <32 x double> @vp_rint_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_rint_v32f64: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll index 16c8b2b9da682..de5427f329496 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN -declare <2 x half> @llvm.vp.round.v2f16(<2 x half>, <2 x i1>, i32) - define <2 x half> @vp_round_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_round_v2f16: ; ZVFH: # %bb.0: @@ -96,8 +94,6 @@ define <2 x half> @vp_round_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) { ret <2 x half> %v } -declare <4 x half> @llvm.vp.round.v4f16(<4 x half>, <4 x i1>, i32) - define <4 x half> @vp_round_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_round_v4f16: ; ZVFH: # %bb.0: @@ -184,8 +180,6 @@ define <4 x half> @vp_round_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) { ret <4 x half> %v } -declare <8 x half> @llvm.vp.round.v8f16(<8 x half>, <8 x i1>, i32) - define <8 x half> @vp_round_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_round_v8f16: ; ZVFH: # %bb.0: @@ -272,8 +266,6 @@ define <8 x half> @vp_round_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) { ret <8 x half> %v } -declare <16 x half> @llvm.vp.round.v16f16(<16 x half>, <16 x i1>, i32) - define <16 x half> @vp_round_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_round_v16f16: ; ZVFH: # %bb.0: @@ -362,8 +354,6 @@ define <16 x half> @vp_round_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) ret <16 x half> %v } -declare <2 x float> @llvm.vp.round.v2f32(<2 x float>, <2 x i1>, i32) - define <2 x float> @vp_round_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_v2f32: ; CHECK: # %bb.0: @@ -404,8 +394,6 @@ define <2 x float> @vp_round_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) { ret <2 x float> %v } -declare <4 x float> @llvm.vp.round.v4f32(<4 x float>, <4 x i1>, i32) - define <4 x float> @vp_round_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext 
%evl) { ; CHECK-LABEL: vp_round_v4f32: ; CHECK: # %bb.0: @@ -446,8 +434,6 @@ define <4 x float> @vp_round_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) { ret <4 x float> %v } -declare <8 x float> @llvm.vp.round.v8f32(<8 x float>, <8 x i1>, i32) - define <8 x float> @vp_round_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_v8f32: ; CHECK: # %bb.0: @@ -490,8 +476,6 @@ define <8 x float> @vp_round_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) { ret <8 x float> %v } -declare <16 x float> @llvm.vp.round.v16f32(<16 x float>, <16 x i1>, i32) - define <16 x float> @vp_round_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_v16f32: ; CHECK: # %bb.0: @@ -534,8 +518,6 @@ define <16 x float> @vp_round_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl ret <16 x float> %v } -declare <2 x double> @llvm.vp.round.v2f64(<2 x double>, <2 x i1>, i32) - define <2 x double> @vp_round_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_round_v2f64: ; RV32ZVFH: # %bb.0: @@ -676,8 +658,6 @@ define <2 x double> @vp_round_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl) ret <2 x double> %v } -declare <4 x double> @llvm.vp.round.v4f64(<4 x double>, <4 x i1>, i32) - define <4 x double> @vp_round_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_round_v4f64: ; RV32ZVFH: # %bb.0: @@ -826,8 +806,6 @@ define <4 x double> @vp_round_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) ret <4 x double> %v } -declare <8 x double> @llvm.vp.round.v8f64(<8 x double>, <8 x i1>, i32) - define <8 x double> @vp_round_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_round_v8f64: ; RV32ZVFH: # %bb.0: @@ -976,8 +954,6 @@ define <8 x double> @vp_round_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl) ret <8 x double> %v } -declare <15 x double> @llvm.vp.round.v15f64(<15 x double>, <15 x i1>, i32) - define <15 x double> @vp_round_v15f64(<15 x double> %va, 
<15 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_round_v15f64: ; RV32ZVFH: # %bb.0: @@ -1126,8 +1102,6 @@ define <15 x double> @vp_round_v15f64_unmasked(<15 x double> %va, i32 zeroext %e ret <15 x double> %v } -declare <16 x double> @llvm.vp.round.v16f64(<16 x double>, <16 x i1>, i32) - define <16 x double> @vp_round_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_round_v16f64: ; RV32ZVFH: # %bb.0: @@ -1276,8 +1250,6 @@ define <16 x double> @vp_round_v16f64_unmasked(<16 x double> %va, i32 zeroext %e ret <16 x double> %v } -declare <32 x double> @llvm.vp.round.v32f64(<32 x double>, <32 x i1>, i32) - define <32 x double> @vp_round_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_round_v32f64: ; RV32ZVFH: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll index 14c550d555cf7..1c923e3f12171 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN -declare <2 x half> @llvm.vp.roundeven.v2f16(<2 x half>, <2 x i1>, i32) - define <2 x half> @vp_roundeven_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundeven_v2f16: ; ZVFH: # %bb.0: @@ -96,8 +94,6 @@ define <2 x half> @vp_roundeven_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) ret <2 x half> %v } -declare <4 x half> @llvm.vp.roundeven.v4f16(<4 x half>, <4 x i1>, i32) - define <4 x half> @vp_roundeven_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundeven_v4f16: ; ZVFH: # %bb.0: @@ -184,8 +180,6 @@ define <4 x half> @vp_roundeven_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) ret <4 x half> %v } -declare <8 x half> @llvm.vp.roundeven.v8f16(<8 x half>, <8 x 
i1>, i32) - define <8 x half> @vp_roundeven_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundeven_v8f16: ; ZVFH: # %bb.0: @@ -272,8 +266,6 @@ define <8 x half> @vp_roundeven_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) ret <8 x half> %v } -declare <16 x half> @llvm.vp.roundeven.v16f16(<16 x half>, <16 x i1>, i32) - define <16 x half> @vp_roundeven_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundeven_v16f16: ; ZVFH: # %bb.0: @@ -362,8 +354,6 @@ define <16 x half> @vp_roundeven_v16f16_unmasked(<16 x half> %va, i32 zeroext %e ret <16 x half> %v } -declare <2 x float> @llvm.vp.roundeven.v2f32(<2 x float>, <2 x i1>, i32) - define <2 x float> @vp_roundeven_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundeven_v2f32: ; CHECK: # %bb.0: @@ -404,8 +394,6 @@ define <2 x float> @vp_roundeven_v2f32_unmasked(<2 x float> %va, i32 zeroext %ev ret <2 x float> %v } -declare <4 x float> @llvm.vp.roundeven.v4f32(<4 x float>, <4 x i1>, i32) - define <4 x float> @vp_roundeven_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundeven_v4f32: ; CHECK: # %bb.0: @@ -446,8 +434,6 @@ define <4 x float> @vp_roundeven_v4f32_unmasked(<4 x float> %va, i32 zeroext %ev ret <4 x float> %v } -declare <8 x float> @llvm.vp.roundeven.v8f32(<8 x float>, <8 x i1>, i32) - define <8 x float> @vp_roundeven_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundeven_v8f32: ; CHECK: # %bb.0: @@ -490,8 +476,6 @@ define <8 x float> @vp_roundeven_v8f32_unmasked(<8 x float> %va, i32 zeroext %ev ret <8 x float> %v } -declare <16 x float> @llvm.vp.roundeven.v16f32(<16 x float>, <16 x i1>, i32) - define <16 x float> @vp_roundeven_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundeven_v16f32: ; CHECK: # %bb.0: @@ -534,8 +518,6 @@ define <16 x float> @vp_roundeven_v16f32_unmasked(<16 x float> %va, i32 zeroext ret <16 x float> %v } -declare <2 x 
double> @llvm.vp.roundeven.v2f64(<2 x double>, <2 x i1>, i32) - define <2 x double> @vp_roundeven_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_roundeven_v2f64: ; RV32ZVFH: # %bb.0: @@ -676,8 +658,6 @@ define <2 x double> @vp_roundeven_v2f64_unmasked(<2 x double> %va, i32 zeroext % ret <2 x double> %v } -declare <4 x double> @llvm.vp.roundeven.v4f64(<4 x double>, <4 x i1>, i32) - define <4 x double> @vp_roundeven_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_roundeven_v4f64: ; RV32ZVFH: # %bb.0: @@ -826,8 +806,6 @@ define <4 x double> @vp_roundeven_v4f64_unmasked(<4 x double> %va, i32 zeroext % ret <4 x double> %v } -declare <8 x double> @llvm.vp.roundeven.v8f64(<8 x double>, <8 x i1>, i32) - define <8 x double> @vp_roundeven_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_roundeven_v8f64: ; RV32ZVFH: # %bb.0: @@ -976,8 +954,6 @@ define <8 x double> @vp_roundeven_v8f64_unmasked(<8 x double> %va, i32 zeroext % ret <8 x double> %v } -declare <15 x double> @llvm.vp.roundeven.v15f64(<15 x double>, <15 x i1>, i32) - define <15 x double> @vp_roundeven_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_roundeven_v15f64: ; RV32ZVFH: # %bb.0: @@ -1126,8 +1102,6 @@ define <15 x double> @vp_roundeven_v15f64_unmasked(<15 x double> %va, i32 zeroex ret <15 x double> %v } -declare <16 x double> @llvm.vp.roundeven.v16f64(<16 x double>, <16 x i1>, i32) - define <16 x double> @vp_roundeven_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_roundeven_v16f64: ; RV32ZVFH: # %bb.0: @@ -1276,8 +1250,6 @@ define <16 x double> @vp_roundeven_v16f64_unmasked(<16 x double> %va, i32 zeroex ret <16 x double> %v } -declare <32 x double> @llvm.vp.roundeven.v32f64(<32 x double>, <32 x i1>, i32) - define <32 x double> @vp_roundeven_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_roundeven_v32f64: ; RV32ZVFH: # 
%bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll index 16f04f14721d0..83cbd2b760341 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN -declare <2 x half> @llvm.vp.roundtozero.v2f16(<2 x half>, <2 x i1>, i32) - define <2 x half> @vp_roundtozero_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundtozero_v2f16: ; ZVFH: # %bb.0: @@ -96,8 +94,6 @@ define <2 x half> @vp_roundtozero_v2f16_unmasked(<2 x half> %va, i32 zeroext %ev ret <2 x half> %v } -declare <4 x half> @llvm.vp.roundtozero.v4f16(<4 x half>, <4 x i1>, i32) - define <4 x half> @vp_roundtozero_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundtozero_v4f16: ; ZVFH: # %bb.0: @@ -184,8 +180,6 @@ define <4 x half> @vp_roundtozero_v4f16_unmasked(<4 x half> %va, i32 zeroext %ev ret <4 x half> %v } -declare <8 x half> @llvm.vp.roundtozero.v8f16(<8 x half>, <8 x i1>, i32) - define <8 x half> @vp_roundtozero_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundtozero_v8f16: ; ZVFH: # %bb.0: @@ -272,8 +266,6 @@ define <8 x half> @vp_roundtozero_v8f16_unmasked(<8 x half> %va, i32 zeroext %ev ret <8 x half> %v } -declare <16 x half> @llvm.vp.roundtozero.v16f16(<16 x half>, <16 x i1>, i32) - define <16 x half> @vp_roundtozero_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundtozero_v16f16: ; ZVFH: # %bb.0: @@ -362,8 +354,6 @@ define <16 x half> @vp_roundtozero_v16f16_unmasked(<16 x half> %va, i32 zeroext ret <16 x half> %v } -declare <2 x float> @llvm.vp.roundtozero.v2f32(<2 x float>, <2 x i1>, i32) - define <2 x float> @vp_roundtozero_v2f32(<2 x float> %va, <2 x 
i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundtozero_v2f32: ; CHECK: # %bb.0: @@ -404,8 +394,6 @@ define <2 x float> @vp_roundtozero_v2f32_unmasked(<2 x float> %va, i32 zeroext % ret <2 x float> %v } -declare <4 x float> @llvm.vp.roundtozero.v4f32(<4 x float>, <4 x i1>, i32) - define <4 x float> @vp_roundtozero_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundtozero_v4f32: ; CHECK: # %bb.0: @@ -446,8 +434,6 @@ define <4 x float> @vp_roundtozero_v4f32_unmasked(<4 x float> %va, i32 zeroext % ret <4 x float> %v } -declare <8 x float> @llvm.vp.roundtozero.v8f32(<8 x float>, <8 x i1>, i32) - define <8 x float> @vp_roundtozero_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundtozero_v8f32: ; CHECK: # %bb.0: @@ -490,8 +476,6 @@ define <8 x float> @vp_roundtozero_v8f32_unmasked(<8 x float> %va, i32 zeroext % ret <8 x float> %v } -declare <16 x float> @llvm.vp.roundtozero.v16f32(<16 x float>, <16 x i1>, i32) - define <16 x float> @vp_roundtozero_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundtozero_v16f32: ; CHECK: # %bb.0: @@ -534,8 +518,6 @@ define <16 x float> @vp_roundtozero_v16f32_unmasked(<16 x float> %va, i32 zeroex ret <16 x float> %v } -declare <2 x double> @llvm.vp.roundtozero.v2f64(<2 x double>, <2 x i1>, i32) - define <2 x double> @vp_roundtozero_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_roundtozero_v2f64: ; RV32ZVFH: # %bb.0: @@ -676,8 +658,6 @@ define <2 x double> @vp_roundtozero_v2f64_unmasked(<2 x double> %va, i32 zeroext ret <2 x double> %v } -declare <4 x double> @llvm.vp.roundtozero.v4f64(<4 x double>, <4 x i1>, i32) - define <4 x double> @vp_roundtozero_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_roundtozero_v4f64: ; RV32ZVFH: # %bb.0: @@ -826,8 +806,6 @@ define <4 x double> @vp_roundtozero_v4f64_unmasked(<4 x double> %va, i32 zeroext ret <4 x double> %v } -declare <8 x double> 
@llvm.vp.roundtozero.v8f64(<8 x double>, <8 x i1>, i32) - define <8 x double> @vp_roundtozero_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_roundtozero_v8f64: ; RV32ZVFH: # %bb.0: @@ -976,8 +954,6 @@ define <8 x double> @vp_roundtozero_v8f64_unmasked(<8 x double> %va, i32 zeroext ret <8 x double> %v } -declare <15 x double> @llvm.vp.roundtozero.v15f64(<15 x double>, <15 x i1>, i32) - define <15 x double> @vp_roundtozero_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_roundtozero_v15f64: ; RV32ZVFH: # %bb.0: @@ -1126,8 +1102,6 @@ define <15 x double> @vp_roundtozero_v15f64_unmasked(<15 x double> %va, i32 zero ret <15 x double> %v } -declare <16 x double> @llvm.vp.roundtozero.v16f64(<16 x double>, <16 x i1>, i32) - define <16 x double> @vp_roundtozero_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_roundtozero_v16f64: ; RV32ZVFH: # %bb.0: @@ -1276,8 +1250,6 @@ define <16 x double> @vp_roundtozero_v16f64_unmasked(<16 x double> %va, i32 zero ret <16 x double> %v } -declare <32 x double> @llvm.vp.roundtozero.v32f64(<32 x double>, <32 x i1>, i32) - define <32 x double> @vp_roundtozero_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_roundtozero_v32f64: ; RV32ZVFH: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll index c0a213034c95b..71c32f1473b7f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll @@ -178,12 +178,3 @@ entry: ret i32 %op.rdx.3 } -declare <4 x i32> @llvm.abs.v4i32(<4 x i32>, i1) -declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>) -declare <4 x i16> @llvm.abs.v4i16(<4 x i16>, i1) -declare i16 @llvm.vector.reduce.add.v4i16(<4 x i16>) - -declare <16 x i32> @llvm.abs.v16i32(<16 x i32>, i1) -declare i32 @llvm.vector.reduce.add.v16i32(<16 x i32>) -declare <16 x i16> @llvm.abs.v16i16(<16 x i16>, 
i1) -declare i16 @llvm.vector.reduce.add.v16i16(<16 x i16>) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll index ba64655947602..af3e9db9fe123 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll @@ -11,8 +11,6 @@ ; RUN: -verify-machineinstrs < %s | FileCheck %s \ ; RUN: --check-prefixes=CHECK,ZVFHMIN,ZVFHMIN64 -declare <7 x i1> @llvm.vp.fcmp.v7f16(<7 x half>, <7 x half>, metadata, <7 x i1>, i32) - define <7 x i1> @fcmp_oeq_vv_v7f16(<7 x half> %va, <7 x half> %vb, <7 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: fcmp_oeq_vv_v7f16: ; ZVFH: # %bb.0: @@ -33,8 +31,6 @@ define <7 x i1> @fcmp_oeq_vv_v7f16(<7 x half> %va, <7 x half> %vb, <7 x i1> %m, ret <7 x i1> %v } -declare <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half>, <8 x half>, metadata, <8 x i1>, i32) - define <8 x i1> @fcmp_oeq_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: fcmp_oeq_vv_v8f16: ; ZVFH: # %bb.0: @@ -1055,8 +1051,6 @@ define <8 x i1> @fcmp_uno_vf_swap_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i3 ret <8 x i1> %v } -declare <128 x i1> @llvm.vp.fcmp.v128f16(<128 x half>, <128 x half>, metadata, <128 x i1>, i32) - define <128 x i1> @fcmp_oeq_vv_v128f16(<128 x half> %va, <128 x half> %vb, <128 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: fcmp_oeq_vv_v128f16: ; ZVFH: # %bb.0: @@ -3361,8 +3355,6 @@ define <128 x i1> @fcmp_oeq_vv_v128f16(<128 x half> %va, <128 x half> %vb, <128 ret <128 x i1> %v } -declare <7 x i1> @llvm.vp.fcmp.v7f64(<7 x double>, <7 x double>, metadata, <7 x i1>, i32) - define <7 x i1> @fcmp_oeq_vv_v7f64(<7 x double> %va, <7 x double> %vb, <7 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fcmp_oeq_vv_v7f64: ; CHECK: # %bb.0: @@ -3374,8 +3366,6 @@ define <7 x i1> @fcmp_oeq_vv_v7f64(<7 x double> %va, <7 x double> %vb, <7 x i1> ret <7 x i1> %v } -declare <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double>, <8 x 
double>, metadata, <8 x i1>, i32) - define <8 x i1> @fcmp_oeq_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fcmp_oeq_vv_v8f64: ; CHECK: # %bb.0: @@ -3914,8 +3904,6 @@ define <8 x i1> @fcmp_uno_vf_swap_v8f64(<8 x double> %va, double %b, <8 x i1> %m ret <8 x i1> %v } -declare <32 x i1> @llvm.vp.fcmp.v32f64(<32 x double>, <32 x double>, metadata, <32 x i1>, i32) - define <32 x i1> @fcmp_oeq_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fcmp_oeq_vv_v32f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp-mask.ll index 456170b086463..36847c971d858 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp-mask.ll @@ -4,9 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK - -declare <2 x i1> @llvm.vp.icmp.v2i1(<2 x i1>, <2 x i1>, metadata, <2 x i1>, i32) - define <2 x i1> @icmp_eq_vv_v2i1(<2 x i1> %va, <2 x i1> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_v2i1: ; CHECK: # %bb.0: @@ -17,8 +14,6 @@ define <2 x i1> @icmp_eq_vv_v2i1(<2 x i1> %va, <2 x i1> %vb, <2 x i1> %m, i32 ze ret <2 x i1> %v } -declare <4 x i1> @llvm.vp.icmp.v4i1(<4 x i1>, <4 x i1>, metadata, <4 x i1>, i32) - define <4 x i1> @icmp_eq_vv_v4i1(<4 x i1> %va, <4 x i1> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_v4i1: ; CHECK: # %bb.0: @@ -29,8 +24,6 @@ define <4 x i1> @icmp_eq_vv_v4i1(<4 x i1> %va, <4 x i1> %vb, <4 x i1> %m, i32 ze ret <4 x i1> %v } -declare <8 x i1> @llvm.vp.icmp.v8i1(<8 x i1>, <8 x i1>, metadata, <8 x i1>, i32) - define <8 x i1> @icmp_eq_vv_v8i1(<8 x i1> %va, <8 x i1> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_v8i1: ; CHECK: # %bb.0: @@ -41,8 +34,6 @@ define <8 x i1> @icmp_eq_vv_v8i1(<8 x 
i1> %va, <8 x i1> %vb, <8 x i1> %m, i32 ze ret <8 x i1> %v } -declare <16 x i1> @llvm.vp.icmp.v16i1(<16 x i1>, <16 x i1>, metadata, <16 x i1>, i32) - define <16 x i1> @icmp_eq_vv_v16i1(<16 x i1> %va, <16 x i1> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_v16i1: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll index ad57a6037652f..efc0f7ef4a441 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll @@ -7,8 +7,6 @@ ; FIXME: We're missing canonicalizations of ISD::VP_SETCC equivalent to those ; for ISD::SETCC, e.g., splats aren't moved to the RHS. -declare <8 x i1> @llvm.vp.icmp.v8i7(<8 x i7>, <8 x i7>, metadata, <8 x i1>, i32) - define <8 x i1> @icmp_eq_vv_v8i7(<8 x i7> %va, <8 x i7> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_v8i7: ; CHECK: # %bb.0: @@ -57,8 +55,6 @@ define <8 x i1> @icmp_eq_vx_swap_v8i7(<8 x i7> %va, i7 %b, <8 x i1> %m, i32 zero ret <8 x i1> %v } -declare <5 x i1> @llvm.vp.icmp.v5i8(<5 x i8>, <5 x i8>, metadata, <5 x i1>, i32) - define <5 x i1> @icmp_eq_vv_v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_v5i8: ; CHECK: # %bb.0: @@ -93,8 +89,6 @@ define <5 x i1> @icmp_eq_vx_swap_v5i8(<5 x i8> %va, i8 %b, <5 x i1> %m, i32 zero ret <5 x i1> %v } -declare <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8>, <8 x i8>, metadata, <8 x i1>, i32) - define <8 x i1> @icmp_eq_vv_v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_v8i8: ; CHECK: # %bb.0: @@ -587,8 +581,6 @@ define <8 x i1> @icmp_sle_vi_swap_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %e ret <8 x i1> %v } -declare <256 x i1> @llvm.vp.icmp.v256i8(<256 x i8>, <256 x i8>, metadata, <256 x i1>, i32) - define <256 x i1> @icmp_eq_vv_v256i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 zeroext %evl) { ; 
CHECK-LABEL: icmp_eq_vv_v256i8: ; CHECK: # %bb.0: @@ -696,8 +688,6 @@ define <256 x i1> @icmp_eq_vx_swap_v256i8(<256 x i8> %va, i8 %b, <256 x i1> %m, ret <256 x i1> %v } -declare <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32>, <8 x i32>, metadata, <8 x i1>, i32) - define <8 x i1> @icmp_eq_vv_v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_v8i32: ; CHECK: # %bb.0: @@ -1235,8 +1225,6 @@ define <8 x i1> @icmp_sle_vi_swap_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext ret <8 x i1> %v } -declare <64 x i1> @llvm.vp.icmp.v64i32(<64 x i32>, <64 x i32>, metadata, <64 x i1>, i32) - define <64 x i1> @icmp_eq_vv_v64i32(<64 x i32> %va, <64 x i32> %vb, <64 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_v64i32: ; CHECK: # %bb.0: @@ -1345,8 +1333,6 @@ define <64 x i1> @icmp_eq_vx_swap_v64i32(<64 x i32> %va, i32 %b, <64 x i1> %m, i ret <64 x i1> %v } -declare <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64>, <8 x i64>, metadata, <8 x i1>, i32) - define <8 x i1> @icmp_eq_vv_v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_v8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp-mask.ll index bd9b66997ff8d..78bc2a2ebdaef 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp-mask.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+m,+v < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+m,+v < %s | FileCheck %s -declare <4 x i16> @llvm.vp.sext.v4i16.v4i1(<4 x i1>, <4 x i1>, i32) - define <4 x i16> @vsext_v4i16_v4i1(<4 x i1> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsext_v4i16_v4i1: ; CHECK: # %bb.0: @@ -26,8 +24,6 @@ define <4 x i16> @vsext_v4i16_v4i1_unmasked(<4 x i1> %va, i32 zeroext %evl) { ret <4 x i16> %v } -declare <4 x i32> @llvm.vp.sext.v4i32.v4i1(<4 x i1>, <4 x i1>, i32) - define <4 x i32> @vsext_v4i32_v4i1(<4 x 
i1> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsext_v4i32_v4i1: ; CHECK: # %bb.0: @@ -50,8 +46,6 @@ define <4 x i32> @vsext_v4i32_v4i1_unmasked(<4 x i1> %va, i32 zeroext %evl) { ret <4 x i32> %v } -declare <4 x i64> @llvm.vp.sext.v4i64.v4i1(<4 x i1>, <4 x i1>, i32) - define <4 x i64> @vsext_v4i64_v4i1(<4 x i1> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsext_v4i64_v4i1: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp.ll index d8dc1f3588633..a452e5a9ffbb8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+m,+v < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+m,+v < %s | FileCheck %s -declare <4 x i16> @llvm.vp.sext.v4i16.v4i8(<4 x i8>, <4 x i1>, i32) - define <4 x i16> @vsext_v4i16_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsext_v4i16_v4i8: ; CHECK: # %bb.0: @@ -26,8 +24,6 @@ define <4 x i16> @vsext_v4i16_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ret <4 x i16> %v } -declare <4 x i32> @llvm.vp.sext.v4i32.v4i8(<4 x i8>, <4 x i1>, i32) - define <4 x i32> @vsext_v4i32_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsext_v4i32_v4i8: ; CHECK: # %bb.0: @@ -50,8 +46,6 @@ define <4 x i32> @vsext_v4i32_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ret <4 x i32> %v } -declare <4 x i64> @llvm.vp.sext.v4i64.v4i8(<4 x i8>, <4 x i1>, i32) - define <4 x i64> @vsext_v4i64_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsext_v4i64_v4i8: ; CHECK: # %bb.0: @@ -74,8 +68,6 @@ define <4 x i64> @vsext_v4i64_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ret <4 x i64> %v } -declare <4 x i32> @llvm.vp.sext.v4i32.v4i16(<4 x i16>, <4 x i1>, i32) - define <4 x i32> @vsext_v4i32_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsext_v4i32_v4i16: ; CHECK: # %bb.0: @@ 
-98,8 +90,6 @@ define <4 x i32> @vsext_v4i32_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ret <4 x i32> %v } -declare <4 x i64> @llvm.vp.sext.v4i64.v4i16(<4 x i16>, <4 x i1>, i32) - define <4 x i64> @vsext_v4i64_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsext_v4i64_v4i16: ; CHECK: # %bb.0: @@ -122,8 +112,6 @@ define <4 x i64> @vsext_v4i64_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ret <4 x i64> %v } -declare <4 x i64> @llvm.vp.sext.v4i64.v4i32(<4 x i32>, <4 x i1>, i32) - define <4 x i64> @vsext_v4i64_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsext_v4i64_v4i32: ; CHECK: # %bb.0: @@ -146,8 +134,6 @@ define <4 x i64> @vsext_v4i64_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ret <4 x i64> %v } -declare <32 x i64> @llvm.vp.sext.v32i64.v32i32(<32 x i32>, <32 x i1>, i32) - define <32 x i64> @vsext_v32i64_v32i32(<32 x i32> %va, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsext_v32i64_v32i32: ; CHECK: # %bb.0: @@ -202,8 +188,6 @@ define <32 x i64> @vsext_v32i64_v32i32_unmasked(<32 x i32> %va, i32 zeroext %evl ret <32 x i64> %v } -declare <4 x i16> @llvm.vp.sext.v4i16.v4i7(<4 x i7>, <4 x i1>, i32) - define <4 x i16> @vsext_v4i16_v4i7(<4 x i7> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsext_v4i16_v4i7: ; CHECK: # %bb.0: @@ -216,8 +200,6 @@ define <4 x i16> @vsext_v4i16_v4i7(<4 x i7> %va, <4 x i1> %m, i32 zeroext %evl) ret <4 x i16> %v } -declare <4 x i8> @llvm.vp.sext.v4i8.v4i7(<4 x i7>, <4 x i1>, i32) - define <4 x i8> @vsext_v4i8_v4i7(<4 x i7> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsext_v4i8_v4i7: ; CHECK: # %bb.0: @@ -229,8 +211,6 @@ define <4 x i8> @vsext_v4i8_v4i7(<4 x i7> %va, <4 x i1> %m, i32 zeroext %evl) { ret <4 x i8> %v } -declare <4 x i15> @llvm.vp.sext.v4i15.v4i8(<4 x i8>, <4 x i1>, i32) - define <4 x i15> @vsext_v4i15_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsext_v4i15_v4i8: ; CHECK: # %bb.0: @@ -242,8 +222,6 @@ define <4 x i15> 
@vsext_v4i15_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) ret <4 x i15> %v } -declare <4 x i15> @llvm.vp.sext.v4i15.v4i9(<4 x i9>, <4 x i1>, i32) - define <4 x i15> @vsext_v4i15_v4i9(<4 x i9> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsext_v4i15_v4i9: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp-mask.ll index a1390a8b1c0de..8d6e9a2dee0a4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp-mask.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zvfh < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zvfh < %s | FileCheck %s -declare <4 x half> @llvm.vp.sitofp.v4f16.v4i1(<4 x i1>, <4 x i1>, i32) - define <4 x half> @vsitofp_v4f16_v4i1(<4 x i1> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f16_v4i1: ; CHECK: # %bb.0: @@ -29,8 +27,6 @@ define <4 x half> @vsitofp_v4f16_v4i1_unmasked(<4 x i1> %va, i32 zeroext %evl) { ret <4 x half> %v } -declare <4 x float> @llvm.vp.sitofp.v4f32.v4i1(<4 x i1>, <4 x i1>, i32) - define <4 x float> @vsitofp_v4f32_v4i1(<4 x i1> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f32_v4i1: ; CHECK: # %bb.0: @@ -56,8 +52,6 @@ define <4 x float> @vsitofp_v4f32_v4i1_unmasked(<4 x i1> %va, i32 zeroext %evl) ret <4 x float> %v } -declare <4 x double> @llvm.vp.sitofp.v4f64.v4i1(<4 x i1>, <4 x i1>, i32) - define <4 x double> @vsitofp_v4f64_v4i1(<4 x i1> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f64_v4i1: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp.ll index 9f1f98893d04c..afa8f2fda2ed4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zvfhmin < %s | FileCheck %s 
\ ; RUN: --check-prefixes=CHECK,ZVFHMIN -declare <4 x half> @llvm.vp.sitofp.v4f16.v4i7(<4 x i7>, <4 x i1>, i32) - define <4 x half> @vsitofp_v4f16_v4i7(<4 x i7> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vsitofp_v4f16_v4i7: ; ZVFH: # %bb.0: @@ -35,8 +33,6 @@ define <4 x half> @vsitofp_v4f16_v4i7(<4 x i7> %va, <4 x i1> %m, i32 zeroext %ev ret <4 x half> %v } -declare <4 x half> @llvm.vp.sitofp.v4f16.v4i8(<4 x i8>, <4 x i1>, i32) - define <4 x half> @vsitofp_v4f16_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vsitofp_v4f16_v4i8: ; ZVFH: # %bb.0: @@ -77,8 +73,6 @@ define <4 x half> @vsitofp_v4f16_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ret <4 x half> %v } -declare <4 x half> @llvm.vp.sitofp.v4f16.v4i16(<4 x i16>, <4 x i1>, i32) - define <4 x half> @vsitofp_v4f16_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vsitofp_v4f16_v4i16: ; ZVFH: # %bb.0: @@ -115,8 +109,6 @@ define <4 x half> @vsitofp_v4f16_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) ret <4 x half> %v } -declare <4 x half> @llvm.vp.sitofp.v4f16.v4i32(<4 x i32>, <4 x i1>, i32) - define <4 x half> @vsitofp_v4f16_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vsitofp_v4f16_v4i32: ; ZVFH: # %bb.0: @@ -155,8 +147,6 @@ define <4 x half> @vsitofp_v4f16_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) ret <4 x half> %v } -declare <4 x half> @llvm.vp.sitofp.v4f16.v4i64(<4 x i64>, <4 x i1>, i32) - define <4 x half> @vsitofp_v4f16_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vsitofp_v4f16_v4i64: ; ZVFH: # %bb.0: @@ -197,8 +187,6 @@ define <4 x half> @vsitofp_v4f16_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) ret <4 x half> %v } -declare <4 x float> @llvm.vp.sitofp.v4f32.v4i8(<4 x i8>, <4 x i1>, i32) - define <4 x float> @vsitofp_v4f32_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f32_v4i8: ; CHECK: # %bb.0: @@ -221,8 +209,6 @@ define <4 x float> @vsitofp_v4f32_v4i8_unmasked(<4 x i8> 
%va, i32 zeroext %evl) ret <4 x float> %v } -declare <4 x float> @llvm.vp.sitofp.v4f32.v4i16(<4 x i16>, <4 x i1>, i32) - define <4 x float> @vsitofp_v4f32_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f32_v4i16: ; CHECK: # %bb.0: @@ -245,8 +231,6 @@ define <4 x float> @vsitofp_v4f32_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl ret <4 x float> %v } -declare <4 x float> @llvm.vp.sitofp.v4f32.v4i32(<4 x i32>, <4 x i1>, i32) - define <4 x float> @vsitofp_v4f32_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f32_v4i32: ; CHECK: # %bb.0: @@ -267,8 +251,6 @@ define <4 x float> @vsitofp_v4f32_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl ret <4 x float> %v } -declare <4 x float> @llvm.vp.sitofp.v4f32.v4i64(<4 x i64>, <4 x i1>, i32) - define <4 x float> @vsitofp_v4f32_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f32_v4i64: ; CHECK: # %bb.0: @@ -291,8 +273,6 @@ define <4 x float> @vsitofp_v4f32_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl ret <4 x float> %v } -declare <4 x double> @llvm.vp.sitofp.v4f64.v4i8(<4 x i8>, <4 x i1>, i32) - define <4 x double> @vsitofp_v4f64_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f64_v4i8: ; CHECK: # %bb.0: @@ -315,8 +295,6 @@ define <4 x double> @vsitofp_v4f64_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) ret <4 x double> %v } -declare <4 x double> @llvm.vp.sitofp.v4f64.v4i16(<4 x i16>, <4 x i1>, i32) - define <4 x double> @vsitofp_v4f64_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f64_v4i16: ; CHECK: # %bb.0: @@ -339,8 +317,6 @@ define <4 x double> @vsitofp_v4f64_v4i16_unmasked(<4 x i16> %va, i32 zeroext %ev ret <4 x double> %v } -declare <4 x double> @llvm.vp.sitofp.v4f64.v4i32(<4 x i32>, <4 x i1>, i32) - define <4 x double> @vsitofp_v4f64_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f64_v4i32: ; CHECK: # %bb.0: @@ -363,8 +339,6 @@ define 
<4 x double> @vsitofp_v4f64_v4i32_unmasked(<4 x i32> %va, i32 zeroext %ev ret <4 x double> %v } -declare <4 x double> @llvm.vp.sitofp.v4f64.v4i64(<4 x i64>, <4 x i1>, i32) - define <4 x double> @vsitofp_v4f64_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f64_v4i64: ; CHECK: # %bb.0: @@ -385,8 +359,6 @@ define <4 x double> @vsitofp_v4f64_v4i64_unmasked(<4 x i64> %va, i32 zeroext %ev ret <4 x double> %v } -declare <32 x double> @llvm.vp.sitofp.v32f64.v32i64(<32 x i64>, <32 x i1>, i32) - define <32 x double> @vsitofp_v32f64_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v32f64_v32i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector.ll index 9812e9832856d..7032ed925d29f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s -declare <2 x i8> @llvm.stepvector.v2i8() - define <2 x i8> @stepvector_v2i8() { ; CHECK-LABEL: stepvector_v2i8: ; CHECK: # %bb.0: @@ -14,8 +12,6 @@ define <2 x i8> @stepvector_v2i8() { ret <2 x i8> %v } -declare <3 x i8> @llvm.stepvector.v3i8() - define <3 x i8> @stepvector_v3i8() { ; CHECK-LABEL: stepvector_v3i8: ; CHECK: # %bb.0: @@ -26,8 +22,6 @@ define <3 x i8> @stepvector_v3i8() { ret <3 x i8> %v } -declare <4 x i8> @llvm.stepvector.v4i8() - define <4 x i8> @stepvector_v4i8() { ; CHECK-LABEL: stepvector_v4i8: ; CHECK: # %bb.0: @@ -38,8 +32,6 @@ define <4 x i8> @stepvector_v4i8() { ret <4 x i8> %v } -declare <8 x i8> @llvm.stepvector.v8i8() - define <8 x i8> @stepvector_v8i8() { ; CHECK-LABEL: stepvector_v8i8: ; CHECK: # %bb.0: @@ -50,8 +42,6 @@ define <8 x i8> @stepvector_v8i8() { ret <8 x i8> %v } -declare <16 x i8> 
@llvm.stepvector.v16i8() - define <16 x i8> @stepvector_v16i8() { ; CHECK-LABEL: stepvector_v16i8: ; CHECK: # %bb.0: @@ -62,8 +52,6 @@ define <16 x i8> @stepvector_v16i8() { ret <16 x i8> %v } -declare <2 x i16> @llvm.stepvector.v2i16() - define <2 x i16> @stepvector_v2i16() { ; CHECK-LABEL: stepvector_v2i16: ; CHECK: # %bb.0: @@ -74,8 +62,6 @@ define <2 x i16> @stepvector_v2i16() { ret <2 x i16> %v } -declare <4 x i16> @llvm.stepvector.v4i16() - define <4 x i16> @stepvector_v4i16() { ; CHECK-LABEL: stepvector_v4i16: ; CHECK: # %bb.0: @@ -86,8 +72,6 @@ define <4 x i16> @stepvector_v4i16() { ret <4 x i16> %v } -declare <8 x i16> @llvm.stepvector.v8i16() - define <8 x i16> @stepvector_v8i16() { ; CHECK-LABEL: stepvector_v8i16: ; CHECK: # %bb.0: @@ -98,8 +82,6 @@ define <8 x i16> @stepvector_v8i16() { ret <8 x i16> %v } -declare <16 x i16> @llvm.stepvector.v16i16() - define <16 x i16> @stepvector_v16i16() { ; CHECK-LABEL: stepvector_v16i16: ; CHECK: # %bb.0: @@ -110,8 +92,6 @@ define <16 x i16> @stepvector_v16i16() { ret <16 x i16> %v } -declare <2 x i32> @llvm.stepvector.v2i32() - define <2 x i32> @stepvector_v2i32() { ; CHECK-LABEL: stepvector_v2i32: ; CHECK: # %bb.0: @@ -122,8 +102,6 @@ define <2 x i32> @stepvector_v2i32() { ret <2 x i32> %v } -declare <4 x i32> @llvm.stepvector.v4i32() - define <4 x i32> @stepvector_v4i32() { ; CHECK-LABEL: stepvector_v4i32: ; CHECK: # %bb.0: @@ -134,8 +112,6 @@ define <4 x i32> @stepvector_v4i32() { ret <4 x i32> %v } -declare <8 x i32> @llvm.stepvector.v8i32() - define <8 x i32> @stepvector_v8i32() { ; CHECK-LABEL: stepvector_v8i32: ; CHECK: # %bb.0: @@ -146,8 +122,6 @@ define <8 x i32> @stepvector_v8i32() { ret <8 x i32> %v } -declare <16 x i32> @llvm.stepvector.v16i32() - define <16 x i32> @stepvector_v16i32() { ; CHECK-LABEL: stepvector_v16i32: ; CHECK: # %bb.0: @@ -158,8 +132,6 @@ define <16 x i32> @stepvector_v16i32() { ret <16 x i32> %v } -declare <2 x i64> @llvm.stepvector.v2i64() - define <2 x i64> @stepvector_v2i64() { 
; CHECK-LABEL: stepvector_v2i64: ; CHECK: # %bb.0: @@ -170,8 +142,6 @@ define <2 x i64> @stepvector_v2i64() { ret <2 x i64> %v } -declare <4 x i64> @llvm.stepvector.v4i64() - define <4 x i64> @stepvector_v4i64() { ; CHECK-LABEL: stepvector_v4i64: ; CHECK: # %bb.0: @@ -182,8 +152,6 @@ define <4 x i64> @stepvector_v4i64() { ret <4 x i64> %v } -declare <8 x i64> @llvm.stepvector.v8i64() - define <8 x i64> @stepvector_v8i64() { ; CHECK-LABEL: stepvector_v8i64: ; CHECK: # %bb.0: @@ -194,8 +162,6 @@ define <8 x i64> @stepvector_v8i64() { ret <8 x i64> %v } -declare <16 x i64> @llvm.stepvector.v16i64() - define <16 x i64> @stepvector_v16i64() { ; CHECK-LABEL: stepvector_v16i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-asm.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-asm.ll index 056f55260b854..6cbf32151e748 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-asm.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-asm.ll @@ -623,11 +623,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <32 x i8> @llvm.masked.gather.v32i8.v32p0(<32 x ptr>, i32 immarg, <32 x i1>, <32 x i8>) -declare <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr>, i32 immarg, <8 x i1>, <8 x i32>) -declare void @llvm.masked.scatter.v32i8.v32p0(<32 x i8>, <32 x ptr>, i32 immarg, <32 x i1>) -declare void @llvm.masked.scatter.v8i32.v8p0(<8 x i32>, <8 x ptr>, i32 immarg, <8 x i1>) - ; Make sure we don't crash in getTgtMemIntrinsic for a vector of pointers. define void @gather_of_pointers(ptr noalias nocapture %arg, ptr noalias nocapture readonly %arg1) { ; V-LABEL: gather_of_pointers: @@ -757,8 +752,6 @@ bb18: ; preds = %bb2 ret void } -declare <2 x ptr> @llvm.masked.gather.v2p0.v2p0(<2 x ptr>, i32 immarg, <2 x i1>, <2 x ptr>) - ; Make sure we don't crash in getTgtMemIntrinsic for a vector of pointers. 
define void @scatter_of_pointers(ptr noalias nocapture %arg, ptr noalias nocapture readonly %arg1) { ; V-LABEL: scatter_of_pointers: @@ -888,8 +881,6 @@ bb18: ; preds = %bb2 ret void } -declare void @llvm.masked.scatter.v2p0.v2p0(<2 x ptr>, <2 x ptr>, i32 immarg, <2 x i1>) - define void @strided_load_startval_add_with_splat(ptr noalias nocapture %arg, ptr noalias nocapture readonly %arg1, i32 signext %arg2) { ; CHECK-LABEL: strided_load_startval_add_with_splat: ; CHECK: # %bb.0: # %bb @@ -1010,9 +1001,6 @@ bb35: ; preds = %bb35, %bb32 br i1 %i45, label %bb34, label %bb35 } -declare <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr>, i32 immarg, <16 x i1>, <16 x i8>) -declare void @llvm.masked.scatter.v16i8.v16p0(<16 x i8>, <16 x ptr>, i32 immarg, <16 x i1>) - define void @gather_no_scalar_remainder(ptr noalias nocapture noundef %arg, ptr noalias nocapture noundef readonly %arg1, i64 noundef %arg2) { ; CHECK-LABEL: gather_no_scalar_remainder: ; CHECK: # %bb.0: # %bb diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-negative.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-negative.ll index 8f7d738fe6d91..ef09a3fb6d5fa 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-negative.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-negative.ll @@ -140,4 +140,3 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <32 x i8> @llvm.masked.gather.v32i8.v32p0(<32 x ptr>, i32 immarg, <32 x i1>, <32 x i8>) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store.ll index 62b65ddd3d19a..108c75c8c4abc 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store.ll @@ -622,11 +622,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <32 x i8> @llvm.masked.gather.v32i8.v32p0(<32 x ptr>, i32 immarg, <32 x i1>, <32 
x i8>) -declare <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr>, i32 immarg, <8 x i1>, <8 x i32>) -declare void @llvm.masked.scatter.v32i8.v32p0(<32 x i8>, <32 x ptr>, i32 immarg, <32 x i1>) -declare void @llvm.masked.scatter.v8i32.v8p0(<8 x i32>, <8 x ptr>, i32 immarg, <8 x i1>) - ; Make sure we don't crash in getTgtMemIntrinsic for a vector of pointers. define void @gather_of_pointers(ptr noalias nocapture %arg, ptr noalias nocapture readonly %arg1) { ; V-LABEL: @gather_of_pointers( @@ -702,8 +697,6 @@ bb18: ; preds = %bb2 ret void } -declare <2 x ptr> @llvm.masked.gather.v2p0.v2p0(<2 x ptr>, i32 immarg, <2 x i1>, <2 x ptr>) - ; Make sure we don't crash in getTgtMemIntrinsic for a vector of pointers. define void @scatter_of_pointers(ptr noalias nocapture %arg, ptr noalias nocapture readonly %arg1) { ; V-LABEL: @scatter_of_pointers( @@ -779,8 +772,6 @@ bb18: ; preds = %bb2 ret void } -declare void @llvm.masked.scatter.v2p0.v2p0(<2 x ptr>, <2 x ptr>, i32 immarg, <2 x i1>) - define void @strided_load_startval_add_with_splat(ptr noalias nocapture %arg, ptr noalias nocapture readonly %arg1, i32 signext %arg2) { ; CHECK-LABEL: @strided_load_startval_add_with_splat( ; CHECK-NEXT: bb: @@ -896,9 +887,6 @@ bb35: ; preds = %bb35, %bb32 br i1 %i45, label %bb34, label %bb35 } -declare <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr>, i32 immarg, <16 x i1>, <16 x i8>) -declare void @llvm.masked.scatter.v16i8.v16p0(<16 x i8>, <16 x ptr>, i32 immarg, <16 x i1>) - define void @gather_no_scalar_remainder(ptr noalias nocapture noundef %arg, ptr noalias nocapture noundef readonly %arg1, i64 noundef %arg2) { ; CHECK-LABEL: @gather_no_scalar_remainder( ; CHECK-NEXT: bb: @@ -964,8 +952,6 @@ entry: ret <8 x i8> %3 } -declare <8 x i8> @llvm.masked.gather.v8i8.v8p0(<8 x ptr>, i32 immarg, <8 x i1>, <8 x i8>) - define void @gather_narrow_idx(ptr noalias nocapture %A, ptr noalias nocapture readonly %B) { ; CHECK-LABEL: @gather_narrow_idx( ; CHECK-NEXT: entry: @@ -1101,7 +1087,6 @@ 
vector.body: ; preds = %vector.body, %entry %i2 = mul nuw nsw <32 x i64> %vec.ind, splat (i64 5) %i3 = getelementptr inbounds i8, ptr %A, <32 x i64> %i2 - %elems = sub i64 1024, %index %evl = call i32 @llvm.experimental.get.vector.length.i64(i64 %elems, i32 32, i1 false) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll index 4b7f82f94f5e4..8af4ced77be39 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll @@ -24,8 +24,6 @@ ; RUN: -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-RV64,CHECK-NO-OPT,CHECK-NO-OPT-ZVFHMIN -declare <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i8(ptr, i8, <2 x i1>, i32) - define <2 x i8> @strided_vpload_v2i8_i8(ptr %ptr, i8 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v2i8_i8: ; CHECK: # %bb.0: @@ -36,8 +34,6 @@ define <2 x i8> @strided_vpload_v2i8_i8(ptr %ptr, i8 signext %stride, <2 x i1> % ret <2 x i8> %load } -declare <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i16(ptr, i16, <2 x i1>, i32) - define <2 x i8> @strided_vpload_v2i8_i16(ptr %ptr, i16 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v2i8_i16: ; CHECK: # %bb.0: @@ -48,8 +44,6 @@ define <2 x i8> @strided_vpload_v2i8_i16(ptr %ptr, i16 signext %stride, <2 x i1> ret <2 x i8> %load } -declare <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr, i64, <2 x i1>, i32) - define <2 x i8> @strided_vpload_v2i8_i64(ptr %ptr, i64 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_v2i8_i64: ; CHECK-RV32: # %bb.0: @@ -66,8 +60,6 @@ define <2 x i8> @strided_vpload_v2i8_i64(ptr %ptr, i64 signext %stride, <2 x i1> ret <2 x i8> %load } -declare <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i32(ptr, i32, <2 x i1>, i32) - define <2 x i8> @strided_vpload_v2i8(ptr %ptr, i32 
signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v2i8: ; CHECK: # %bb.0: @@ -78,8 +70,6 @@ define <2 x i8> @strided_vpload_v2i8(ptr %ptr, i32 signext %stride, <2 x i1> %m, ret <2 x i8> %load } -declare <4 x i8> @llvm.experimental.vp.strided.load.v4i8.p0.i32(ptr, i32, <4 x i1>, i32) - define <4 x i8> @strided_vpload_v4i8(ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v4i8: ; CHECK: # %bb.0: @@ -100,8 +90,6 @@ define <4 x i8> @strided_vpload_v4i8_allones_mask(ptr %ptr, i32 signext %stride, ret <4 x i8> %load } -declare <8 x i8> @llvm.experimental.vp.strided.load.v8i8.p0.i32(ptr, i32, <8 x i1>, i32) - define <8 x i8> @strided_vpload_v8i8(ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v8i8: ; CHECK: # %bb.0: @@ -122,8 +110,6 @@ define <8 x i8> @strided_vpload_v8i8_unit_stride(ptr %ptr, <8 x i1> %m, i32 zero ret <8 x i8> %load } -declare <2 x i16> @llvm.experimental.vp.strided.load.v2i16.p0.i32(ptr, i32, <2 x i1>, i32) - define <2 x i16> @strided_vpload_v2i16(ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v2i16: ; CHECK: # %bb.0: @@ -134,8 +120,6 @@ define <2 x i16> @strided_vpload_v2i16(ptr %ptr, i32 signext %stride, <2 x i1> % ret <2 x i16> %load } -declare <4 x i16> @llvm.experimental.vp.strided.load.v4i16.p0.i32(ptr, i32, <4 x i1>, i32) - define <4 x i16> @strided_vpload_v4i16(ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v4i16: ; CHECK: # %bb.0: @@ -146,8 +130,6 @@ define <4 x i16> @strided_vpload_v4i16(ptr %ptr, i32 signext %stride, <4 x i1> % ret <4 x i16> %load } -declare <8 x i16> @llvm.experimental.vp.strided.load.v8i16.p0.i32(ptr, i32, <8 x i1>, i32) - define <8 x i16> @strided_vpload_v8i16(ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v8i16: ; CHECK: # %bb.0: @@ -178,8 +160,6 @@ define <8 x i16> 
@strided_vpload_v8i16_allones_mask(ptr %ptr, i32 signext %strid ret <8 x i16> %load } -declare <2 x i32> @llvm.experimental.vp.strided.load.v2i32.p0.i32(ptr, i32, <2 x i1>, i32) - define <2 x i32> @strided_vpload_v2i32(ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v2i32: ; CHECK: # %bb.0: @@ -190,8 +170,6 @@ define <2 x i32> @strided_vpload_v2i32(ptr %ptr, i32 signext %stride, <2 x i1> % ret <2 x i32> %load } -declare <4 x i32> @llvm.experimental.vp.strided.load.v4i32.p0.i32(ptr, i32, <4 x i1>, i32) - define <4 x i32> @strided_vpload_v4i32(ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v4i32: ; CHECK: # %bb.0: @@ -212,8 +190,6 @@ define <4 x i32> @strided_vpload_v4i32_unit_stride(ptr %ptr, <4 x i1> %m, i32 ze ret <4 x i32> %load } -declare <8 x i32> @llvm.experimental.vp.strided.load.v8i32.p0.i32(ptr, i32, <8 x i1>, i32) - define <8 x i32> @strided_vpload_v8i32(ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v8i32: ; CHECK: # %bb.0: @@ -234,8 +210,6 @@ define <8 x i32> @strided_vpload_v8i32_allones_mask(ptr %ptr, i32 signext %strid ret <8 x i32> %load } -declare <2 x i64> @llvm.experimental.vp.strided.load.v2i64.p0.i32(ptr, i32, <2 x i1>, i32) - define <2 x i64> @strided_vpload_v2i64(ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v2i64: ; CHECK: # %bb.0: @@ -256,8 +230,6 @@ define <2 x i64> @strided_vpload_v2i64_unit_stride(ptr %ptr, <2 x i1> %m, i32 ze ret <2 x i64> %load } -declare <4 x i64> @llvm.experimental.vp.strided.load.v4i64.p0.i32(ptr, i32, <4 x i1>, i32) - define <4 x i64> @strided_vpload_v4i64(ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v4i64: ; CHECK: # %bb.0: @@ -278,8 +250,6 @@ define <4 x i64> @strided_vpload_v4i64_allones_mask(ptr %ptr, i32 signext %strid ret <4 x i64> %load } -declare <8 x i64> 
@llvm.experimental.vp.strided.load.v8i64.p0.i32(ptr, i32, <8 x i1>, i32) - define <8 x i64> @strided_vpload_v8i64(ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v8i64: ; CHECK: # %bb.0: @@ -290,8 +260,6 @@ define <8 x i64> @strided_vpload_v8i64(ptr %ptr, i32 signext %stride, <8 x i1> % ret <8 x i64> %load } -declare <2 x bfloat> @llvm.experimental.vp.strided.load.v2bf16.p0.i32(ptr, i32, <2 x i1>, i32) - define <2 x bfloat> @strided_vpload_v2bf16(ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v2bf16: ; CHECK: # %bb.0: @@ -312,8 +280,6 @@ define <2 x bfloat> @strided_vpload_v2bf16_allones_mask(ptr %ptr, i32 signext %s ret <2 x bfloat> %load } -declare <4 x bfloat> @llvm.experimental.vp.strided.load.v4bf16.p0.i32(ptr, i32, <4 x i1>, i32) - define <4 x bfloat> @strided_vpload_v4bf16(ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v4bf16: ; CHECK: # %bb.0: @@ -324,8 +290,6 @@ define <4 x bfloat> @strided_vpload_v4bf16(ptr %ptr, i32 signext %stride, <4 x i ret <4 x bfloat> %load } -declare <8 x bfloat> @llvm.experimental.vp.strided.load.v8bf16.p0.i32(ptr, i32, <8 x i1>, i32) - define <8 x bfloat> @strided_vpload_v8bf16(ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v8bf16: ; CHECK: # %bb.0: @@ -346,8 +310,6 @@ define <8 x bfloat> @strided_vpload_v8bf16_unit_stride(ptr %ptr, <8 x i1> %m, i3 ret <8 x bfloat> %load } -declare <2 x half> @llvm.experimental.vp.strided.load.v2f16.p0.i32(ptr, i32, <2 x i1>, i32) - define <2 x half> @strided_vpload_v2f16(ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v2f16: ; CHECK: # %bb.0: @@ -368,8 +330,6 @@ define <2 x half> @strided_vpload_v2f16_allones_mask(ptr %ptr, i32 signext %stri ret <2 x half> %load } -declare <4 x half> @llvm.experimental.vp.strided.load.v4f16.p0.i32(ptr, i32, <4 x i1>, i32) - define <4 
x half> @strided_vpload_v4f16(ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v4f16: ; CHECK: # %bb.0: @@ -380,8 +340,6 @@ define <4 x half> @strided_vpload_v4f16(ptr %ptr, i32 signext %stride, <4 x i1> ret <4 x half> %load } -declare <8 x half> @llvm.experimental.vp.strided.load.v8f16.p0.i32(ptr, i32, <8 x i1>, i32) - define <8 x half> @strided_vpload_v8f16(ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v8f16: ; CHECK: # %bb.0: @@ -402,8 +360,6 @@ define <8 x half> @strided_vpload_v8f16_unit_stride(ptr %ptr, <8 x i1> %m, i32 z ret <8 x half> %load } -declare <2 x float> @llvm.experimental.vp.strided.load.v2f32.p0.i32(ptr, i32, <2 x i1>, i32) - define <2 x float> @strided_vpload_v2f32(ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v2f32: ; CHECK: # %bb.0: @@ -414,8 +370,6 @@ define <2 x float> @strided_vpload_v2f32(ptr %ptr, i32 signext %stride, <2 x i1> ret <2 x float> %load } -declare <4 x float> @llvm.experimental.vp.strided.load.v4f32.p0.i32(ptr, i32, <4 x i1>, i32) - define <4 x float> @strided_vpload_v4f32(ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v4f32: ; CHECK: # %bb.0: @@ -436,8 +390,6 @@ define <4 x float> @strided_vpload_v4f32_unit_stride(ptr %ptr, <4 x i1> %m, i32 ret <4 x float> %load } -declare <8 x float> @llvm.experimental.vp.strided.load.v8f32.p0.i32(ptr, i32, <8 x i1>, i32) - define <8 x float> @strided_vpload_v8f32(ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v8f32: ; CHECK: # %bb.0: @@ -458,8 +410,6 @@ define <8 x float> @strided_vpload_v8f32_allones_mask(ptr %ptr, i32 signext %str ret <8 x float> %load } -declare <2 x double> @llvm.experimental.vp.strided.load.v2f64.p0.i32(ptr, i32, <2 x i1>, i32) - define <2 x double> @strided_vpload_v2f64(ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; 
CHECK-LABEL: strided_vpload_v2f64: ; CHECK: # %bb.0: @@ -480,9 +430,6 @@ define <2 x double> @strided_vpload_v2f64_unit_stride(ptr %ptr, <2 x i1> %m, i32 ret <2 x double> %load } - -declare <4 x double> @llvm.experimental.vp.strided.load.v4f64.p0.i32(ptr, i32, <4 x i1>, i32) - define <4 x double> @strided_vpload_v4f64(ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v4f64: ; CHECK: # %bb.0: @@ -503,8 +450,6 @@ define <4 x double> @strided_vpload_v4f64_allones_mask(ptr %ptr, i32 signext %st ret <4 x double> %load } -declare <8 x double> @llvm.experimental.vp.strided.load.v8f64.p0.i32(ptr, i32, <8 x i1>, i32) - define <8 x double> @strided_vpload_v8f64(ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_v8f64: ; CHECK: # %bb.0: @@ -536,8 +481,6 @@ define <3 x double> @strided_vpload_v3f64_allones_mask(ptr %ptr, i32 signext %st ret <3 x double> %v } -declare <3 x double> @llvm.experimental.vp.strided.load.v3f64.p0.i32(ptr, i32, <3 x i1>, i32) - ; Splitting define <32 x double> @strided_vpload_v32f64(ptr %ptr, i32 signext %stride, <32 x i1> %m, i32 zeroext %evl) nounwind { ; CHECK-LABEL: strided_vpload_v32f64: @@ -593,8 +536,6 @@ define <32 x double> @strided_vpload_v32f64_allones_mask(ptr %ptr, i32 signext % ret <32 x double> %load } -declare <32 x double> @llvm.experimental.vp.strided.load.v32f64.p0.i32(ptr, i32, <32 x i1>, i32) - ; Widening + splitting (with HiIsEmpty == true) define <33 x double> @strided_load_v33f64(ptr %ptr, i64 %stride, <33 x i1> %mask, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_load_v33f64: @@ -702,8 +643,6 @@ define <33 x double> @strided_load_v33f64(ptr %ptr, i64 %stride, <33 x i1> %mask ret <33 x double> %v } -declare <33 x double> @llvm.experimental.vp.strided.load.v33f64.p0.i64(ptr, i64, <33 x i1>, i32) - ; Test unmasked integer zero strided define <4 x i8> @zero_strided_unmasked_vpload_4i8_i8(ptr %ptr) { ; CHECK-OPT-LABEL: 
zero_strided_unmasked_vpload_4i8_i8: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll index 7ca329835b7ac..25624ea0fcf6c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll @@ -12,8 +12,6 @@ ; RUN: -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-RV64 -declare void @llvm.experimental.vp.strided.store.v2i8.p0.i8(<2 x i8>, ptr, i8, <2 x i1>, i32) - define void @strided_vpstore_v2i8_i8(<2 x i8> %val, ptr %ptr, i8 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v2i8_i8: ; CHECK: # %bb.0: @@ -24,8 +22,6 @@ define void @strided_vpstore_v2i8_i8(<2 x i8> %val, ptr %ptr, i8 signext %stride ret void } -declare void @llvm.experimental.vp.strided.store.v2i8.p0.i16(<2 x i8>, ptr, i16, <2 x i1>, i32) - define void @strided_vpstore_v2i8_i16(<2 x i8> %val, ptr %ptr, i16 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v2i8_i16: ; CHECK: # %bb.0: @@ -36,8 +32,6 @@ define void @strided_vpstore_v2i8_i16(<2 x i8> %val, ptr %ptr, i16 signext %stri ret void } -declare void @llvm.experimental.vp.strided.store.v2i8.p0.i64(<2 x i8>, ptr, i64, <2 x i1>, i32) - define void @strided_vpstore_v2i8_i64(<2 x i8> %val, ptr %ptr, i64 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_v2i8_i64: ; CHECK-RV32: # %bb.0: @@ -54,8 +48,6 @@ define void @strided_vpstore_v2i8_i64(<2 x i8> %val, ptr %ptr, i64 signext %stri ret void } -declare void @llvm.experimental.vp.strided.store.v2i8.p0.i32(<2 x i8>, ptr, i32, <2 x i1>, i32) - define void @strided_vpstore_v2i8(<2 x i8> %val, ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v2i8: ; CHECK: # %bb.0: @@ -66,8 +58,6 @@ define void @strided_vpstore_v2i8(<2 x i8> %val, ptr %ptr, i32 signext %stride, ret void } 
-declare void @llvm.experimental.vp.strided.store.v4i8.p0.i32(<4 x i8>, ptr, i32, <4 x i1>, i32) - define void @strided_vpstore_v4i8(<4 x i8> %val, ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v4i8: ; CHECK: # %bb.0: @@ -78,8 +68,6 @@ define void @strided_vpstore_v4i8(<4 x i8> %val, ptr %ptr, i32 signext %stride, ret void } -declare void @llvm.experimental.vp.strided.store.v8i8.p0.i32(<8 x i8>, ptr, i32, <8 x i1>, i32) - define void @strided_vpstore_v8i8(<8 x i8> %val, ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v8i8: ; CHECK: # %bb.0: @@ -100,8 +88,6 @@ define void @strided_vpstore_v8i8_unit_stride(<8 x i8> %val, ptr %ptr, <8 x i1> ret void } -declare void @llvm.experimental.vp.strided.store.v2i16.p0.i32(<2 x i16>, ptr, i32, <2 x i1>, i32) - define void @strided_vpstore_v2i16(<2 x i16> %val, ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v2i16: ; CHECK: # %bb.0: @@ -112,8 +98,6 @@ define void @strided_vpstore_v2i16(<2 x i16> %val, ptr %ptr, i32 signext %stride ret void } -declare void @llvm.experimental.vp.strided.store.v4i16.p0.i32(<4 x i16>, ptr, i32, <4 x i1>, i32) - define void @strided_vpstore_v4i16(<4 x i16> %val, ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v4i16: ; CHECK: # %bb.0: @@ -124,8 +108,6 @@ define void @strided_vpstore_v4i16(<4 x i16> %val, ptr %ptr, i32 signext %stride ret void } -declare void @llvm.experimental.vp.strided.store.v8i16.p0.i32(<8 x i16>, ptr, i32, <8 x i1>, i32) - define void @strided_vpstore_v8i16(<8 x i16> %val, ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v8i16: ; CHECK: # %bb.0: @@ -146,8 +128,6 @@ define void @strided_vpstore_v8i16_unit_stride(<8 x i16> %val, ptr %ptr, <8 x i1 ret void } -declare void @llvm.experimental.vp.strided.store.v2i32.p0.i32(<2 x i32>, ptr, i32, <2 x i1>, 
i32) - define void @strided_vpstore_v2i32(<2 x i32> %val, ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v2i32: ; CHECK: # %bb.0: @@ -158,8 +138,6 @@ define void @strided_vpstore_v2i32(<2 x i32> %val, ptr %ptr, i32 signext %stride ret void } -declare void @llvm.experimental.vp.strided.store.v4i32.p0.i32(<4 x i32>, ptr, i32, <4 x i1>, i32) - define void @strided_vpstore_v4i32(<4 x i32> %val, ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v4i32: ; CHECK: # %bb.0: @@ -180,8 +158,6 @@ define void @strided_vpstore_v4i32_unit_stride(<4 x i32> %val, ptr %ptr, <4 x i1 ret void } -declare void @llvm.experimental.vp.strided.store.v8i32.p0.i32(<8 x i32>, ptr, i32, <8 x i1>, i32) - define void @strided_vpstore_v8i32(<8 x i32> %val, ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v8i32: ; CHECK: # %bb.0: @@ -192,8 +168,6 @@ define void @strided_vpstore_v8i32(<8 x i32> %val, ptr %ptr, i32 signext %stride ret void } -declare void @llvm.experimental.vp.strided.store.v2i64.p0.i32(<2 x i64>, ptr, i32, <2 x i1>, i32) - define void @strided_vpstore_v2i64(<2 x i64> %val, ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v2i64: ; CHECK: # %bb.0: @@ -214,8 +188,6 @@ define void @strided_vpstore_v2i64_unit_stride(<2 x i64> %val, ptr %ptr, <2 x i1 ret void } -declare void @llvm.experimental.vp.strided.store.v4i64.p0.i32(<4 x i64>, ptr, i32, <4 x i1>, i32) - define void @strided_vpstore_v4i64(<4 x i64> %val, ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v4i64: ; CHECK: # %bb.0: @@ -226,8 +198,6 @@ define void @strided_vpstore_v4i64(<4 x i64> %val, ptr %ptr, i32 signext %stride ret void } -declare void @llvm.experimental.vp.strided.store.v8i64.p0.i32(<8 x i64>, ptr, i32, <8 x i1>, i32) - define void @strided_vpstore_v8i64(<8 x i64> %val, ptr %ptr, i32 signext 
%stride, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v8i64: ; CHECK: # %bb.0: @@ -238,8 +208,6 @@ define void @strided_vpstore_v8i64(<8 x i64> %val, ptr %ptr, i32 signext %stride ret void } -declare void @llvm.experimental.vp.strided.store.v2bf16.p0.i32(<2 x bfloat>, ptr, i32, <2 x i1>, i32) - define void @strided_vpstore_v2bf16(<2 x bfloat> %val, ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v2bf16: ; CHECK: # %bb.0: @@ -250,8 +218,6 @@ define void @strided_vpstore_v2bf16(<2 x bfloat> %val, ptr %ptr, i32 signext %st ret void } -declare void @llvm.experimental.vp.strided.store.v4bf16.p0.i32(<4 x bfloat>, ptr, i32, <4 x i1>, i32) - define void @strided_vpstore_v4bf16(<4 x bfloat> %val, ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v4bf16: ; CHECK: # %bb.0: @@ -262,8 +228,6 @@ define void @strided_vpstore_v4bf16(<4 x bfloat> %val, ptr %ptr, i32 signext %st ret void } -declare void @llvm.experimental.vp.strided.store.v8bf16.p0.i32(<8 x bfloat>, ptr, i32, <8 x i1>, i32) - define void @strided_vpstore_v8bf16(<8 x bfloat> %val, ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v8bf16: ; CHECK: # %bb.0: @@ -284,8 +248,6 @@ define void @strided_vpstore_v8bf16_unit_stride(<8 x bfloat> %val, ptr %ptr, <8 ret void } -declare void @llvm.experimental.vp.strided.store.v2f16.p0.i32(<2 x half>, ptr, i32, <2 x i1>, i32) - define void @strided_vpstore_v2f16(<2 x half> %val, ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v2f16: ; CHECK: # %bb.0: @@ -296,8 +258,6 @@ define void @strided_vpstore_v2f16(<2 x half> %val, ptr %ptr, i32 signext %strid ret void } -declare void @llvm.experimental.vp.strided.store.v4f16.p0.i32(<4 x half>, ptr, i32, <4 x i1>, i32) - define void @strided_vpstore_v4f16(<4 x half> %val, ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { ; 
CHECK-LABEL: strided_vpstore_v4f16: ; CHECK: # %bb.0: @@ -308,8 +268,6 @@ define void @strided_vpstore_v4f16(<4 x half> %val, ptr %ptr, i32 signext %strid ret void } -declare void @llvm.experimental.vp.strided.store.v8f16.p0.i32(<8 x half>, ptr, i32, <8 x i1>, i32) - define void @strided_vpstore_v8f16(<8 x half> %val, ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v8f16: ; CHECK: # %bb.0: @@ -330,8 +288,6 @@ define void @strided_vpstore_v8f16_unit_stride(<8 x half> %val, ptr %ptr, <8 x i ret void } -declare void @llvm.experimental.vp.strided.store.v2f32.p0.i32(<2 x float>, ptr, i32, <2 x i1>, i32) - define void @strided_vpstore_v2f32(<2 x float> %val, ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v2f32: ; CHECK: # %bb.0: @@ -342,8 +298,6 @@ define void @strided_vpstore_v2f32(<2 x float> %val, ptr %ptr, i32 signext %stri ret void } -declare void @llvm.experimental.vp.strided.store.v4f32.p0.i32(<4 x float>, ptr, i32, <4 x i1>, i32) - define void @strided_vpstore_v4f32(<4 x float> %val, ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v4f32: ; CHECK: # %bb.0: @@ -364,8 +318,6 @@ define void @strided_vpstore_v4f32_unit_stride(<4 x float> %val, ptr %ptr, <4 x ret void } -declare void @llvm.experimental.vp.strided.store.v8f32.p0.i32(<8 x float>, ptr, i32, <8 x i1>, i32) - define void @strided_vpstore_v8f32(<8 x float> %val, ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v8f32: ; CHECK: # %bb.0: @@ -376,8 +328,6 @@ define void @strided_vpstore_v8f32(<8 x float> %val, ptr %ptr, i32 signext %stri ret void } -declare void @llvm.experimental.vp.strided.store.v2f64.p0.i32(<2 x double>, ptr, i32, <2 x i1>, i32) - define void @strided_vpstore_v2f64(<2 x double> %val, ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v2f64: ; CHECK: # %bb.0: @@ 
-398,8 +348,6 @@ define void @strided_vpstore_v2f64_unit_stride(<2 x double> %val, ptr %ptr, <2 x ret void } -declare void @llvm.experimental.vp.strided.store.v4f64.p0.i32(<4 x double>, ptr, i32, <4 x i1>, i32) - define void @strided_vpstore_v4f64(<4 x double> %val, ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v4f64: ; CHECK: # %bb.0: @@ -410,8 +358,6 @@ define void @strided_vpstore_v4f64(<4 x double> %val, ptr %ptr, i32 signext %str ret void } -declare void @llvm.experimental.vp.strided.store.v8f64.p0.i32(<8 x double>, ptr, i32, <8 x i1>, i32) - define void @strided_vpstore_v8f64(<8 x double> %val, ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_v8f64: ; CHECK: # %bb.0: @@ -453,8 +399,6 @@ define void @strided_vpstore_v3f32_allones_mask(<3 x float> %v, ptr %ptr, i32 si ret void } -declare void @llvm.experimental.vp.strided.store.v3f32.p0.i32(<3 x float>, ptr , i32, <3 x i1>, i32) - ; Splitting define void @strided_store_v32f64(<32 x double> %v, ptr %ptr, i32 signext %stride, <32 x i1> %mask, i32 zeroext %evl) { ; CHECK-LABEL: strided_store_v32f64: @@ -506,4 +450,3 @@ define void @strided_store_v32f64_allones_mask(<32 x double> %v, ptr %ptr, i32 s ret void } -declare void @llvm.experimental.vp.strided.store.v32f64.p0.i32(<32 x double>, ptr, i32, <32 x i1>, i32) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-sat-clip.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-sat-clip.ll index 3b1dc298c12ce..e0b3d04332067 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-sat-clip.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-sat-clip.ll @@ -1,17 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s -declare <4 x i16> @llvm.smax.v4i16(<4 x i16>, <4 x i16>) -declare <4 x i16> @llvm.smin.v4i16(<4 x i16>, <4 x 
i16>) -declare <4 x i32> @llvm.smax.v4i32(<4 x i32>, <4 x i32>) -declare <4 x i32> @llvm.smin.v4i32(<4 x i32>, <4 x i32>) -declare <4 x i64> @llvm.smax.v4i64(<4 x i64>, <4 x i64>) -declare <4 x i64> @llvm.smin.v4i64(<4 x i64>, <4 x i64>) - -declare <4 x i16> @llvm.umin.v4i16(<4 x i16>, <4 x i16>) -declare <4 x i32> @llvm.umin.v4i32(<4 x i32>, <4 x i32>) -declare <4 x i64> @llvm.umin.v4i64(<4 x i64>, <4 x i64>) - define void @trunc_sat_i8i16_maxmin(ptr %x, ptr %y) { ; CHECK-LABEL: trunc_sat_i8i16_maxmin: ; CHECK: # %bb.0: @@ -134,7 +123,6 @@ define void @trunc_sat_u8u16_minmax(ptr %x, ptr %y) { ret void } - define void @trunc_sat_i16i32_notopt(ptr %x, ptr %y) { ; CHECK-LABEL: trunc_sat_i16i32_notopt: ; CHECK: # %bb.0: @@ -261,7 +249,6 @@ define void @trunc_sat_u16u32_minmax(ptr %x, ptr %y) { ret void } - define void @trunc_sat_i32i64_notopt(ptr %x, ptr %y) { ; CHECK-LABEL: trunc_sat_i32i64_notopt: ; CHECK: # %bb.0: @@ -317,7 +304,6 @@ define void @trunc_sat_i32i64_minmax(ptr %x, ptr %y) { ret void } - define void @trunc_sat_u32u64_notopt(ptr %x, ptr %y) { ; CHECK-LABEL: trunc_sat_u32u64_notopt: ; CHECK: # %bb.0: @@ -352,7 +338,6 @@ define void @trunc_sat_u32u64_min(ptr %x, ptr %y) { ret void } - define void @trunc_sat_u32u64_maxmin(ptr %x, ptr %y) { ; CHECK-LABEL: trunc_sat_u32u64_maxmin: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp-mask.ll index b6ef97603d61c..05ac11d1ca9d7 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp-mask.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s -declare <2 x i1> @llvm.vp.trunc.v2i1.v2i16(<2 x i16>, <2 x i1>, i32) - define <2 x i1> @vtrunc_v2i1_v2i16(<2 x i16> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_v2i1_v2i16: ; 
CHECK: # %bb.0: @@ -26,8 +24,6 @@ define <2 x i1> @vtrunc_v2i1_v2i16_unmasked(<2 x i16> %a, i32 zeroext %vl) { ret <2 x i1> %v } -declare <2 x i1> @llvm.vp.trunc.v2i1.v2i32(<2 x i32>, <2 x i1>, i32) - define <2 x i1> @vtrunc_v2i1_v2i32(<2 x i32> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_v2i1_v2i32: ; CHECK: # %bb.0: @@ -50,8 +46,6 @@ define <2 x i1> @vtrunc_v2i1_v2i32_unmasked(<2 x i32> %a, i32 zeroext %vl) { ret <2 x i1> %v } -declare <2 x i1> @llvm.vp.trunc.v2i1.v2i64(<2 x i64>, <2 x i1>, i32) - define <2 x i1> @vtrunc_v2i1_v2i64(<2 x i64> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_v2i1_v2i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll index 461b4d0e02cb8..f992d1f8f7eee 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v,+m -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32 ; RUN: llc -mtriple=riscv64 -mattr=+v,+m -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 -declare <2 x i7> @llvm.vp.trunc.v2i7.v2i16(<2 x i16>, <2 x i1>, i32) - define <2 x i7> @vtrunc_v2i7_v2i16(<2 x i16> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_v2i7_v2i16: ; CHECK: # %bb.0: @@ -14,8 +12,6 @@ define <2 x i7> @vtrunc_v2i7_v2i16(<2 x i16> %a, <2 x i1> %m, i32 zeroext %vl) { ret <2 x i7> %v } -declare <2 x i8> @llvm.vp.trunc.v2i8.v2i15(<2 x i15>, <2 x i1>, i32) - define <2 x i8> @vtrunc_v2i8_v2i15(<2 x i15> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_v2i8_v2i15: ; CHECK: # %bb.0: @@ -26,8 +22,6 @@ define <2 x i8> @vtrunc_v2i8_v2i15(<2 x i15> %a, <2 x i1> %m, i32 zeroext %vl) { ret <2 x i8> %v } -declare <2 x i8> @llvm.vp.trunc.v2i8.v2i16(<2 x i16>, <2 x i1>, i32) - define <2 x i8> @vtrunc_v2i8_v2i16(<2 x i16> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: 
vtrunc_v2i8_v2i16: ; CHECK: # %bb.0: @@ -48,8 +42,6 @@ define <2 x i8> @vtrunc_v2i8_v2i16_unmasked(<2 x i16> %a, i32 zeroext %vl) { ret <2 x i8> %v } -declare <128 x i7> @llvm.vp.trunc.v128i7.v128i16(<128 x i16>, <128 x i1>, i32) - define <128 x i7> @vtrunc_v128i7_v128i16(<128 x i16> %a, <128 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_v128i7_v128i16: ; CHECK: # %bb.0: @@ -79,8 +71,6 @@ define <128 x i7> @vtrunc_v128i7_v128i16(<128 x i16> %a, <128 x i1> %m, i32 zero ret <128 x i7> %v } -declare <2 x i8> @llvm.vp.trunc.v2i8.v2i32(<2 x i32>, <2 x i1>, i32) - define <2 x i8> @vtrunc_v2i8_v2i32(<2 x i32> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_v2i8_v2i32: ; CHECK: # %bb.0: @@ -105,8 +95,6 @@ define <2 x i8> @vtrunc_v2i8_v2i32_unmasked(<2 x i32> %a, i32 zeroext %vl) { ret <2 x i8> %v } -declare <2 x i8> @llvm.vp.trunc.v2i8.v2i64(<2 x i64>, <2 x i1>, i32) - define <2 x i8> @vtrunc_v2i8_v2i64(<2 x i64> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_v2i8_v2i64: ; CHECK: # %bb.0: @@ -135,8 +123,6 @@ define <2 x i8> @vtrunc_v2i8_v2i64_unmasked(<2 x i64> %a, i32 zeroext %vl) { ret <2 x i8> %v } -declare <2 x i16> @llvm.vp.trunc.v2i16.v2i32(<2 x i32>, <2 x i1>, i32) - define <2 x i16> @vtrunc_v2i16_v2i32(<2 x i32> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_v2i16_v2i32: ; CHECK: # %bb.0: @@ -157,8 +143,6 @@ define <2 x i16> @vtrunc_v2i16_v2i32_unmasked(<2 x i32> %a, i32 zeroext %vl) { ret <2 x i16> %v } -declare <2 x i16> @llvm.vp.trunc.v2i16.v2i64(<2 x i64>, <2 x i1>, i32) - define <2 x i16> @vtrunc_v2i16_v2i64(<2 x i64> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_v2i16_v2i64: ; CHECK: # %bb.0: @@ -183,8 +167,6 @@ define <2 x i16> @vtrunc_v2i16_v2i64_unmasked(<2 x i64> %a, i32 zeroext %vl) { ret <2 x i16> %v } -declare <15 x i16> @llvm.vp.trunc.v15i16.v15i64(<15 x i64>, <15 x i1>, i32) - define <15 x i16> @vtrunc_v15i16_v15i64(<15 x i64> %a, <15 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_v15i16_v15i64: ; 
CHECK: # %bb.0: @@ -197,8 +179,6 @@ define <15 x i16> @vtrunc_v15i16_v15i64(<15 x i64> %a, <15 x i1> %m, i32 zeroext ret <15 x i16> %v } -declare <2 x i32> @llvm.vp.trunc.v2i32.v2i64(<2 x i64>, <2 x i1>, i32) - define <2 x i32> @vtrunc_v2i32_v2i64(<2 x i64> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_v2i32_v2i64: ; CHECK: # %bb.0: @@ -219,8 +199,6 @@ define <2 x i32> @vtrunc_v2i32_v2i64_unmasked(<2 x i64> %a, i32 zeroext %vl) { ret <2 x i32> %v } -declare <128 x i32> @llvm.vp.trunc.v128i32.v128i64(<128 x i64>, <128 x i1>, i32) - define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 zeroext %vl) { ; RV32-LABEL: vtrunc_v128i32_v128i64: ; RV32: # %bb.0: @@ -845,8 +823,6 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze ret <128 x i32> %v } -declare <32 x i32> @llvm.vp.trunc.v32i32.v32i64(<32 x i64>, <32 x i1>, i32) - define <32 x i32> @vtrunc_v32i32_v32i64(<32 x i64> %a, <32 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_v32i32_v32i64: ; CHECK: # %bb.0: @@ -876,8 +852,6 @@ define <32 x i32> @vtrunc_v32i32_v32i64(<32 x i64> %a, <32 x i1> %m, i32 zeroext ret <32 x i32> %v } -declare <2 x i7> @llvm.vp.trunc.v2i7.v2i8(<2 x i8>, <2 x i1>, i32) - define <2 x i7> @vtrunc_v2i7_v2i8(<2 x i8> %a, <2 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_v2i7_v2i8: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp-mask.ll index e625c46a57145..8aaf1e7fa2330 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp-mask.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zvfh < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zvfh < %s | FileCheck %s -declare <4 x half> @llvm.vp.uitofp.v4f16.v4i1(<4 x i1>, <4 x i1>, i32) - define <4 x half> @vuitofp_v4f16_v4i1(<4 x i1> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vuitofp_v4f16_v4i1: ; CHECK: # %bb.0: @@ -29,8 +27,6 @@ define <4 x half> @vuitofp_v4f16_v4i1_unmasked(<4 x i1> %va, i32 zeroext %evl) { ret <4 x half> %v } -declare <4 x float> @llvm.vp.uitofp.v4f32.v4i1(<4 x i1>, <4 x i1>, i32) - define <4 x float> @vuitofp_v4f32_v4i1(<4 x i1> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v4f32_v4i1: ; CHECK: # %bb.0: @@ -56,8 +52,6 @@ define <4 x float> @vuitofp_v4f32_v4i1_unmasked(<4 x i1> %va, i32 zeroext %evl) ret <4 x float> %v } -declare <4 x double> @llvm.vp.uitofp.v4f64.v4i1(<4 x i1>, <4 x i1>, i32) - define <4 x double> @vuitofp_v4f64_v4i1(<4 x i1> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v4f64_v4i1: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp.ll index b72e3cfcb920a..3d1febe95421f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zvfhmin < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare <4 x half> @llvm.vp.uitofp.v4f16.v4i7(<4 x i7>, <4 x i1>, i32) - define <4 x half> @vuitofp_v4f16_v4i7(<4 x i7> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vuitofp_v4f16_v4i7: ; ZVFH: # %bb.0: @@ -35,8 +33,6 @@ define <4 x half> @vuitofp_v4f16_v4i7(<4 x i7> %va, <4 x i1> %m, i32 zeroext %ev ret <4 x half> %v } -declare <4 x half> @llvm.vp.uitofp.v4f16.v4i8(<4 x i8>, <4 x i1>, i32) - define <4 x half> @vuitofp_v4f16_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vuitofp_v4f16_v4i8: ; ZVFH: # %bb.0: @@ -77,8 +73,6 @@ define <4 x half> @vuitofp_v4f16_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ret <4 x half> %v } -declare <4 x half> @llvm.vp.uitofp.v4f16.v4i16(<4 x i16>, <4 x i1>, i32) - define <4 x half> @vuitofp_v4f16_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vuitofp_v4f16_v4i16: ; ZVFH: # %bb.0: @@ 
-115,8 +109,6 @@ define <4 x half> @vuitofp_v4f16_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) ret <4 x half> %v } -declare <4 x half> @llvm.vp.uitofp.v4f16.v4i32(<4 x i32>, <4 x i1>, i32) - define <4 x half> @vuitofp_v4f16_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vuitofp_v4f16_v4i32: ; ZVFH: # %bb.0: @@ -155,8 +147,6 @@ define <4 x half> @vuitofp_v4f16_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) ret <4 x half> %v } -declare <4 x half> @llvm.vp.uitofp.v4f16.v4i64(<4 x i64>, <4 x i1>, i32) - define <4 x half> @vuitofp_v4f16_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vuitofp_v4f16_v4i64: ; ZVFH: # %bb.0: @@ -197,8 +187,6 @@ define <4 x half> @vuitofp_v4f16_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) ret <4 x half> %v } -declare <4 x float> @llvm.vp.uitofp.v4f32.v4i8(<4 x i8>, <4 x i1>, i32) - define <4 x float> @vuitofp_v4f32_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v4f32_v4i8: ; CHECK: # %bb.0: @@ -221,8 +209,6 @@ define <4 x float> @vuitofp_v4f32_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) ret <4 x float> %v } -declare <4 x float> @llvm.vp.uitofp.v4f32.v4i16(<4 x i16>, <4 x i1>, i32) - define <4 x float> @vuitofp_v4f32_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v4f32_v4i16: ; CHECK: # %bb.0: @@ -245,8 +231,6 @@ define <4 x float> @vuitofp_v4f32_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl ret <4 x float> %v } -declare <4 x float> @llvm.vp.uitofp.v4f32.v4i32(<4 x i32>, <4 x i1>, i32) - define <4 x float> @vuitofp_v4f32_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v4f32_v4i32: ; CHECK: # %bb.0: @@ -267,8 +251,6 @@ define <4 x float> @vuitofp_v4f32_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl ret <4 x float> %v } -declare <4 x float> @llvm.vp.uitofp.v4f32.v4i64(<4 x i64>, <4 x i1>, i32) - define <4 x float> @vuitofp_v4f32_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vuitofp_v4f32_v4i64: ; CHECK: # %bb.0: @@ -291,8 +273,6 @@ define <4 x float> @vuitofp_v4f32_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl ret <4 x float> %v } -declare <4 x double> @llvm.vp.uitofp.v4f64.v4i8(<4 x i8>, <4 x i1>, i32) - define <4 x double> @vuitofp_v4f64_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v4f64_v4i8: ; CHECK: # %bb.0: @@ -315,8 +295,6 @@ define <4 x double> @vuitofp_v4f64_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) ret <4 x double> %v } -declare <4 x double> @llvm.vp.uitofp.v4f64.v4i16(<4 x i16>, <4 x i1>, i32) - define <4 x double> @vuitofp_v4f64_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v4f64_v4i16: ; CHECK: # %bb.0: @@ -339,8 +317,6 @@ define <4 x double> @vuitofp_v4f64_v4i16_unmasked(<4 x i16> %va, i32 zeroext %ev ret <4 x double> %v } -declare <4 x double> @llvm.vp.uitofp.v4f64.v4i32(<4 x i32>, <4 x i1>, i32) - define <4 x double> @vuitofp_v4f64_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v4f64_v4i32: ; CHECK: # %bb.0: @@ -363,8 +339,6 @@ define <4 x double> @vuitofp_v4f64_v4i32_unmasked(<4 x i32> %va, i32 zeroext %ev ret <4 x double> %v } -declare <4 x double> @llvm.vp.uitofp.v4f64.v4i64(<4 x i64>, <4 x i1>, i32) - define <4 x double> @vuitofp_v4f64_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v4f64_v4i64: ; CHECK: # %bb.0: @@ -385,8 +359,6 @@ define <4 x double> @vuitofp_v4f64_v4i64_unmasked(<4 x i64> %va, i32 zeroext %ev ret <4 x double> %v } -declare <32 x double> @llvm.vp.uitofp.v32f64.v32i64(<32 x i64>, <32 x i1>, i32) - define <32 x double> @vuitofp_v32f64_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v32f64_v32i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll index fa39b06b4d779..8f4744b92b1ee 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll @@ -72,8 +72,6 @@ define void @store_v4i32_align2(<4 x i32> %x, ptr %ptr) { ret void } -declare <2 x i16> @llvm.masked.gather.v2i16.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x i16>) - define <2 x i16> @mgather_v2i16_align1(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i16> %passthru) { ; RV32-SLOW-LABEL: mgather_v2i16_align1: ; RV32-SLOW: # %bb.0: @@ -160,8 +158,6 @@ define <2 x i16> @mgather_v2i16_align1(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i16> % ret <2 x i16> %v } -declare <2 x i64> @llvm.masked.gather.v2i64.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x i64>) - define <2 x i64> @mgather_v2i64_align4(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i64> %passthru) { ; RV32-SLOW-LABEL: mgather_v2i64_align4: ; RV32-SLOW: # %bb.0: @@ -244,8 +240,6 @@ define <2 x i64> @mgather_v2i64_align4(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i64> % ret <2 x i64> %v } -declare void @llvm.masked.scatter.v4i16.v4p0(<4 x i16>, <4 x ptr>, i32, <4 x i1>) - define void @mscatter_v4i16_align1(<4 x i16> %val, <4 x ptr> %ptrs, <4 x i1> %m) { ; RV32-SLOW-LABEL: mscatter_v4i16_align1: ; RV32-SLOW: # %bb.0: @@ -392,8 +386,6 @@ define void @mscatter_v4i16_align1(<4 x i16> %val, <4 x ptr> %ptrs, <4 x i1> %m) ret void } -declare void @llvm.masked.scatter.v2i32.v2p0(<2 x i32>, <2 x ptr>, i32, <2 x i1>) - define void @mscatter_v2i32_align2(<2 x i32> %val, <2 x ptr> %ptrs, <2 x i1> %m) { ; RV32-SLOW-LABEL: mscatter_v2i32_align2: ; RV32-SLOW: # %bb.0: @@ -476,8 +468,6 @@ define void @mscatter_v2i32_align2(<2 x i32> %val, <2 x ptr> %ptrs, <2 x i1> %m) ret void } -declare <2 x i32> @llvm.masked.load.v2i32(ptr, i32, <2 x i1>, <2 x i32>) - define void @masked_load_v2i32_align1(ptr %a, <2 x i32> %m, ptr %res_ptr) nounwind { ; RV32-SLOW-LABEL: masked_load_v2i32_align1: ; RV32-SLOW: # %bb.0: @@ -580,8 +570,6 @@ define void @masked_load_v2i32_align1(ptr %a, <2 x i32> %m, ptr %res_ptr) nounwi ret void } -declare void @llvm.masked.store.v2i32.p0(<2 x i32>, ptr, i32, <2 x i1>) - define void 
@masked_store_v2i32_align2(<2 x i32> %val, ptr %a, <2 x i32> %m) nounwind { ; SLOW-LABEL: masked_store_v2i32_align2: ; SLOW: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp-mask.ll index 2f30cf4f88096..391f54adb4c11 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp-mask.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK -declare <2 x i1> @llvm.vp.add.v2i1(<2 x i1>, <2 x i1>, <2 x i1>, i32) - define <2 x i1> @vadd_vv_v2i1(<2 x i1> %va, <2 x i1> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v2i1: ; CHECK: # %bb.0: @@ -16,8 +14,6 @@ define <2 x i1> @vadd_vv_v2i1(<2 x i1> %va, <2 x i1> %b, <2 x i1> %m, i32 zeroex ret <2 x i1> %v } -declare <4 x i1> @llvm.vp.add.v4i1(<4 x i1>, <4 x i1>, <4 x i1>, i32) - define <4 x i1> @vadd_vv_v4i1(<4 x i1> %va, <4 x i1> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v4i1: ; CHECK: # %bb.0: @@ -28,8 +24,6 @@ define <4 x i1> @vadd_vv_v4i1(<4 x i1> %va, <4 x i1> %b, <4 x i1> %m, i32 zeroex ret <4 x i1> %v } -declare <8 x i1> @llvm.vp.add.v8i1(<8 x i1>, <8 x i1>, <8 x i1>, i32) - define <8 x i1> @vadd_vv_v8i1(<8 x i1> %va, <8 x i1> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v8i1: ; CHECK: # %bb.0: @@ -40,8 +34,6 @@ define <8 x i1> @vadd_vv_v8i1(<8 x i1> %va, <8 x i1> %b, <8 x i1> %m, i32 zeroex ret <8 x i1> %v } -declare <16 x i1> @llvm.vp.add.v16i1(<16 x i1>, <16 x i1>, <16 x i1>, i32) - define <16 x i1> @vadd_vv_v16i1(<16 x i1> %va, <16 x i1> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v16i1: ; CHECK: # %bb.0: @@ -52,8 +44,6 @@ define <16 x i1> @vadd_vv_v16i1(<16 x i1> %va, <16 x i1> %b, <16 x i1> %m, i32 z ret <16 x i1> %v } -declare <32 x i1> @llvm.vp.add.v32i1(<32 x i1>, <32 x i1>, <32 x i1>, i32) - define <32 x i1> @vadd_vv_v32i1(<32 x 
i1> %va, <32 x i1> %b, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v32i1: ; CHECK: # %bb.0: @@ -64,8 +54,6 @@ define <32 x i1> @vadd_vv_v32i1(<32 x i1> %va, <32 x i1> %b, <32 x i1> %m, i32 z ret <32 x i1> %v } -declare <64 x i1> @llvm.vp.add.v64i1(<64 x i1>, <64 x i1>, <64 x i1>, i32) - define <64 x i1> @vadd_vv_v64i1(<64 x i1> %va, <64 x i1> %b, <64 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v64i1: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll index 22c629088bacd..96dff2464e501 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <8 x i7> @llvm.vp.add.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32) - define <8 x i7> @vadd_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v8i7: ; CHECK: # %bb.0: @@ -16,8 +14,6 @@ define <8 x i7> @vadd_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroex ret <8 x i7> %v } -declare <2 x i8> @llvm.vp.add.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) - define <2 x i8> @vadd_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v2i8: ; CHECK: # %bb.0: @@ -82,8 +78,6 @@ define <2 x i8> @vadd_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.add.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) - define <4 x i8> @vadd_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v4i8: ; CHECK: # %bb.0: @@ -160,8 +154,6 @@ define <4 x i8> @vadd_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ret <4 x i8> %v } -declare <5 x i8> @llvm.vp.add.v5i8(<5 x i8>, <5 x i8>, <5 x i1>, i32) - define <5 x i8> @vadd_vv_v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 zeroext %evl) { ; 
CHECK-LABEL: vadd_vv_v5i8: ; CHECK: # %bb.0: @@ -226,8 +218,6 @@ define <5 x i8> @vadd_vi_v5i8_unmasked(<5 x i8> %va, i32 zeroext %evl) { ret <5 x i8> %v } -declare <8 x i8> @llvm.vp.add.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) - define <8 x i8> @vadd_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v8i8: ; CHECK: # %bb.0: @@ -292,8 +282,6 @@ define <8 x i8> @vadd_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.add.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) - define <16 x i8> @vadd_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v16i8: ; CHECK: # %bb.0: @@ -358,8 +346,6 @@ define <16 x i8> @vadd_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) { ret <16 x i8> %v } -declare <256 x i8> @llvm.vp.add.v258i8(<256 x i8>, <256 x i8>, <256 x i1>, i32) - define <256 x i8> @vadd_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_v258i8: ; CHECK: # %bb.0: @@ -439,8 +425,6 @@ define <256 x i8> @vadd_vi_v258i8_evl128(<256 x i8> %va, <256 x i1> %m) { ret <256 x i8> %v } -declare <2 x i16> @llvm.vp.add.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) - define <2 x i16> @vadd_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v2i16: ; CHECK: # %bb.0: @@ -505,8 +489,6 @@ define <2 x i16> @vadd_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) { ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.add.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) - define <4 x i16> @vadd_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v4i16: ; CHECK: # %bb.0: @@ -571,8 +553,6 @@ define <4 x i16> @vadd_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.add.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) - define <8 x i16> @vadd_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vadd_vv_v8i16: ; CHECK: # %bb.0: @@ -637,8 +617,6 @@ define <8 x i16> @vadd_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) { ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.add.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) - define <16 x i16> @vadd_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v16i16: ; CHECK: # %bb.0: @@ -703,8 +681,6 @@ define <16 x i16> @vadd_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) { ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.add.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) - define <2 x i32> @vadd_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v2i32: ; CHECK: # %bb.0: @@ -769,8 +745,6 @@ define <2 x i32> @vadd_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) { ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.add.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vadd_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v4i32: ; CHECK: # %bb.0: @@ -835,8 +809,6 @@ define <4 x i32> @vadd_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.add.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) - define <8 x i32> @vadd_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v8i32: ; CHECK: # %bb.0: @@ -901,8 +873,6 @@ define <8 x i32> @vadd_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) { ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.add.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32) - define <16 x i32> @vadd_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v16i32: ; CHECK: # %bb.0: @@ -967,8 +937,6 @@ define <16 x i32> @vadd_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) { ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.add.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) - define <2 x i64> @vadd_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, 
i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v2i64: ; CHECK: # %bb.0: @@ -1063,8 +1031,6 @@ define <2 x i64> @vadd_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) { ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.add.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) - define <4 x i64> @vadd_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v4i64: ; CHECK: # %bb.0: @@ -1159,8 +1125,6 @@ define <4 x i64> @vadd_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.add.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) - define <8 x i64> @vadd_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v8i64: ; CHECK: # %bb.0: @@ -1255,8 +1219,6 @@ define <8 x i64> @vadd_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) { ret <8 x i64> %v } -declare <16 x i64> @llvm.vp.add.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32) - define <16 x i64> @vadd_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v16i64: ; CHECK: # %bb.0: @@ -1353,8 +1315,6 @@ define <16 x i64> @vadd_vi_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) { ; Test that split-legalization works as expected. 
-declare <32 x i64> @llvm.vp.add.v32i64(<32 x i64>, <32 x i64>, <32 x i1>, i32) - define <32 x i64> @vadd_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_v32i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll index 1be3fd0910338..96eb846538f7f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <8 x i7> @llvm.vp.and.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32) - define <8 x i7> @vand_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v8i7: ; CHECK: # %bb.0: @@ -16,8 +14,6 @@ define <8 x i7> @vand_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroex ret <8 x i7> %v } -declare <2 x i8> @llvm.vp.and.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) - define <2 x i8> @vand_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v2i8: ; CHECK: # %bb.0: @@ -106,8 +102,6 @@ define <2 x i8> @vand_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.and.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) - define <4 x i8> @vand_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v4i8: ; CHECK: # %bb.0: @@ -172,8 +166,6 @@ define <4 x i8> @vand_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ret <4 x i8> %v } -declare <8 x i8> @llvm.vp.and.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) - define <8 x i8> @vand_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v8i8: ; CHECK: # %bb.0: @@ -238,8 +230,6 @@ define <8 x i8> @vand_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.and.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) - 
define <16 x i8> @vand_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v16i8: ; CHECK: # %bb.0: @@ -304,8 +294,6 @@ define <16 x i8> @vand_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) { ret <16 x i8> %v } -declare <2 x i16> @llvm.vp.and.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) - define <2 x i16> @vand_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v2i16: ; CHECK: # %bb.0: @@ -370,8 +358,6 @@ define <2 x i16> @vand_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) { ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.and.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) - define <4 x i16> @vand_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v4i16: ; CHECK: # %bb.0: @@ -436,8 +422,6 @@ define <4 x i16> @vand_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.and.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) - define <8 x i16> @vand_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v8i16: ; CHECK: # %bb.0: @@ -502,8 +486,6 @@ define <8 x i16> @vand_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) { ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.and.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) - define <16 x i16> @vand_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v16i16: ; CHECK: # %bb.0: @@ -568,8 +550,6 @@ define <16 x i16> @vand_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) { ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.and.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) - define <2 x i32> @vand_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v2i32: ; CHECK: # %bb.0: @@ -634,8 +614,6 @@ define <2 x i32> @vand_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) { ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.and.v4i32(<4 x i32>, <4 x 
i32>, <4 x i1>, i32) - define <4 x i32> @vand_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v4i32: ; CHECK: # %bb.0: @@ -700,8 +678,6 @@ define <4 x i32> @vand_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.and.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) - define <8 x i32> @vand_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v8i32: ; CHECK: # %bb.0: @@ -766,8 +742,6 @@ define <8 x i32> @vand_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) { ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.and.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32) - define <16 x i32> @vand_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v16i32: ; CHECK: # %bb.0: @@ -832,8 +806,6 @@ define <16 x i32> @vand_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) { ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.and.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) - define <2 x i64> @vand_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v2i64: ; CHECK: # %bb.0: @@ -928,8 +900,6 @@ define <2 x i64> @vand_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) { ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.and.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) - define <4 x i64> @vand_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v4i64: ; CHECK: # %bb.0: @@ -1024,8 +994,6 @@ define <4 x i64> @vand_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.and.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) - define <8 x i64> @vand_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v8i64: ; CHECK: # %bb.0: @@ -1120,8 +1088,6 @@ define <8 x i64> @vand_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) { ret <8 x i64> %v } -declare <11 x i64> 
@llvm.vp.and.v11i64(<11 x i64>, <11 x i64>, <11 x i1>, i32) - define <11 x i64> @vand_vv_v11i64(<11 x i64> %va, <11 x i64> %b, <11 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v11i64: ; CHECK: # %bb.0: @@ -1216,8 +1182,6 @@ define <11 x i64> @vand_vi_v11i64_unmasked(<11 x i64> %va, i32 zeroext %evl) { ret <11 x i64> %v } -declare <16 x i64> @llvm.vp.and.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32) - define <16 x i64> @vand_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v16i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll index 2455d872ae7f0..da26c63b61e34 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll @@ -168,8 +168,6 @@ define <16 x bfloat> @vfsgnj_vv_v16bf16_unmasked(<16 x bfloat> %va, <16 x bfloat ret <16 x bfloat> %v } -declare <2 x half> @llvm.vp.copysign.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32) - define <2 x half> @vfsgnj_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v2f16: ; CHECK: # %bb.0: @@ -190,8 +188,6 @@ define <2 x half> @vfsgnj_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %vb, i32 ret <2 x half> %v } -declare <4 x half> @llvm.vp.copysign.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32) - define <4 x half> @vfsgnj_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v4f16: ; CHECK: # %bb.0: @@ -212,8 +208,6 @@ define <4 x half> @vfsgnj_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %vb, i32 ret <4 x half> %v } -declare <8 x half> @llvm.vp.copysign.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32) - define <8 x half> @vfsgnj_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v8f16: ; CHECK: # %bb.0: @@ -234,8 +228,6 @@ define <8 x half> @vfsgnj_vv_v8f16_unmasked(<8 x 
half> %va, <8 x half> %vb, i32 ret <8 x half> %v } -declare <16 x half> @llvm.vp.copysign.v16f16(<16 x half>, <16 x half>, <16 x i1>, i32) - define <16 x half> @vfsgnj_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v16f16: ; CHECK: # %bb.0: @@ -256,8 +248,6 @@ define <16 x half> @vfsgnj_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %vb, ret <16 x half> %v } -declare <2 x float> @llvm.vp.copysign.v2f32(<2 x float>, <2 x float>, <2 x i1>, i32) - define <2 x float> @vfsgnj_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v2f32: ; CHECK: # %bb.0: @@ -278,8 +268,6 @@ define <2 x float> @vfsgnj_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %vb, i ret <2 x float> %v } -declare <4 x float> @llvm.vp.copysign.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32) - define <4 x float> @vfsgnj_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v4f32: ; CHECK: # %bb.0: @@ -300,8 +288,6 @@ define <4 x float> @vfsgnj_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %vb, i ret <4 x float> %v } -declare <8 x float> @llvm.vp.copysign.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32) - define <8 x float> @vfsgnj_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v8f32: ; CHECK: # %bb.0: @@ -322,8 +308,6 @@ define <8 x float> @vfsgnj_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %vb, i ret <8 x float> %v } -declare <16 x float> @llvm.vp.copysign.v16f32(<16 x float>, <16 x float>, <16 x i1>, i32) - define <16 x float> @vfsgnj_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v16f32: ; CHECK: # %bb.0: @@ -344,8 +328,6 @@ define <16 x float> @vfsgnj_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %v ret <16 x float> %v } -declare <2 x double> @llvm.vp.copysign.v2f64(<2 x double>, <2 x double>, <2 x i1>, i32) - define <2 x double> 
@vfsgnj_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v2f64: ; CHECK: # %bb.0: @@ -366,8 +348,6 @@ define <2 x double> @vfsgnj_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %vb ret <2 x double> %v } -declare <4 x double> @llvm.vp.copysign.v4f64(<4 x double>, <4 x double>, <4 x i1>, i32) - define <4 x double> @vfsgnj_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v4f64: ; CHECK: # %bb.0: @@ -388,8 +368,6 @@ define <4 x double> @vfsgnj_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %vb ret <4 x double> %v } -declare <8 x double> @llvm.vp.copysign.v8f64(<8 x double>, <8 x double>, <8 x i1>, i32) - define <8 x double> @vfsgnj_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v8f64: ; CHECK: # %bb.0: @@ -410,8 +388,6 @@ define <8 x double> @vfsgnj_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %vb ret <8 x double> %v } -declare <15 x double> @llvm.vp.copysign.v15f64(<15 x double>, <15 x double>, <15 x i1>, i32) - define <15 x double> @vfsgnj_vv_v15f64(<15 x double> %va, <15 x double> %vb, <15 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v15f64: ; CHECK: # %bb.0: @@ -432,8 +408,6 @@ define <15 x double> @vfsgnj_vv_v15f64_unmasked(<15 x double> %va, <15 x double> ret <15 x double> %v } -declare <16 x double> @llvm.vp.copysign.v16f64(<16 x double>, <16 x double>, <16 x i1>, i32) - define <16 x double> @vfsgnj_vv_v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v16f64: ; CHECK: # %bb.0: @@ -454,8 +428,6 @@ define <16 x double> @vfsgnj_vv_v16f64_unmasked(<16 x double> %va, <16 x double> ret <16 x double> %v } -declare <32 x double> @llvm.vp.copysign.v32f64(<32 x double>, <32 x double>, <32 x i1>, i32) - define <32 x double> @vfsgnj_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vfsgnj_vv_v32f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdiv-vp.ll index b2279dca45d8d..6bea222ffb90e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdiv-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdiv-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <8 x i7> @llvm.vp.sdiv.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32) - define <8 x i7> @vdiv_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v8i7: ; CHECK: # %bb.0: @@ -20,8 +18,6 @@ define <8 x i7> @vdiv_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroex ret <8 x i7> %v } -declare <2 x i8> @llvm.vp.sdiv.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) - define <2 x i8> @vdiv_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v2i8: ; CHECK: # %bb.0: @@ -66,8 +62,6 @@ define <2 x i8> @vdiv_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.sdiv.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) - define <4 x i8> @vdiv_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v4i8: ; CHECK: # %bb.0: @@ -112,8 +106,6 @@ define <4 x i8> @vdiv_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) { ret <4 x i8> %v } -declare <6 x i8> @llvm.vp.sdiv.v6i8(<6 x i8>, <6 x i8>, <6 x i1>, i32) - define <6 x i8> @vdiv_vv_v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v6i8: ; CHECK: # %bb.0: @@ -124,8 +116,6 @@ define <6 x i8> @vdiv_vv_v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 zeroex ret <6 x i8> %v } -declare <8 x i8> @llvm.vp.sdiv.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) - define <8 x i8> @vdiv_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v8i8: ; CHECK: # %bb.0: @@ -170,8 +160,6 @@ 
define <8 x i8> @vdiv_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.sdiv.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) - define <16 x i8> @vdiv_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v16i8: ; CHECK: # %bb.0: @@ -216,8 +204,6 @@ define <16 x i8> @vdiv_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl) ret <16 x i8> %v } -declare <2 x i16> @llvm.vp.sdiv.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) - define <2 x i16> @vdiv_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v2i16: ; CHECK: # %bb.0: @@ -262,8 +248,6 @@ define <2 x i16> @vdiv_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.sdiv.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) - define <4 x i16> @vdiv_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v4i16: ; CHECK: # %bb.0: @@ -308,8 +292,6 @@ define <4 x i16> @vdiv_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.sdiv.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) - define <8 x i16> @vdiv_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v8i16: ; CHECK: # %bb.0: @@ -354,8 +336,6 @@ define <8 x i16> @vdiv_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.sdiv.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) - define <16 x i16> @vdiv_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v16i16: ; CHECK: # %bb.0: @@ -400,8 +380,6 @@ define <16 x i16> @vdiv_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext % ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.sdiv.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) - define <2 x i32> @vdiv_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vdiv_vv_v2i32: ; CHECK: # %bb.0: @@ -446,8 +424,6 @@ define <2 x i32> @vdiv_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.sdiv.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vdiv_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v4i32: ; CHECK: # %bb.0: @@ -492,8 +468,6 @@ define <4 x i32> @vdiv_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.sdiv.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) - define <8 x i32> @vdiv_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v8i32: ; CHECK: # %bb.0: @@ -538,8 +512,6 @@ define <8 x i32> @vdiv_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.sdiv.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32) - define <16 x i32> @vdiv_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v16i32: ; CHECK: # %bb.0: @@ -584,8 +556,6 @@ define <16 x i32> @vdiv_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext % ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.sdiv.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) - define <2 x i64> @vdiv_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v2i64: ; CHECK: # %bb.0: @@ -660,8 +630,6 @@ define <2 x i64> @vdiv_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.sdiv.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) - define <4 x i64> @vdiv_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v4i64: ; CHECK: # %bb.0: @@ -736,8 +704,6 @@ define <4 x i64> @vdiv_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.sdiv.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) - define <8 x i64> @vdiv_vv_v8i64(<8 x i64> %va, <8 x i64> 
%b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v8i64: ; CHECK: # %bb.0: @@ -812,8 +778,6 @@ define <8 x i64> @vdiv_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl ret <8 x i64> %v } -declare <16 x i64> @llvm.vp.sdiv.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32) - define <16 x i64> @vdiv_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v16i64: ; CHECK: # %bb.0: @@ -888,9 +852,6 @@ define <16 x i64> @vdiv_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext % ret <16 x i64> %v } - -declare <3 x i8> @llvm.vp.sdiv.v3i8(<3 x i8>, <3 x i8>, <3 x i1>, i32) - define <3 x i8> @vdiv_vv_v3i8_unmasked(<3 x i8> %va, <3 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v3i8_unmasked: ; CHECK: # %bb.0: @@ -911,8 +872,6 @@ define <3 x i8> @vdiv_vv_v3i8_unmasked_avl3(<3 x i8> %va, <3 x i8> %b) { ret <3 x i8> %v } -declare <7 x i8> @llvm.vp.sdiv.v7i8(<7 x i8>, <7 x i8>, <7 x i1>, i32) - define <7 x i8> @vdiv_vv_v7i8_unmasked(<7 x i8> %va, <7 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v7i8_unmasked: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll index f1155a0657b40..1a7874b2c8c6f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <8 x i7> @llvm.vp.udiv.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32) - define <8 x i7> @vdivu_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v8i7: ; CHECK: # %bb.0: @@ -19,8 +17,6 @@ define <8 x i7> @vdivu_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroe ret <8 x i7> %v } -declare <2 x i8> @llvm.vp.udiv.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) - define <2 x i8> @vdivu_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 
zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v2i8: ; CHECK: # %bb.0: @@ -65,8 +61,6 @@ define <2 x i8> @vdivu_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.udiv.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) - define <4 x i8> @vdivu_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v4i8: ; CHECK: # %bb.0: @@ -111,8 +105,6 @@ define <4 x i8> @vdivu_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) { ret <4 x i8> %v } -declare <6 x i8> @llvm.vp.udiv.v6i8(<6 x i8>, <6 x i8>, <6 x i1>, i32) - define <6 x i8> @vdivu_vv_v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v6i8: ; CHECK: # %bb.0: @@ -123,8 +115,6 @@ define <6 x i8> @vdivu_vv_v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 zeroe ret <6 x i8> %v } -declare <8 x i8> @llvm.vp.udiv.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) - define <8 x i8> @vdivu_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v8i8: ; CHECK: # %bb.0: @@ -169,8 +159,6 @@ define <8 x i8> @vdivu_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.udiv.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) - define <16 x i8> @vdivu_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v16i8: ; CHECK: # %bb.0: @@ -215,8 +203,6 @@ define <16 x i8> @vdivu_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl ret <16 x i8> %v } -declare <2 x i16> @llvm.vp.udiv.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) - define <2 x i16> @vdivu_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v2i16: ; CHECK: # %bb.0: @@ -261,8 +247,6 @@ define <2 x i16> @vdivu_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %ev ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.udiv.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) - define <4 x i16> @vdivu_vv_v4i16(<4 x i16> %va, <4 x i16> 
%b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v4i16: ; CHECK: # %bb.0: @@ -307,8 +291,6 @@ define <4 x i16> @vdivu_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %ev ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.udiv.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) - define <8 x i16> @vdivu_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v8i16: ; CHECK: # %bb.0: @@ -353,8 +335,6 @@ define <8 x i16> @vdivu_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %ev ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.udiv.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) - define <16 x i16> @vdivu_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v16i16: ; CHECK: # %bb.0: @@ -399,8 +379,6 @@ define <16 x i16> @vdivu_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.udiv.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) - define <2 x i32> @vdivu_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v2i32: ; CHECK: # %bb.0: @@ -445,8 +423,6 @@ define <2 x i32> @vdivu_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %ev ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.udiv.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vdivu_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v4i32: ; CHECK: # %bb.0: @@ -491,8 +467,6 @@ define <4 x i32> @vdivu_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %ev ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.udiv.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) - define <8 x i32> @vdivu_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v8i32: ; CHECK: # %bb.0: @@ -537,8 +511,6 @@ define <8 x i32> @vdivu_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %ev ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.udiv.v16i32(<16 x i32>, <16 x i32>, <16 x 
i1>, i32) - define <16 x i32> @vdivu_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v16i32: ; CHECK: # %bb.0: @@ -583,8 +555,6 @@ define <16 x i32> @vdivu_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.udiv.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) - define <2 x i64> @vdivu_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v2i64: ; CHECK: # %bb.0: @@ -659,8 +629,6 @@ define <2 x i64> @vdivu_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %ev ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.udiv.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) - define <4 x i64> @vdivu_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v4i64: ; CHECK: # %bb.0: @@ -735,8 +703,6 @@ define <4 x i64> @vdivu_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %ev ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.udiv.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) - define <8 x i64> @vdivu_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v8i64: ; CHECK: # %bb.0: @@ -811,8 +777,6 @@ define <8 x i64> @vdivu_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %ev ret <8 x i64> %v } -declare <16 x i64> @llvm.vp.udiv.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32) - define <16 x i64> @vdivu_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v16i64: ; CHECK: # %bb.0: @@ -887,7 +851,6 @@ define <16 x i64> @vdivu_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext ret <16 x i64> %v } - define <8 x i8> @vdivu_vv_v8i8_unmasked_avl3(<8 x i8> %va, <8 x i8> %b) { ; CHECK-LABEL: vdivu_vv_v8i8_unmasked_avl3: ; CHECK: # %bb.0: @@ -908,8 +871,6 @@ define <8 x i8> @vdivu_vv_v8i8_unmasked_avl7(<8 x i8> %va, <8 x i8> %b) { ret <8 x i8> %v } -declare <3 x i8> @llvm.vp.udiv.v3i8(<3 x i8>, <3 x i8>, <3 x i1>, i32) - define <3 x 
i8> @vdivu_vv_v3i8_unmasked(<3 x i8> %va, <3 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v3i8_unmasked: ; CHECK: # %bb.0: @@ -930,8 +891,6 @@ define <3 x i8> @vdivu_vv_v3i8_unmasked_avl3(<3 x i8> %va, <3 x i8> %b) { ret <3 x i8> %v } -declare <7 x i8> @llvm.vp.udiv.v7i8(<7 x i8>, <7 x i8>, <7 x i1>, i32) - define <7 x i8> @vdivu_vv_v7i8_unmasked(<7 x i8> %va, <7 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v7i8_unmasked: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfabs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfabs-vp.ll index 01bd706ed31f8..2774aba974a29 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfabs-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfabs-vp.ll @@ -220,8 +220,6 @@ define <16 x bfloat> @vfabs_vv_v16bf16_unmasked(<16 x bfloat> %va, i32 zeroext % ret <16 x bfloat> %v } -declare <2 x half> @llvm.vp.fabs.v2f16(<2 x half>, <2 x i1>, i32) - define <2 x half> @vfabs_vv_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfabs_vv_v2f16: ; ZVFH: # %bb.0: @@ -274,8 +272,6 @@ define <2 x half> @vfabs_vv_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) { ret <2 x half> %v } -declare <4 x half> @llvm.vp.fabs.v4f16(<4 x half>, <4 x i1>, i32) - define <4 x half> @vfabs_vv_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfabs_vv_v4f16: ; ZVFH: # %bb.0: @@ -328,8 +324,6 @@ define <4 x half> @vfabs_vv_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) { ret <4 x half> %v } -declare <8 x half> @llvm.vp.fabs.v8f16(<8 x half>, <8 x i1>, i32) - define <8 x half> @vfabs_vv_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfabs_vv_v8f16: ; ZVFH: # %bb.0: @@ -382,8 +376,6 @@ define <8 x half> @vfabs_vv_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) { ret <8 x half> %v } -declare <16 x half> @llvm.vp.fabs.v16f16(<16 x half>, <16 x i1>, i32) - define <16 x half> @vfabs_vv_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: 
vfabs_vv_v16f16: ; ZVFH: # %bb.0: @@ -436,8 +428,6 @@ define <16 x half> @vfabs_vv_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) ret <16 x half> %v } -declare <2 x float> @llvm.vp.fabs.v2f32(<2 x float>, <2 x i1>, i32) - define <2 x float> @vfabs_vv_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_v2f32: ; CHECK: # %bb.0: @@ -458,8 +448,6 @@ define <2 x float> @vfabs_vv_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) { ret <2 x float> %v } -declare <4 x float> @llvm.vp.fabs.v4f32(<4 x float>, <4 x i1>, i32) - define <4 x float> @vfabs_vv_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_v4f32: ; CHECK: # %bb.0: @@ -480,8 +468,6 @@ define <4 x float> @vfabs_vv_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) { ret <4 x float> %v } -declare <8 x float> @llvm.vp.fabs.v8f32(<8 x float>, <8 x i1>, i32) - define <8 x float> @vfabs_vv_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_v8f32: ; CHECK: # %bb.0: @@ -502,8 +488,6 @@ define <8 x float> @vfabs_vv_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) { ret <8 x float> %v } -declare <16 x float> @llvm.vp.fabs.v16f32(<16 x float>, <16 x i1>, i32) - define <16 x float> @vfabs_vv_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_v16f32: ; CHECK: # %bb.0: @@ -524,8 +508,6 @@ define <16 x float> @vfabs_vv_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl ret <16 x float> %v } -declare <2 x double> @llvm.vp.fabs.v2f64(<2 x double>, <2 x i1>, i32) - define <2 x double> @vfabs_vv_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_v2f64: ; CHECK: # %bb.0: @@ -546,8 +528,6 @@ define <2 x double> @vfabs_vv_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl) ret <2 x double> %v } -declare <4 x double> @llvm.vp.fabs.v4f64(<4 x double>, <4 x i1>, i32) - define <4 x double> @vfabs_vv_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_v4f64: ; 
CHECK: # %bb.0: @@ -568,8 +548,6 @@ define <4 x double> @vfabs_vv_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) ret <4 x double> %v } -declare <8 x double> @llvm.vp.fabs.v8f64(<8 x double>, <8 x i1>, i32) - define <8 x double> @vfabs_vv_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_v8f64: ; CHECK: # %bb.0: @@ -590,8 +568,6 @@ define <8 x double> @vfabs_vv_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl) ret <8 x double> %v } -declare <15 x double> @llvm.vp.fabs.v15f64(<15 x double>, <15 x i1>, i32) - define <15 x double> @vfabs_vv_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_v15f64: ; CHECK: # %bb.0: @@ -612,8 +588,6 @@ define <15 x double> @vfabs_vv_v15f64_unmasked(<15 x double> %va, i32 zeroext %e ret <15 x double> %v } -declare <16 x double> @llvm.vp.fabs.v16f64(<16 x double>, <16 x i1>, i32) - define <16 x double> @vfabs_vv_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_v16f64: ; CHECK: # %bb.0: @@ -634,8 +608,6 @@ define <16 x double> @vfabs_vv_v16f64_unmasked(<16 x double> %va, i32 zeroext %e ret <16 x double> %v } -declare <32 x double> @llvm.vp.fabs.v32f64(<32 x double>, <32 x i1>, i32) - define <32 x double> @vfabs_vv_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_v32f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-constrained-sdnode.ll index 599f505808ab4..003feb6d748f5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-constrained-sdnode.ll @@ -4,7 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare <2 x half> @llvm.experimental.constrained.fadd.v2f16(<2 x half>, <2 x half>, metadata, metadata) define <2 x half> @vfadd_vv_v2f16(<2 x 
half> %va, <2 x half> %vb) strictfp { ; CHECK-LABEL: vfadd_vv_v2f16: ; CHECK: # %bb.0: # %entry @@ -28,7 +27,6 @@ define <2 x half> @vfadd_vf_v2f16(<2 x half> %va, half %b) strictfp { ret <2 x half> %vc } -declare <4 x half> @llvm.experimental.constrained.fadd.v4f16(<4 x half>, <4 x half>, metadata, metadata) define <4 x half> @vfadd_vv_v4f16(<4 x half> %va, <4 x half> %vb) strictfp { ; CHECK-LABEL: vfadd_vv_v4f16: ; CHECK: # %bb.0: # %entry @@ -52,7 +50,6 @@ define <4 x half> @vfadd_vf_v4f16(<4 x half> %va, half %b) strictfp { ret <4 x half> %vc } -declare <8 x half> @llvm.experimental.constrained.fadd.v8f16(<8 x half>, <8 x half>, metadata, metadata) define <8 x half> @vfadd_vv_v8f16(<8 x half> %va, <8 x half> %vb) strictfp { ; CHECK-LABEL: vfadd_vv_v8f16: ; CHECK: # %bb.0: # %entry @@ -76,7 +73,6 @@ define <8 x half> @vfadd_vf_v8f16(<8 x half> %va, half %b) strictfp { ret <8 x half> %vc } -declare <16 x half> @llvm.experimental.constrained.fadd.v16f16(<16 x half>, <16 x half>, metadata, metadata) define <16 x half> @vfadd_vv_v16f16(<16 x half> %va, <16 x half> %vb) strictfp { ; CHECK-LABEL: vfadd_vv_v16f16: ; CHECK: # %bb.0: # %entry @@ -100,7 +96,6 @@ define <16 x half> @vfadd_vf_v16f16(<16 x half> %va, half %b) strictfp { ret <16 x half> %vc } -declare <32 x half> @llvm.experimental.constrained.fadd.v32f16(<32 x half>, <32 x half>, metadata, metadata) define <32 x half> @vfadd_vv_v32f16(<32 x half> %va, <32 x half> %vb) strictfp { ; CHECK-LABEL: vfadd_vv_v32f16: ; CHECK: # %bb.0: # %entry @@ -126,7 +121,6 @@ define <32 x half> @vfadd_vf_v32f16(<32 x half> %va, half %b) strictfp { ret <32 x half> %vc } -declare <2 x float> @llvm.experimental.constrained.fadd.v2f32(<2 x float>, <2 x float>, metadata, metadata) define <2 x float> @vfadd_vv_v2f32(<2 x float> %va, <2 x float> %vb) strictfp { ; CHECK-LABEL: vfadd_vv_v2f32: ; CHECK: # %bb.0: # %entry @@ -150,7 +144,6 @@ define <2 x float> @vfadd_vf_v2f32(<2 x float> %va, float %b) strictfp { ret <2 x float> %vc } 
-declare <4 x float> @llvm.experimental.constrained.fadd.v4f32(<4 x float>, <4 x float>, metadata, metadata) define <4 x float> @vfadd_vv_v4f32(<4 x float> %va, <4 x float> %vb) strictfp { ; CHECK-LABEL: vfadd_vv_v4f32: ; CHECK: # %bb.0: # %entry @@ -174,7 +167,6 @@ define <4 x float> @vfadd_vf_v4f32(<4 x float> %va, float %b) strictfp { ret <4 x float> %vc } -declare <8 x float> @llvm.experimental.constrained.fadd.v8f32(<8 x float>, <8 x float>, metadata, metadata) define <8 x float> @vfadd_vv_v8f32(<8 x float> %va, <8 x float> %vb) strictfp { ; CHECK-LABEL: vfadd_vv_v8f32: ; CHECK: # %bb.0: # %entry @@ -198,7 +190,6 @@ define <8 x float> @vfadd_vf_v8f32(<8 x float> %va, float %b) strictfp { ret <8 x float> %vc } -declare <16 x float> @llvm.experimental.constrained.fadd.v16f32(<16 x float>, <16 x float>, metadata, metadata) define <16 x float> @vfadd_vv_v16f32(<16 x float> %va, <16 x float> %vb) strictfp { ; CHECK-LABEL: vfadd_vv_v16f32: ; CHECK: # %bb.0: # %entry @@ -222,7 +213,6 @@ define <16 x float> @vfadd_vf_v16f32(<16 x float> %va, float %b) strictfp { ret <16 x float> %vc } -declare <2 x double> @llvm.experimental.constrained.fadd.v2f64(<2 x double>, <2 x double>, metadata, metadata) define <2 x double> @vfadd_vv_v2f64(<2 x double> %va, <2 x double> %vb) strictfp { ; CHECK-LABEL: vfadd_vv_v2f64: ; CHECK: # %bb.0: # %entry @@ -246,7 +236,6 @@ define <2 x double> @vfadd_vf_v2f64(<2 x double> %va, double %b) strictfp { ret <2 x double> %vc } -declare <4 x double> @llvm.experimental.constrained.fadd.v4f64(<4 x double>, <4 x double>, metadata, metadata) define <4 x double> @vfadd_vv_v4f64(<4 x double> %va, <4 x double> %vb) strictfp { ; CHECK-LABEL: vfadd_vv_v4f64: ; CHECK: # %bb.0: # %entry @@ -270,7 +259,6 @@ define <4 x double> @vfadd_vf_v4f64(<4 x double> %va, double %b) strictfp { ret <4 x double> %vc } -declare <8 x double> @llvm.experimental.constrained.fadd.v8f64(<8 x double>, <8 x double>, metadata, metadata) define <8 x double> @vfadd_vv_v8f64(<8 x 
double> %va, <8 x double> %vb) strictfp { ; CHECK-LABEL: vfadd_vv_v8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-vp.ll index 8a8fe234cacd1..2a65f57bcb94c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare <2 x half> @llvm.vp.fadd.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32) - define <2 x half> @vfadd_vv_v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfadd_vv_v2f16: ; ZVFH: # %bb.0: @@ -104,8 +102,6 @@ define <2 x half> @vfadd_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext ret <2 x half> %v } -declare <3 x half> @llvm.vp.fadd.v3f16(<3 x half>, <3 x half>, <3 x i1>, i32) - define <3 x half> @vfadd_vv_v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfadd_vv_v3f16: ; ZVFH: # %bb.0: @@ -127,8 +123,6 @@ define <3 x half> @vfadd_vv_v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i3 ret <3 x half> %v } -declare <4 x half> @llvm.vp.fadd.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32) - define <4 x half> @vfadd_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfadd_vv_v4f16: ; ZVFH: # %bb.0: @@ -223,8 +217,6 @@ define <4 x half> @vfadd_vf_v4f16_unmasked(<4 x half> %va, half %b, i32 zeroext ret <4 x half> %v } -declare <8 x half> @llvm.vp.fadd.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32) - define <8 x half> @vfadd_vv_v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfadd_vv_v8f16: ; ZVFH: # %bb.0: @@ -319,8 +311,6 @@ define <8 x half> @vfadd_vf_v8f16_unmasked(<8 x half> %va, half %b, i32 zeroext ret <8 x half> %v } -declare <16 x half> @llvm.vp.fadd.v16f16(<16 x half>, <16 
x half>, <16 x i1>, i32) - define <16 x half> @vfadd_vv_v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfadd_vv_v16f16: ; ZVFH: # %bb.0: @@ -415,8 +405,6 @@ define <16 x half> @vfadd_vf_v16f16_unmasked(<16 x half> %va, half %b, i32 zeroe ret <16 x half> %v } -declare <2 x float> @llvm.vp.fadd.v2f32(<2 x float>, <2 x float>, <2 x i1>, i32) - define <2 x float> @vfadd_vv_v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_v2f32: ; CHECK: # %bb.0: @@ -485,8 +473,6 @@ define <2 x float> @vfadd_vf_v2f32_unmasked_commute(<2 x float> %va, float %b, i ret <2 x float> %v } -declare <4 x float> @llvm.vp.fadd.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32) - define <4 x float> @vfadd_vv_v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_v4f32: ; CHECK: # %bb.0: @@ -531,8 +517,6 @@ define <4 x float> @vfadd_vf_v4f32_unmasked(<4 x float> %va, float %b, i32 zeroe ret <4 x float> %v } -declare <8 x float> @llvm.vp.fadd.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32) - define <8 x float> @vfadd_vv_v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_v8f32: ; CHECK: # %bb.0: @@ -577,8 +561,6 @@ define <8 x float> @vfadd_vf_v8f32_unmasked(<8 x float> %va, float %b, i32 zeroe ret <8 x float> %v } -declare <16 x float> @llvm.vp.fadd.v16f32(<16 x float>, <16 x float>, <16 x i1>, i32) - define <16 x float> @vfadd_vv_v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_v16f32: ; CHECK: # %bb.0: @@ -623,8 +605,6 @@ define <16 x float> @vfadd_vf_v16f32_unmasked(<16 x float> %va, float %b, i32 ze ret <16 x float> %v } -declare <2 x double> @llvm.vp.fadd.v2f64(<2 x double>, <2 x double>, <2 x i1>, i32) - define <2 x double> @vfadd_vv_v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_v2f64: ; CHECK: # %bb.0: @@ -669,8 +649,6 @@ 
define <2 x double> @vfadd_vf_v2f64_unmasked(<2 x double> %va, double %b, i32 ze ret <2 x double> %v } -declare <4 x double> @llvm.vp.fadd.v4f64(<4 x double>, <4 x double>, <4 x i1>, i32) - define <4 x double> @vfadd_vv_v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_v4f64: ; CHECK: # %bb.0: @@ -715,8 +693,6 @@ define <4 x double> @vfadd_vf_v4f64_unmasked(<4 x double> %va, double %b, i32 ze ret <4 x double> %v } -declare <8 x double> @llvm.vp.fadd.v8f64(<8 x double>, <8 x double>, <8 x i1>, i32) - define <8 x double> @vfadd_vv_v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_v8f64: ; CHECK: # %bb.0: @@ -761,8 +737,6 @@ define <8 x double> @vfadd_vf_v8f64_unmasked(<8 x double> %va, double %b, i32 ze ret <8 x double> %v } -declare <16 x double> @llvm.vp.fadd.v16f64(<16 x double>, <16 x double>, <16 x i1>, i32) - define <16 x double> @vfadd_vv_v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_v16f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfclass-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfclass-vp.ll index 690c8af7fc8e7..9eb92f45bb149 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfclass-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfclass-vp.ll @@ -286,12 +286,3 @@ define <16 x i1> @isnotfinite_v16f64_unmasked(<16 x double> %x, i32 zeroext %evl ret <16 x i1> %1 } -declare <2 x i1> @llvm.vp.is.fpclass.v2f16(<2 x half>, i32, <2 x i1>, i32) -declare <2 x i1> @llvm.vp.is.fpclass.v2f32(<2 x float>, i32, <2 x i1>, i32) -declare <4 x i1> @llvm.vp.is.fpclass.v4f32(<4 x float>, i32, <4 x i1>, i32) -declare <8 x i1> @llvm.vp.is.fpclass.v8f32(<8 x float>, i32, <8 x i1>, i32) -declare <16 x i1> @llvm.vp.is.fpclass.v16f32(<16 x float>, i32, <16 x i1>, i32) -declare <2 x i1> @llvm.vp.is.fpclass.v2f64(<2 x double>, i32, <2 x i1>, i32) -declare <4 x i1> 
@llvm.vp.is.fpclass.v4f64(<4 x double>, i32, <4 x i1>, i32) -declare <8 x i1> @llvm.vp.is.fpclass.v8f64(<8 x double>, i32, <8 x i1>, i32) -declare <16 x i1> @llvm.vp.is.fpclass.v16f64(<16 x double>, i32, <16 x i1>, i32) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfclass.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfclass.ll index 85e8638301ded..4ae62901a627a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfclass.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfclass.ll @@ -30,7 +30,6 @@ define <2 x i1> @isnan_v2f32(<2 x float> %x) { ret <2 x i1> %1 } - define <4 x i1> @isnan_v4f32(<4 x float> %x) { ; CHECK-LABEL: isnan_v4f32: ; CHECK: # %bb.0: @@ -155,12 +154,3 @@ define <16 x i1> @isnotfinite_v16f64(<16 x double> %x) { ret <16 x i1> %1 } -declare <2 x i1> @llvm.is.fpclass.v2f16(<2 x half>, i32) -declare <2 x i1> @llvm.is.fpclass.v2f32(<2 x float>, i32) -declare <4 x i1> @llvm.is.fpclass.v4f32(<4 x float>, i32) -declare <8 x i1> @llvm.is.fpclass.v8f32(<8 x float>, i32) -declare <16 x i1> @llvm.is.fpclass.v16f32(<16 x float>, i32) -declare <2 x i1> @llvm.is.fpclass.v2f64(<2 x double>, i32) -declare <4 x i1> @llvm.is.fpclass.v4f64(<4 x double>, i32) -declare <8 x i1> @llvm.is.fpclass.v8f64(<8 x double>, i32) -declare <16 x i1> @llvm.is.fpclass.v16f64(<16 x double>, i32) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfcmp-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfcmp-constrained-sdnode.ll index dfd509062ccf7..dbc714f50946d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfcmp-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfcmp-constrained-sdnode.ll @@ -4,7 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare <1 x i1> @llvm.experimental.constrained.fcmp.v1f16(<1 x half>, <1 x half>, metadata, metadata) define <1 x i1> @fcmp_oeq_vv_v1f16(<1 x half> %va, <1 x half> %vb) nounwind strictfp { ; 
CHECK-LABEL: fcmp_oeq_vv_v1f16: ; CHECK: # %bb.0: @@ -637,7 +636,6 @@ define <1 x i1> @fcmp_uno_fv_v1f16(<1 x half> %va, half %b) nounwind strictfp { ret <1 x i1> %1 } -declare <2 x i1> @llvm.experimental.constrained.fcmp.v2f16(<2 x half>, <2 x half>, metadata, metadata) define <2 x i1> @fcmp_oeq_vv_v2f16(<2 x half> %va, <2 x half> %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_v2f16: ; CHECK: # %bb.0: @@ -1270,7 +1268,6 @@ define <2 x i1> @fcmp_uno_fv_v2f16(<2 x half> %va, half %b) nounwind strictfp { ret <2 x i1> %1 } -declare <4 x i1> @llvm.experimental.constrained.fcmp.v4f16(<4 x half>, <4 x half>, metadata, metadata) define <4 x i1> @fcmp_oeq_vv_v4f16(<4 x half> %va, <4 x half> %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_v4f16: ; CHECK: # %bb.0: @@ -1903,7 +1900,6 @@ define <4 x i1> @fcmp_uno_fv_v4f16(<4 x half> %va, half %b) nounwind strictfp { ret <4 x i1> %1 } -declare <8 x i1> @llvm.experimental.constrained.fcmp.v8f16(<8 x half>, <8 x half>, metadata, metadata) define <8 x i1> @fcmp_oeq_vv_v8f16(<8 x half> %va, <8 x half> %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_v8f16: ; CHECK: # %bb.0: @@ -2536,7 +2532,6 @@ define <8 x i1> @fcmp_uno_fv_v8f16(<8 x half> %va, half %b) nounwind strictfp { ret <8 x i1> %1 } -declare <16 x i1> @llvm.experimental.constrained.fcmp.v16f16(<16 x half>, <16 x half>, metadata, metadata) define <16 x i1> @fcmp_oeq_vv_v16f16(<16 x half> %va, <16 x half> %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_v16f16: ; CHECK: # %bb.0: @@ -3211,7 +3206,6 @@ define <16 x i1> @fcmp_uno_fv_v16f16(<16 x half> %va, half %b) nounwind strictfp ret <16 x i1> %1 } -declare <32 x i1> @llvm.experimental.constrained.fcmp.v32f16(<32 x half>, <32 x half>, metadata, metadata) define <32 x i1> @fcmp_oeq_vv_v32f16(<32 x half> %va, <32 x half> %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_v32f16: ; CHECK: # %bb.0: @@ -3928,7 +3922,6 @@ define <32 x i1> @fcmp_uno_fv_v32f16(<32 x half> %va, half %b) nounwind strictfp ret <32 x i1> 
%1 } -declare <1 x i1> @llvm.experimental.constrained.fcmp.v1f32(<1 x float>, <1 x float>, metadata, metadata) define <1 x i1> @fcmp_oeq_vv_v1f32(<1 x float> %va, <1 x float> %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_v1f32: ; CHECK: # %bb.0: @@ -4561,7 +4554,6 @@ define <1 x i1> @fcmp_uno_fv_v1f32(<1 x float> %va, float %b) nounwind strictfp ret <1 x i1> %1 } -declare <2 x i1> @llvm.experimental.constrained.fcmp.v2f32(<2 x float>, <2 x float>, metadata, metadata) define <2 x i1> @fcmp_oeq_vv_v2f32(<2 x float> %va, <2 x float> %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_v2f32: ; CHECK: # %bb.0: @@ -5194,7 +5186,6 @@ define <2 x i1> @fcmp_uno_fv_v2f32(<2 x float> %va, float %b) nounwind strictfp ret <2 x i1> %1 } -declare <4 x i1> @llvm.experimental.constrained.fcmp.v4f32(<4 x float>, <4 x float>, metadata, metadata) define <4 x i1> @fcmp_oeq_vv_v4f32(<4 x float> %va, <4 x float> %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_v4f32: ; CHECK: # %bb.0: @@ -5827,7 +5818,6 @@ define <4 x i1> @fcmp_uno_fv_v4f32(<4 x float> %va, float %b) nounwind strictfp ret <4 x i1> %1 } -declare <8 x i1> @llvm.experimental.constrained.fcmp.v8f32(<8 x float>, <8 x float>, metadata, metadata) define <8 x i1> @fcmp_oeq_vv_v8f32(<8 x float> %va, <8 x float> %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_v8f32: ; CHECK: # %bb.0: @@ -6502,7 +6492,6 @@ define <8 x i1> @fcmp_uno_fv_v8f32(<8 x float> %va, float %b) nounwind strictfp ret <8 x i1> %1 } -declare <16 x i1> @llvm.experimental.constrained.fcmp.v16f32(<16 x float>, <16 x float>, metadata, metadata) define <16 x i1> @fcmp_oeq_vv_v16f32(<16 x float> %va, <16 x float> %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_v16f32: ; CHECK: # %bb.0: @@ -7177,7 +7166,6 @@ define <16 x i1> @fcmp_uno_fv_v16f32(<16 x float> %va, float %b) nounwind strict ret <16 x i1> %1 } -declare <1 x i1> @llvm.experimental.constrained.fcmp.v1f64(<1 x double>, <1 x double>, metadata, metadata) define <1 x i1> @fcmp_oeq_vv_v1f64(<1 x 
double> %va, <1 x double> %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_v1f64: ; CHECK: # %bb.0: @@ -7810,7 +7798,6 @@ define <1 x i1> @fcmp_uno_fv_v1f64(<1 x double> %va, double %b) nounwind strictf ret <1 x i1> %1 } -declare <2 x i1> @llvm.experimental.constrained.fcmp.v2f64(<2 x double>, <2 x double>, metadata, metadata) define <2 x i1> @fcmp_oeq_vv_v2f64(<2 x double> %va, <2 x double> %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_v2f64: ; CHECK: # %bb.0: @@ -8443,7 +8430,6 @@ define <2 x i1> @fcmp_uno_fv_v2f64(<2 x double> %va, double %b) nounwind strictf ret <2 x i1> %1 } -declare <4 x i1> @llvm.experimental.constrained.fcmp.v4f64(<4 x double>, <4 x double>, metadata, metadata) define <4 x i1> @fcmp_oeq_vv_v4f64(<4 x double> %va, <4 x double> %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_v4f64: ; CHECK: # %bb.0: @@ -9118,7 +9104,6 @@ define <4 x i1> @fcmp_uno_fv_v4f64(<4 x double> %va, double %b) nounwind strictf ret <4 x i1> %1 } -declare <8 x i1> @llvm.experimental.constrained.fcmp.v8f64(<8 x double>, <8 x double>, metadata, metadata) define <8 x i1> @fcmp_oeq_vv_v8f64(<8 x double> %va, <8 x double> %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_v8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfcmps-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfcmps-constrained-sdnode.ll index 472f2073667db..218efde7a477e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfcmps-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfcmps-constrained-sdnode.ll @@ -4,7 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare <1 x i1> @llvm.experimental.constrained.fcmps.v1f16(<1 x half>, <1 x half>, metadata, metadata) define <1 x i1> @fcmps_oeq_vv_v1f16(<1 x half> %va, <1 x half> %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_v1f16: ; CHECK: # %bb.0: @@ -536,7 +535,6 @@ define <1 x i1> 
@fcmps_uno_fv_v1f16(<1 x half> %va, half %b) nounwind strictfp { ret <1 x i1> %1 } -declare <2 x i1> @llvm.experimental.constrained.fcmps.v2f16(<2 x half>, <2 x half>, metadata, metadata) define <2 x i1> @fcmps_oeq_vv_v2f16(<2 x half> %va, <2 x half> %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_v2f16: ; CHECK: # %bb.0: @@ -1068,7 +1066,6 @@ define <2 x i1> @fcmps_uno_fv_v2f16(<2 x half> %va, half %b) nounwind strictfp { ret <2 x i1> %1 } -declare <4 x i1> @llvm.experimental.constrained.fcmps.v4f16(<4 x half>, <4 x half>, metadata, metadata) define <4 x i1> @fcmps_oeq_vv_v4f16(<4 x half> %va, <4 x half> %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_v4f16: ; CHECK: # %bb.0: @@ -1600,7 +1597,6 @@ define <4 x i1> @fcmps_uno_fv_v4f16(<4 x half> %va, half %b) nounwind strictfp { ret <4 x i1> %1 } -declare <8 x i1> @llvm.experimental.constrained.fcmps.v8f16(<8 x half>, <8 x half>, metadata, metadata) define <8 x i1> @fcmps_oeq_vv_v8f16(<8 x half> %va, <8 x half> %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_v8f16: ; CHECK: # %bb.0: @@ -2132,7 +2128,6 @@ define <8 x i1> @fcmps_uno_fv_v8f16(<8 x half> %va, half %b) nounwind strictfp { ret <8 x i1> %1 } -declare <16 x i1> @llvm.experimental.constrained.fcmps.v16f16(<16 x half>, <16 x half>, metadata, metadata) define <16 x i1> @fcmps_oeq_vv_v16f16(<16 x half> %va, <16 x half> %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_v16f16: ; CHECK: # %bb.0: @@ -2664,7 +2659,6 @@ define <16 x i1> @fcmps_uno_fv_v16f16(<16 x half> %va, half %b) nounwind strictf ret <16 x i1> %1 } -declare <32 x i1> @llvm.experimental.constrained.fcmps.v32f16(<32 x half>, <32 x half>, metadata, metadata) define <32 x i1> @fcmps_oeq_vv_v32f16(<32 x half> %va, <32 x half> %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_v32f16: ; CHECK: # %bb.0: @@ -3238,7 +3232,6 @@ define <32 x i1> @fcmps_uno_fv_v32f16(<32 x half> %va, half %b) nounwind strictf ret <32 x i1> %1 } -declare <1 x i1> 
@llvm.experimental.constrained.fcmps.v1f32(<1 x float>, <1 x float>, metadata, metadata) define <1 x i1> @fcmps_oeq_vv_v1f32(<1 x float> %va, <1 x float> %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_v1f32: ; CHECK: # %bb.0: @@ -3770,7 +3763,6 @@ define <1 x i1> @fcmps_uno_fv_v1f32(<1 x float> %va, float %b) nounwind strictfp ret <1 x i1> %1 } -declare <2 x i1> @llvm.experimental.constrained.fcmps.v2f32(<2 x float>, <2 x float>, metadata, metadata) define <2 x i1> @fcmps_oeq_vv_v2f32(<2 x float> %va, <2 x float> %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_v2f32: ; CHECK: # %bb.0: @@ -4302,7 +4294,6 @@ define <2 x i1> @fcmps_uno_fv_v2f32(<2 x float> %va, float %b) nounwind strictfp ret <2 x i1> %1 } -declare <4 x i1> @llvm.experimental.constrained.fcmps.v4f32(<4 x float>, <4 x float>, metadata, metadata) define <4 x i1> @fcmps_oeq_vv_v4f32(<4 x float> %va, <4 x float> %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_v4f32: ; CHECK: # %bb.0: @@ -4834,7 +4825,6 @@ define <4 x i1> @fcmps_uno_fv_v4f32(<4 x float> %va, float %b) nounwind strictfp ret <4 x i1> %1 } -declare <8 x i1> @llvm.experimental.constrained.fcmps.v8f32(<8 x float>, <8 x float>, metadata, metadata) define <8 x i1> @fcmps_oeq_vv_v8f32(<8 x float> %va, <8 x float> %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_v8f32: ; CHECK: # %bb.0: @@ -5366,7 +5356,6 @@ define <8 x i1> @fcmps_uno_fv_v8f32(<8 x float> %va, float %b) nounwind strictfp ret <8 x i1> %1 } -declare <16 x i1> @llvm.experimental.constrained.fcmps.v16f32(<16 x float>, <16 x float>, metadata, metadata) define <16 x i1> @fcmps_oeq_vv_v16f32(<16 x float> %va, <16 x float> %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_v16f32: ; CHECK: # %bb.0: @@ -5898,7 +5887,6 @@ define <16 x i1> @fcmps_uno_fv_v16f32(<16 x float> %va, float %b) nounwind stric ret <16 x i1> %1 } -declare <1 x i1> @llvm.experimental.constrained.fcmps.v1f64(<1 x double>, <1 x double>, metadata, metadata) define <1 x i1> @fcmps_oeq_vv_v1f64(<1 x 
double> %va, <1 x double> %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_v1f64: ; CHECK: # %bb.0: @@ -6430,7 +6418,6 @@ define <1 x i1> @fcmps_uno_fv_v1f64(<1 x double> %va, double %b) nounwind strict ret <1 x i1> %1 } -declare <2 x i1> @llvm.experimental.constrained.fcmps.v2f64(<2 x double>, <2 x double>, metadata, metadata) define <2 x i1> @fcmps_oeq_vv_v2f64(<2 x double> %va, <2 x double> %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_v2f64: ; CHECK: # %bb.0: @@ -6962,7 +6949,6 @@ define <2 x i1> @fcmps_uno_fv_v2f64(<2 x double> %va, double %b) nounwind strict ret <2 x i1> %1 } -declare <4 x i1> @llvm.experimental.constrained.fcmps.v4f64(<4 x double>, <4 x double>, metadata, metadata) define <4 x i1> @fcmps_oeq_vv_v4f64(<4 x double> %va, <4 x double> %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_v4f64: ; CHECK: # %bb.0: @@ -7494,7 +7480,6 @@ define <4 x i1> @fcmps_uno_fv_v4f64(<4 x double> %va, double %b) nounwind strict ret <4 x i1> %1 } -declare <8 x i1> @llvm.experimental.constrained.fcmps.v8f64(<8 x double>, <8 x double>, metadata, metadata) define <8 x i1> @fcmps_oeq_vv_v8f64(<8 x double> %va, <8 x double> %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_v8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-constrained-sdnode.ll index 1bc880d93af1a..f912ed9f0ed2d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-constrained-sdnode.ll @@ -4,7 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare <2 x half> @llvm.experimental.constrained.fdiv.v2f16(<2 x half>, <2 x half>, metadata, metadata) define <2 x half> @vfdiv_vv_v2f16(<2 x half> %va, <2 x half> %vb) strictfp { ; CHECK-LABEL: vfdiv_vv_v2f16: ; CHECK: # %bb.0: # %entry @@ -28,7 +27,6 @@ define <2 x half> 
@vfdiv_vf_v2f16(<2 x half> %va, half %b) strictfp { ret <2 x half> %vc } -declare <4 x half> @llvm.experimental.constrained.fdiv.v4f16(<4 x half>, <4 x half>, metadata, metadata) define <4 x half> @vfdiv_vv_v4f16(<4 x half> %va, <4 x half> %vb) strictfp { ; CHECK-LABEL: vfdiv_vv_v4f16: ; CHECK: # %bb.0: # %entry @@ -52,7 +50,6 @@ define <4 x half> @vfdiv_vf_v4f16(<4 x half> %va, half %b) strictfp { ret <4 x half> %vc } -declare <8 x half> @llvm.experimental.constrained.fdiv.v8f16(<8 x half>, <8 x half>, metadata, metadata) define <8 x half> @vfdiv_vv_v8f16(<8 x half> %va, <8 x half> %vb) strictfp { ; CHECK-LABEL: vfdiv_vv_v8f16: ; CHECK: # %bb.0: # %entry @@ -88,7 +85,6 @@ define <8 x half> @vfdiv_fv_v8f16(<8 x half> %va, half %b) strictfp { ret <8 x half> %vc } -declare <16 x half> @llvm.experimental.constrained.fdiv.v16f16(<16 x half>, <16 x half>, metadata, metadata) define <16 x half> @vfdiv_vv_v16f16(<16 x half> %va, <16 x half> %vb) strictfp { ; CHECK-LABEL: vfdiv_vv_v16f16: ; CHECK: # %bb.0: # %entry @@ -112,7 +108,6 @@ define <16 x half> @vfdiv_vf_v16f16(<16 x half> %va, half %b) strictfp { ret <16 x half> %vc } -declare <32 x half> @llvm.experimental.constrained.fdiv.v32f16(<32 x half>, <32 x half>, metadata, metadata) define <32 x half> @vfdiv_vv_v32f16(<32 x half> %va, <32 x half> %vb) strictfp { ; CHECK-LABEL: vfdiv_vv_v32f16: ; CHECK: # %bb.0: # %entry @@ -138,7 +133,6 @@ define <32 x half> @vfdiv_vf_v32f16(<32 x half> %va, half %b) strictfp { ret <32 x half> %vc } -declare <2 x float> @llvm.experimental.constrained.fdiv.v2f32(<2 x float>, <2 x float>, metadata, metadata) define <2 x float> @vfdiv_vv_v2f32(<2 x float> %va, <2 x float> %vb) strictfp { ; CHECK-LABEL: vfdiv_vv_v2f32: ; CHECK: # %bb.0: # %entry @@ -162,7 +156,6 @@ define <2 x float> @vfdiv_vf_v2f32(<2 x float> %va, float %b) strictfp { ret <2 x float> %vc } -declare <4 x float> @llvm.experimental.constrained.fdiv.v4f32(<4 x float>, <4 x float>, metadata, metadata) define <4 x float> 
@vfdiv_vv_v4f32(<4 x float> %va, <4 x float> %vb) strictfp { ; CHECK-LABEL: vfdiv_vv_v4f32: ; CHECK: # %bb.0: # %entry @@ -186,7 +179,6 @@ define <4 x float> @vfdiv_vf_v4f32(<4 x float> %va, float %b) strictfp { ret <4 x float> %vc } -declare <8 x float> @llvm.experimental.constrained.fdiv.v8f32(<8 x float>, <8 x float>, metadata, metadata) define <8 x float> @vfdiv_vv_v8f32(<8 x float> %va, <8 x float> %vb) strictfp { ; CHECK-LABEL: vfdiv_vv_v8f32: ; CHECK: # %bb.0: # %entry @@ -222,7 +214,6 @@ define <8 x float> @vfdiv_fv_v8f32(<8 x float> %va, float %b) strictfp { ret <8 x float> %vc } -declare <16 x float> @llvm.experimental.constrained.fdiv.v16f32(<16 x float>, <16 x float>, metadata, metadata) define <16 x float> @vfdiv_vv_v16f32(<16 x float> %va, <16 x float> %vb) strictfp { ; CHECK-LABEL: vfdiv_vv_v16f32: ; CHECK: # %bb.0: # %entry @@ -246,7 +237,6 @@ define <16 x float> @vfdiv_vf_v16f32(<16 x float> %va, float %b) strictfp { ret <16 x float> %vc } -declare <2 x double> @llvm.experimental.constrained.fdiv.v2f64(<2 x double>, <2 x double>, metadata, metadata) define <2 x double> @vfdiv_vv_v2f64(<2 x double> %va, <2 x double> %vb) strictfp { ; CHECK-LABEL: vfdiv_vv_v2f64: ; CHECK: # %bb.0: # %entry @@ -270,7 +260,6 @@ define <2 x double> @vfdiv_vf_v2f64(<2 x double> %va, double %b) strictfp { ret <2 x double> %vc } -declare <4 x double> @llvm.experimental.constrained.fdiv.v4f64(<4 x double>, <4 x double>, metadata, metadata) define <4 x double> @vfdiv_vv_v4f64(<4 x double> %va, <4 x double> %vb) strictfp { ; CHECK-LABEL: vfdiv_vv_v4f64: ; CHECK: # %bb.0: # %entry @@ -294,7 +283,6 @@ define <4 x double> @vfdiv_vf_v4f64(<4 x double> %va, double %b) strictfp { ret <4 x double> %vc } -declare <8 x double> @llvm.experimental.constrained.fdiv.v8f64(<8 x double>, <8 x double>, metadata, metadata) define <8 x double> @vfdiv_vv_v8f64(<8 x double> %va, <8 x double> %vb) strictfp { ; CHECK-LABEL: vfdiv_vv_v8f64: ; CHECK: # %bb.0: # %entry diff --git 
a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-vp.ll index 30f509436214a..977e236c91fe7 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare <2 x half> @llvm.vp.fdiv.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32) - define <2 x half> @vfdiv_vv_v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfdiv_vv_v2f16: ; ZVFH: # %bb.0: @@ -104,8 +102,6 @@ define <2 x half> @vfdiv_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext ret <2 x half> %v } -declare <3 x half> @llvm.vp.fdiv.v3f16(<3 x half>, <3 x half>, <3 x i1>, i32) - define <3 x half> @vfdiv_vv_v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfdiv_vv_v3f16: ; ZVFH: # %bb.0: @@ -127,8 +123,6 @@ define <3 x half> @vfdiv_vv_v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i3 ret <3 x half> %v } -declare <4 x half> @llvm.vp.fdiv.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32) - define <4 x half> @vfdiv_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfdiv_vv_v4f16: ; ZVFH: # %bb.0: @@ -223,8 +217,6 @@ define <4 x half> @vfdiv_vf_v4f16_unmasked(<4 x half> %va, half %b, i32 zeroext ret <4 x half> %v } -declare <8 x half> @llvm.vp.fdiv.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32) - define <8 x half> @vfdiv_vv_v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfdiv_vv_v8f16: ; ZVFH: # %bb.0: @@ -319,8 +311,6 @@ define <8 x half> @vfdiv_vf_v8f16_unmasked(<8 x half> %va, half %b, i32 zeroext ret <8 x half> %v } -declare <16 x half> @llvm.vp.fdiv.v16f16(<16 x half>, <16 x half>, <16 x i1>, i32) - define <16 x half> @vfdiv_vv_v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, 
i32 zeroext %evl) { ; ZVFH-LABEL: vfdiv_vv_v16f16: ; ZVFH: # %bb.0: @@ -415,8 +405,6 @@ define <16 x half> @vfdiv_vf_v16f16_unmasked(<16 x half> %va, half %b, i32 zeroe ret <16 x half> %v } -declare <2 x float> @llvm.vp.fdiv.v2f32(<2 x float>, <2 x float>, <2 x i1>, i32) - define <2 x float> @vfdiv_vv_v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_v2f32: ; CHECK: # %bb.0: @@ -461,8 +449,6 @@ define <2 x float> @vfdiv_vf_v2f32_unmasked(<2 x float> %va, float %b, i32 zeroe ret <2 x float> %v } -declare <4 x float> @llvm.vp.fdiv.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32) - define <4 x float> @vfdiv_vv_v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_v4f32: ; CHECK: # %bb.0: @@ -507,8 +493,6 @@ define <4 x float> @vfdiv_vf_v4f32_unmasked(<4 x float> %va, float %b, i32 zeroe ret <4 x float> %v } -declare <8 x float> @llvm.vp.fdiv.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32) - define <8 x float> @vfdiv_vv_v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_v8f32: ; CHECK: # %bb.0: @@ -553,8 +537,6 @@ define <8 x float> @vfdiv_vf_v8f32_unmasked(<8 x float> %va, float %b, i32 zeroe ret <8 x float> %v } -declare <16 x float> @llvm.vp.fdiv.v16f32(<16 x float>, <16 x float>, <16 x i1>, i32) - define <16 x float> @vfdiv_vv_v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_v16f32: ; CHECK: # %bb.0: @@ -599,8 +581,6 @@ define <16 x float> @vfdiv_vf_v16f32_unmasked(<16 x float> %va, float %b, i32 ze ret <16 x float> %v } -declare <2 x double> @llvm.vp.fdiv.v2f64(<2 x double>, <2 x double>, <2 x i1>, i32) - define <2 x double> @vfdiv_vv_v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_v2f64: ; CHECK: # %bb.0: @@ -645,8 +625,6 @@ define <2 x double> @vfdiv_vf_v2f64_unmasked(<2 x double> %va, double %b, i32 ze ret <2 x double> %v } -declare 
<4 x double> @llvm.vp.fdiv.v4f64(<4 x double>, <4 x double>, <4 x i1>, i32) - define <4 x double> @vfdiv_vv_v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_v4f64: ; CHECK: # %bb.0: @@ -691,8 +669,6 @@ define <4 x double> @vfdiv_vf_v4f64_unmasked(<4 x double> %va, double %b, i32 ze ret <4 x double> %v } -declare <8 x double> @llvm.vp.fdiv.v8f64(<8 x double>, <8 x double>, <8 x i1>, i32) - define <8 x double> @vfdiv_vv_v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_v8f64: ; CHECK: # %bb.0: @@ -737,8 +713,6 @@ define <8 x double> @vfdiv_vf_v8f64_unmasked(<8 x double> %va, double %b, i32 ze ret <8 x double> %v } -declare <16 x double> @llvm.vp.fdiv.v16f64(<16 x double>, <16 x double>, <16 x i1>, i32) - define <16 x double> @vfdiv_vv_v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_v16f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll index 6f9885d9529d5..f28b970f48ff7 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+m,+d,+zfh,+zvfhmin,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare <2 x half> @llvm.vp.fma.v2f16(<2 x half>, <2 x half>, <2 x half>, <2 x i1>, i32) - define <2 x half> @vfma_vv_v2f16(<2 x half> %va, <2 x half> %b, <2 x half> %c, <2 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfma_vv_v2f16: ; ZVFH: # %bb.0: @@ -111,8 +109,6 @@ define <2 x half> @vfma_vf_v2f16_unmasked(<2 x half> %va, half %b, <2 x half> %v ret <2 x half> %v } -declare <4 x half> @llvm.vp.fma.v4f16(<4 x half>, <4 x half>, <4 x half>, <4 x i1>, i32) - define <4 x half> @vfma_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x half> %c, <4 x i1> %m, i32 zeroext %evl) { ; 
ZVFH-LABEL: vfma_vv_v4f16: ; ZVFH: # %bb.0: @@ -214,8 +210,6 @@ define <4 x half> @vfma_vf_v4f16_unmasked(<4 x half> %va, half %b, <4 x half> %v ret <4 x half> %v } -declare <8 x half> @llvm.vp.fma.v8f16(<8 x half>, <8 x half>, <8 x half>, <8 x i1>, i32) - define <8 x half> @vfma_vv_v8f16(<8 x half> %va, <8 x half> %b, <8 x half> %c, <8 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfma_vv_v8f16: ; ZVFH: # %bb.0: @@ -317,8 +311,6 @@ define <8 x half> @vfma_vf_v8f16_unmasked(<8 x half> %va, half %b, <8 x half> %v ret <8 x half> %v } -declare <16 x half> @llvm.vp.fma.v16f16(<16 x half>, <16 x half>, <16 x half>, <16 x i1>, i32) - define <16 x half> @vfma_vv_v16f16(<16 x half> %va, <16 x half> %b, <16 x half> %c, <16 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfma_vv_v16f16: ; ZVFH: # %bb.0: @@ -420,8 +412,6 @@ define <16 x half> @vfma_vf_v16f16_unmasked(<16 x half> %va, half %b, <16 x half ret <16 x half> %v } -declare <2 x float> @llvm.vp.fma.v2f32(<2 x float>, <2 x float>, <2 x float>, <2 x i1>, i32) - define <2 x float> @vfma_vv_v2f32(<2 x float> %va, <2 x float> %b, <2 x float> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v2f32: ; CHECK: # %bb.0: @@ -467,8 +457,6 @@ define <2 x float> @vfma_vf_v2f32_unmasked(<2 x float> %va, float %b, <2 x float ret <2 x float> %v } -declare <4 x float> @llvm.vp.fma.v4f32(<4 x float>, <4 x float>, <4 x float>, <4 x i1>, i32) - define <4 x float> @vfma_vv_v4f32(<4 x float> %va, <4 x float> %b, <4 x float> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v4f32: ; CHECK: # %bb.0: @@ -514,8 +502,6 @@ define <4 x float> @vfma_vf_v4f32_unmasked(<4 x float> %va, float %b, <4 x float ret <4 x float> %v } -declare <8 x float> @llvm.vp.fma.v8f32(<8 x float>, <8 x float>, <8 x float>, <8 x i1>, i32) - define <8 x float> @vfma_vv_v8f32(<8 x float> %va, <8 x float> %b, <8 x float> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v8f32: ; CHECK: # %bb.0: @@ -561,8 +547,6 @@ define <8 x float> 
@vfma_vf_v8f32_unmasked(<8 x float> %va, float %b, <8 x float ret <8 x float> %v } -declare <16 x float> @llvm.vp.fma.v16f32(<16 x float>, <16 x float>, <16 x float>, <16 x i1>, i32) - define <16 x float> @vfma_vv_v16f32(<16 x float> %va, <16 x float> %b, <16 x float> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v16f32: ; CHECK: # %bb.0: @@ -608,8 +592,6 @@ define <16 x float> @vfma_vf_v16f32_unmasked(<16 x float> %va, float %b, <16 x f ret <16 x float> %v } -declare <2 x double> @llvm.vp.fma.v2f64(<2 x double>, <2 x double>, <2 x double>, <2 x i1>, i32) - define <2 x double> @vfma_vv_v2f64(<2 x double> %va, <2 x double> %b, <2 x double> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v2f64: ; CHECK: # %bb.0: @@ -655,8 +637,6 @@ define <2 x double> @vfma_vf_v2f64_unmasked(<2 x double> %va, double %b, <2 x do ret <2 x double> %v } -declare <4 x double> @llvm.vp.fma.v4f64(<4 x double>, <4 x double>, <4 x double>, <4 x i1>, i32) - define <4 x double> @vfma_vv_v4f64(<4 x double> %va, <4 x double> %b, <4 x double> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v4f64: ; CHECK: # %bb.0: @@ -702,8 +682,6 @@ define <4 x double> @vfma_vf_v4f64_unmasked(<4 x double> %va, double %b, <4 x do ret <4 x double> %v } -declare <8 x double> @llvm.vp.fma.v8f64(<8 x double>, <8 x double>, <8 x double>, <8 x i1>, i32) - define <8 x double> @vfma_vv_v8f64(<8 x double> %va, <8 x double> %b, <8 x double> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v8f64: ; CHECK: # %bb.0: @@ -749,8 +727,6 @@ define <8 x double> @vfma_vf_v8f64_unmasked(<8 x double> %va, double %b, <8 x do ret <8 x double> %v } -declare <15 x double> @llvm.vp.fma.v15f64(<15 x double>, <15 x double>, <15 x double>, <15 x i1>, i32) - define <15 x double> @vfma_vv_v15f64(<15 x double> %va, <15 x double> %b, <15 x double> %c, <15 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v15f64: ; CHECK: # %bb.0: @@ -776,8 +752,6 @@ define <15 x double> 
@vfma_vv_v15f64_unmasked(<15 x double> %va, <15 x double> % ret <15 x double> %v } -declare <16 x double> @llvm.vp.fma.v16f64(<16 x double>, <16 x double>, <16 x double>, <16 x i1>, i32) - define <16 x double> @vfma_vv_v16f64(<16 x double> %va, <16 x double> %b, <16 x double> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v16f64: ; CHECK: # %bb.0: @@ -827,8 +801,6 @@ define <16 x double> @vfma_vf_v16f64_unmasked(<16 x double> %va, double %b, <16 ret <16 x double> %v } -declare <32 x double> @llvm.vp.fma.v32f64(<32 x double>, <32 x double>, <32 x double>, <32 x i1>, i32) - define <32 x double> @vfma_vv_v32f64(<32 x double> %va, <32 x double> %b, <32 x double> %c, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v32f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmacc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmacc-vp.ll index bc13e1d217a9b..99bdddcbc3253 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmacc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmacc-vp.ll @@ -4,11 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare <2 x half> @llvm.vp.fma.v2f16(<2 x half>, <2 x half>, <2 x half>, <2 x i1>, i32) -declare <2 x half> @llvm.vp.fneg.v2f16(<2 x half>, <2 x i1>, i32) -declare <2 x half> @llvm.vp.merge.v2f16(<2 x i1>, <2 x half>, <2 x half>, i32) -declare <2 x half> @llvm.vp.select.v2f16(<2 x i1>, <2 x half>, <2 x half>, i32) - define <2 x half> @vfmacc_vv_v2f16(<2 x half> %a, <2 x half> %b, <2 x half> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_v2f16: ; CHECK: # %bb.0: @@ -115,11 +110,6 @@ define <2 x half> @vfmacc_vf_v2f16_commute_ta(<2 x half> %va, half %b, <2 x half ret <2 x half> %u } -declare <4 x half> @llvm.vp.fma.v4f16(<4 x half>, <4 x half>, <4 x half>, <4 x i1>, i32) -declare <4 x half> @llvm.vp.fneg.v4f16(<4 x half>, <4 x i1>, i32) -declare <4 x half> @llvm.vp.merge.v4f16(<4 
x i1>, <4 x half>, <4 x half>, i32) -declare <4 x half> @llvm.vp.select.v4f16(<4 x i1>, <4 x half>, <4 x half>, i32) - define <4 x half> @vfmacc_vv_v4f16(<4 x half> %a, <4 x half> %b, <4 x half> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_v4f16: ; CHECK: # %bb.0: @@ -226,11 +216,6 @@ define <4 x half> @vfmacc_vf_v4f16_commute_ta(<4 x half> %va, half %b, <4 x half ret <4 x half> %u } -declare <8 x half> @llvm.vp.fma.v8f16(<8 x half>, <8 x half>, <8 x half>, <8 x i1>, i32) -declare <8 x half> @llvm.vp.fneg.v8f16(<8 x half>, <8 x i1>, i32) -declare <8 x half> @llvm.vp.merge.v8f16(<8 x i1>, <8 x half>, <8 x half>, i32) -declare <8 x half> @llvm.vp.select.v8f16(<8 x i1>, <8 x half>, <8 x half>, i32) - define <8 x half> @vfmacc_vv_v8f16(<8 x half> %a, <8 x half> %b, <8 x half> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_v8f16: ; CHECK: # %bb.0: @@ -337,11 +322,6 @@ define <8 x half> @vfmacc_vf_v8f16_commute_ta(<8 x half> %va, half %b, <8 x half ret <8 x half> %u } -declare <16 x half> @llvm.vp.fma.v16f16(<16 x half>, <16 x half>, <16 x half>, <16 x i1>, i32) -declare <16 x half> @llvm.vp.fneg.v16f16(<16 x half>, <16 x i1>, i32) -declare <16 x half> @llvm.vp.merge.v16f16(<16 x i1>, <16 x half>, <16 x half>, i32) -declare <16 x half> @llvm.vp.select.v16f16(<16 x i1>, <16 x half>, <16 x half>, i32) - define <16 x half> @vfmacc_vv_v16f16(<16 x half> %a, <16 x half> %b, <16 x half> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_v16f16: ; CHECK: # %bb.0: @@ -448,11 +428,6 @@ define <16 x half> @vfmacc_vf_v16f16_commute_ta(<16 x half> %va, half %b, <16 x ret <16 x half> %u } -declare <32 x half> @llvm.vp.fma.v32f16(<32 x half>, <32 x half>, <32 x half>, <32 x i1>, i32) -declare <32 x half> @llvm.vp.fneg.v32f16(<32 x half>, <32 x i1>, i32) -declare <32 x half> @llvm.vp.merge.v32f16(<32 x i1>, <32 x half>, <32 x half>, i32) -declare <32 x half> @llvm.vp.select.v32f16(<32 x i1>, <32 x half>, <32 x half>, i32) - define <32 x half> 
@vfmacc_vv_v32f16(<32 x half> %a, <32 x half> %b, <32 x half> %c, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_v32f16: ; CHECK: # %bb.0: @@ -559,11 +534,6 @@ define <32 x half> @vfmacc_vf_v32f16_commute_ta(<32 x half> %va, half %b, <32 x ret <32 x half> %u } -declare <2 x float> @llvm.vp.fma.v2f32(<2 x float>, <2 x float>, <2 x float>, <2 x i1>, i32) -declare <2 x float> @llvm.vp.fneg.v2f32(<2 x float>, <2 x i1>, i32) -declare <2 x float> @llvm.vp.merge.v2f32(<2 x i1>, <2 x float>, <2 x float>, i32) -declare <2 x float> @llvm.vp.select.v2f32(<2 x i1>, <2 x float>, <2 x float>, i32) - define <2 x float> @vfmacc_vv_v2f32(<2 x float> %a, <2 x float> %b, <2 x float> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_v2f32: ; CHECK: # %bb.0: @@ -670,11 +640,6 @@ define <2 x float> @vfmacc_vf_v2f32_commute_ta(<2 x float> %va, float %b, <2 x f ret <2 x float> %u } -declare <4 x float> @llvm.vp.fma.v4f32(<4 x float>, <4 x float>, <4 x float>, <4 x i1>, i32) -declare <4 x float> @llvm.vp.fneg.v4f32(<4 x float>, <4 x i1>, i32) -declare <4 x float> @llvm.vp.merge.v4f32(<4 x i1>, <4 x float>, <4 x float>, i32) -declare <4 x float> @llvm.vp.select.v4f32(<4 x i1>, <4 x float>, <4 x float>, i32) - define <4 x float> @vfmacc_vv_v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_v4f32: ; CHECK: # %bb.0: @@ -781,11 +746,6 @@ define <4 x float> @vfmacc_vf_v4f32_commute_ta(<4 x float> %va, float %b, <4 x f ret <4 x float> %u } -declare <8 x float> @llvm.vp.fma.v8f32(<8 x float>, <8 x float>, <8 x float>, <8 x i1>, i32) -declare <8 x float> @llvm.vp.fneg.v8f32(<8 x float>, <8 x i1>, i32) -declare <8 x float> @llvm.vp.merge.v8f32(<8 x i1>, <8 x float>, <8 x float>, i32) -declare <8 x float> @llvm.vp.select.v8f32(<8 x i1>, <8 x float>, <8 x float>, i32) - define <8 x float> @vfmacc_vv_v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_v8f32: ; 
CHECK: # %bb.0: @@ -892,11 +852,6 @@ define <8 x float> @vfmacc_vf_v8f32_commute_ta(<8 x float> %va, float %b, <8 x f ret <8 x float> %u } -declare <16 x float> @llvm.vp.fma.v16f32(<16 x float>, <16 x float>, <16 x float>, <16 x i1>, i32) -declare <16 x float> @llvm.vp.fneg.v16f32(<16 x float>, <16 x i1>, i32) -declare <16 x float> @llvm.vp.merge.v16f32(<16 x i1>, <16 x float>, <16 x float>, i32) -declare <16 x float> @llvm.vp.select.v16f32(<16 x i1>, <16 x float>, <16 x float>, i32) - define <16 x float> @vfmacc_vv_v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_v16f32: ; CHECK: # %bb.0: @@ -1003,11 +958,6 @@ define <16 x float> @vfmacc_vf_v16f32_commute_ta(<16 x float> %va, float %b, <16 ret <16 x float> %u } -declare <2 x double> @llvm.vp.fma.v2f64(<2 x double>, <2 x double>, <2 x double>, <2 x i1>, i32) -declare <2 x double> @llvm.vp.fneg.v2f64(<2 x double>, <2 x i1>, i32) -declare <2 x double> @llvm.vp.merge.v2f64(<2 x i1>, <2 x double>, <2 x double>, i32) -declare <2 x double> @llvm.vp.select.v2f64(<2 x i1>, <2 x double>, <2 x double>, i32) - define <2 x double> @vfmacc_vv_v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_v2f64: ; CHECK: # %bb.0: @@ -1114,11 +1064,6 @@ define <2 x double> @vfmacc_vf_v2f64_commute_ta(<2 x double> %va, double %b, <2 ret <2 x double> %u } -declare <4 x double> @llvm.vp.fma.v4f64(<4 x double>, <4 x double>, <4 x double>, <4 x i1>, i32) -declare <4 x double> @llvm.vp.fneg.v4f64(<4 x double>, <4 x i1>, i32) -declare <4 x double> @llvm.vp.merge.v4f64(<4 x i1>, <4 x double>, <4 x double>, i32) -declare <4 x double> @llvm.vp.select.v4f64(<4 x i1>, <4 x double>, <4 x double>, i32) - define <4 x double> @vfmacc_vv_v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_v4f64: ; CHECK: # %bb.0: @@ -1225,11 +1170,6 @@ define <4 x double> 
@vfmacc_vf_v4f64_commute_ta(<4 x double> %va, double %b, <4 ret <4 x double> %u } -declare <8 x double> @llvm.vp.fma.v8f64(<8 x double>, <8 x double>, <8 x double>, <8 x i1>, i32) -declare <8 x double> @llvm.vp.fneg.v8f64(<8 x double>, <8 x i1>, i32) -declare <8 x double> @llvm.vp.merge.v8f64(<8 x i1>, <8 x double>, <8 x double>, i32) -declare <8 x double> @llvm.vp.select.v8f64(<8 x i1>, <8 x double>, <8 x double>, i32) - define <8 x double> @vfmacc_vv_v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_v8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmadd-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmadd-constrained-sdnode.ll index b8f3f0fef0419..bf7336e58ffc0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmadd-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmadd-constrained-sdnode.ll @@ -7,8 +7,6 @@ ; This tests a mix of vfmacc and vfmadd by using different operand orders to ; trigger commuting in TwoAddressInstructionPass. 
-declare <2 x half> @llvm.experimental.constrained.fma.v2f16(<2 x half>, <2 x half>, <2 x half>, metadata, metadata) - define <2 x half> @vfmadd_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x half> %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_v2f16: ; CHECK: # %bb.0: @@ -31,8 +29,6 @@ define <2 x half> @vfmadd_vf_v2f16(<2 x half> %va, <2 x half> %vb, half %c) stri ret <2 x half> %vd } -declare <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half>, <4 x half>, <4 x half>, metadata, metadata) - define <4 x half> @vfmadd_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x half> %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_v4f16: ; CHECK: # %bb.0: @@ -55,8 +51,6 @@ define <4 x half> @vfmadd_vf_v4f16(<4 x half> %va, <4 x half> %vb, half %c) stri ret <4 x half> %vd } -declare <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half>, <8 x half>, <8 x half>, metadata, metadata) - define <8 x half> @vfmadd_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x half> %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_v8f16: ; CHECK: # %bb.0: @@ -79,8 +73,6 @@ define <8 x half> @vfmadd_vf_v8f16(<8 x half> %va, <8 x half> %vb, half %c) stri ret <8 x half> %vd } -declare <16 x half> @llvm.experimental.constrained.fma.v16f16(<16 x half>, <16 x half>, <16 x half>, metadata, metadata) - define <16 x half> @vfmadd_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x half> %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_v16f16: ; CHECK: # %bb.0: @@ -103,8 +95,6 @@ define <16 x half> @vfmadd_vf_v16f16(<16 x half> %va, <16 x half> %vb, half %c) ret <16 x half> %vd } -declare <32 x half> @llvm.experimental.constrained.fma.v32f16(<32 x half>, <32 x half>, <32 x half>, metadata, metadata) - define <32 x half> @vfmadd_vv_v32f16(<32 x half> %va, <32 x half> %vb, <32 x half> %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_v32f16: ; CHECK: # %bb.0: @@ -129,8 +119,6 @@ define <32 x half> @vfmadd_vf_v32f16(<32 x half> %va, <32 x half> %vb, half %c) ret <32 x half> %vd } -declare <2 x float> 
@llvm.experimental.constrained.fma.v2f32(<2 x float>, <2 x float>, <2 x float>, metadata, metadata) - define <2 x float> @vfmadd_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x float> %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_v2f32: ; CHECK: # %bb.0: @@ -153,8 +141,6 @@ define <2 x float> @vfmadd_vf_v2f32(<2 x float> %va, <2 x float> %vb, float %c) ret <2 x float> %vd } -declare <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float>, <4 x float>, <4 x float>, metadata, metadata) - define <4 x float> @vfmadd_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x float> %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_v4f32: ; CHECK: # %bb.0: @@ -177,8 +163,6 @@ define <4 x float> @vfmadd_vf_v4f32(<4 x float> %va, <4 x float> %vb, float %c) ret <4 x float> %vd } -declare <8 x float> @llvm.experimental.constrained.fma.v8f32(<8 x float>, <8 x float>, <8 x float>, metadata, metadata) - define <8 x float> @vfmadd_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x float> %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_v8f32: ; CHECK: # %bb.0: @@ -201,8 +185,6 @@ define <8 x float> @vfmadd_vf_v8f32(<8 x float> %va, <8 x float> %vb, float %c) ret <8 x float> %vd } -declare <16 x float> @llvm.experimental.constrained.fma.v16f32(<16 x float>, <16 x float>, <16 x float>, metadata, metadata) - define <16 x float> @vfmadd_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x float> %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_v16f32: ; CHECK: # %bb.0: @@ -225,8 +207,6 @@ define <16 x float> @vfmadd_vf_v16f32(<16 x float> %va, <16 x float> %vb, float ret <16 x float> %vd } -declare <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double>, <2 x double>, <2 x double>, metadata, metadata) - define <2 x double> @vfmadd_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x double> %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_v2f64: ; CHECK: # %bb.0: @@ -249,8 +229,6 @@ define <2 x double> @vfmadd_vf_v2f64(<2 x double> %va, <2 x double> %vb, double ret <2 x double> %vd } -declare <4 x double> 
@llvm.experimental.constrained.fma.v4f64(<4 x double>, <4 x double>, <4 x double>, metadata, metadata) - define <4 x double> @vfmadd_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x double> %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_v4f64: ; CHECK: # %bb.0: @@ -273,8 +251,6 @@ define <4 x double> @vfmadd_vf_v4f64(<4 x double> %va, <4 x double> %vb, double ret <4 x double> %vd } -declare <8 x double> @llvm.experimental.constrained.fma.v8f64(<8 x double>, <8 x double>, <8 x double>, metadata, metadata) - define <8 x double> @vfmadd_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x double> %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_v8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll index c736973dd0706..403d0b8d57940 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare <2 x half> @llvm.vp.maxnum.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32) - define <2 x half> @vfmax_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmax_vv_v2f16: ; ZVFH: # %bb.0: @@ -52,8 +50,6 @@ define <2 x half> @vfmax_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %vb, i32 z ret <2 x half> %v } -declare <4 x half> @llvm.vp.maxnum.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32) - define <4 x half> @vfmax_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmax_vv_v4f16: ; ZVFH: # %bb.0: @@ -96,8 +92,6 @@ define <4 x half> @vfmax_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %vb, i32 z ret <4 x half> %v } -declare <8 x half> @llvm.vp.maxnum.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32) - define <8 x half> @vfmax_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) { ; 
ZVFH-LABEL: vfmax_vv_v8f16: ; ZVFH: # %bb.0: @@ -140,8 +134,6 @@ define <8 x half> @vfmax_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %vb, i32 z ret <8 x half> %v } -declare <16 x half> @llvm.vp.maxnum.v16f16(<16 x half>, <16 x half>, <16 x i1>, i32) - define <16 x half> @vfmax_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmax_vv_v16f16: ; ZVFH: # %bb.0: @@ -184,8 +176,6 @@ define <16 x half> @vfmax_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %vb, i ret <16 x half> %v } -declare <2 x float> @llvm.vp.maxnum.v2f32(<2 x float>, <2 x float>, <2 x i1>, i32) - define <2 x float> @vfmax_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v2f32: ; CHECK: # %bb.0: @@ -206,8 +196,6 @@ define <2 x float> @vfmax_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %vb, i3 ret <2 x float> %v } -declare <4 x float> @llvm.vp.maxnum.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32) - define <4 x float> @vfmax_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v4f32: ; CHECK: # %bb.0: @@ -228,8 +216,6 @@ define <4 x float> @vfmax_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %vb, i3 ret <4 x float> %v } -declare <8 x float> @llvm.vp.maxnum.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32) - define <8 x float> @vfmax_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v8f32: ; CHECK: # %bb.0: @@ -250,8 +236,6 @@ define <8 x float> @vfmax_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %vb, i3 ret <8 x float> %v } -declare <16 x float> @llvm.vp.maxnum.v16f32(<16 x float>, <16 x float>, <16 x i1>, i32) - define <16 x float> @vfmax_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v16f32: ; CHECK: # %bb.0: @@ -272,8 +256,6 @@ define <16 x float> @vfmax_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %vb ret <16 x float> %v } -declare <2 x double> 
@llvm.vp.maxnum.v2f64(<2 x double>, <2 x double>, <2 x i1>, i32) - define <2 x double> @vfmax_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v2f64: ; CHECK: # %bb.0: @@ -294,8 +276,6 @@ define <2 x double> @vfmax_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %vb, ret <2 x double> %v } -declare <4 x double> @llvm.vp.maxnum.v4f64(<4 x double>, <4 x double>, <4 x i1>, i32) - define <4 x double> @vfmax_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v4f64: ; CHECK: # %bb.0: @@ -316,8 +296,6 @@ define <4 x double> @vfmax_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %vb, ret <4 x double> %v } -declare <8 x double> @llvm.vp.maxnum.v8f64(<8 x double>, <8 x double>, <8 x i1>, i32) - define <8 x double> @vfmax_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v8f64: ; CHECK: # %bb.0: @@ -338,8 +316,6 @@ define <8 x double> @vfmax_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %vb, ret <8 x double> %v } -declare <15 x double> @llvm.vp.maxnum.v15f64(<15 x double>, <15 x double>, <15 x i1>, i32) - define <15 x double> @vfmax_vv_v15f64(<15 x double> %va, <15 x double> %vb, <15 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v15f64: ; CHECK: # %bb.0: @@ -360,8 +336,6 @@ define <15 x double> @vfmax_vv_v15f64_unmasked(<15 x double> %va, <15 x double> ret <15 x double> %v } -declare <16 x double> @llvm.vp.maxnum.v16f64(<16 x double>, <16 x double>, <16 x i1>, i32) - define <16 x double> @vfmax_vv_v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v16f64: ; CHECK: # %bb.0: @@ -382,8 +356,6 @@ define <16 x double> @vfmax_vv_v16f64_unmasked(<16 x double> %va, <16 x double> ret <16 x double> %v } -declare <32 x double> @llvm.vp.maxnum.v32f64(<32 x double>, <32 x double>, <32 x i1>, i32) - define <32 x double> @vfmax_vv_v32f64(<32 x double> %va, <32 x double> %vb, 
<32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v32f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax.ll index c37df892de442..44362efa1fe83 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare <2 x half> @llvm.maxnum.v2f16(<2 x half>, <2 x half>) - define <2 x half> @vfmax_v2f16_vv(<2 x half> %a, <2 x half> %b) { ; ZVFH-LABEL: vfmax_v2f16_vv: ; ZVFH: # %bb.0: @@ -81,8 +79,6 @@ define <2 x half> @vfmax_v2f16_fv(<2 x half> %a, half %b) { ret <2 x half> %v } -declare <4 x half> @llvm.maxnum.v4f16(<4 x half>, <4 x half>) - define <4 x half> @vfmax_v4f16_vv(<4 x half> %a, <4 x half> %b) { ; ZVFH-LABEL: vfmax_v4f16_vv: ; ZVFH: # %bb.0: @@ -154,8 +150,6 @@ define <4 x half> @vfmax_v4f16_fv(<4 x half> %a, half %b) { ret <4 x half> %v } -declare <8 x half> @llvm.maxnum.v8f16(<8 x half>, <8 x half>) - define <8 x half> @vfmax_v8f16_vv(<8 x half> %a, <8 x half> %b) { ; ZVFH-LABEL: vfmax_v8f16_vv: ; ZVFH: # %bb.0: @@ -227,8 +221,6 @@ define <8 x half> @vfmax_v8f16_fv(<8 x half> %a, half %b) { ret <8 x half> %v } -declare <16 x half> @llvm.maxnum.v16f16(<16 x half>, <16 x half>) - define <16 x half> @vfmax_v16f16_vv(<16 x half> %a, <16 x half> %b) { ; ZVFH-LABEL: vfmax_v16f16_vv: ; ZVFH: # %bb.0: @@ -300,8 +292,6 @@ define <16 x half> @vfmax_v16f16_fv(<16 x half> %a, half %b) { ret <16 x half> %v } -declare <2 x float> @llvm.maxnum.v2f32(<2 x float>, <2 x float>) - define <2 x float> @vfmax_v2f32_vv(<2 x float> %a, <2 x float> %b) { ; CHECK-LABEL: vfmax_v2f32_vv: ; CHECK: # %bb.0: @@ -336,8 +326,6 @@ define <2 x float> @vfmax_v2f32_fv(<2 x float> %a, float %b) { ret <2 x float> %v } -declare <4 x float> @llvm.maxnum.v4f32(<4 x float>, <4 x 
float>) - define <4 x float> @vfmax_v4f32_vv(<4 x float> %a, <4 x float> %b) { ; CHECK-LABEL: vfmax_v4f32_vv: ; CHECK: # %bb.0: @@ -372,8 +360,6 @@ define <4 x float> @vfmax_v4f32_fv(<4 x float> %a, float %b) { ret <4 x float> %v } -declare <8 x float> @llvm.maxnum.v8f32(<8 x float>, <8 x float>) - define <8 x float> @vfmax_v8f32_vv(<8 x float> %a, <8 x float> %b) { ; CHECK-LABEL: vfmax_v8f32_vv: ; CHECK: # %bb.0: @@ -408,8 +394,6 @@ define <8 x float> @vfmax_v8f32_fv(<8 x float> %a, float %b) { ret <8 x float> %v } -declare <16 x float> @llvm.maxnum.v16f32(<16 x float>, <16 x float>) - define <16 x float> @vfmax_v16f32_vv(<16 x float> %a, <16 x float> %b) { ; CHECK-LABEL: vfmax_v16f32_vv: ; CHECK: # %bb.0: @@ -444,8 +428,6 @@ define <16 x float> @vfmax_v16f32_fv(<16 x float> %a, float %b) { ret <16 x float> %v } -declare <2 x double> @llvm.maxnum.v2f64(<2 x double>, <2 x double>) - define <2 x double> @vfmax_v2f64_vv(<2 x double> %a, <2 x double> %b) { ; CHECK-LABEL: vfmax_v2f64_vv: ; CHECK: # %bb.0: @@ -480,8 +462,6 @@ define <2 x double> @vfmax_v2f64_fv(<2 x double> %a, double %b) { ret <2 x double> %v } -declare <4 x double> @llvm.maxnum.v4f64(<4 x double>, <4 x double>) - define <4 x double> @vfmax_v4f64_vv(<4 x double> %a, <4 x double> %b) { ; CHECK-LABEL: vfmax_v4f64_vv: ; CHECK: # %bb.0: @@ -516,8 +496,6 @@ define <4 x double> @vfmax_v4f64_fv(<4 x double> %a, double %b) { ret <4 x double> %v } -declare <8 x double> @llvm.maxnum.v8f64(<8 x double>, <8 x double>) - define <8 x double> @vfmax_v8f64_vv(<8 x double> %a, <8 x double> %b) { ; CHECK-LABEL: vfmax_v8f64_vv: ; CHECK: # %bb.0: @@ -552,8 +530,6 @@ define <8 x double> @vfmax_v8f64_fv(<8 x double> %a, double %b) { ret <8 x double> %v } -declare <16 x double> @llvm.maxnum.v16f64(<16 x double>, <16 x double>) - define <16 x double> @vfmax_v16f64_vv(<16 x double> %a, <16 x double> %b) { ; CHECK-LABEL: vfmax_v16f64_vv: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll 
b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll index c4a143de5cff1..56f7a8d48c5a1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare <2 x half> @llvm.vp.minnum.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32) - define <2 x half> @vfmin_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmin_vv_v2f16: ; ZVFH: # %bb.0: @@ -52,8 +50,6 @@ define <2 x half> @vfmin_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %vb, i32 z ret <2 x half> %v } -declare <4 x half> @llvm.vp.minnum.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32) - define <4 x half> @vfmin_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmin_vv_v4f16: ; ZVFH: # %bb.0: @@ -96,8 +92,6 @@ define <4 x half> @vfmin_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %vb, i32 z ret <4 x half> %v } -declare <8 x half> @llvm.vp.minnum.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32) - define <8 x half> @vfmin_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmin_vv_v8f16: ; ZVFH: # %bb.0: @@ -140,8 +134,6 @@ define <8 x half> @vfmin_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %vb, i32 z ret <8 x half> %v } -declare <16 x half> @llvm.vp.minnum.v16f16(<16 x half>, <16 x half>, <16 x i1>, i32) - define <16 x half> @vfmin_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmin_vv_v16f16: ; ZVFH: # %bb.0: @@ -184,8 +176,6 @@ define <16 x half> @vfmin_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %vb, i ret <16 x half> %v } -declare <2 x float> @llvm.vp.minnum.v2f32(<2 x float>, <2 x float>, <2 x i1>, i32) - define <2 x float> @vfmin_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vfmin_vv_v2f32: ; CHECK: # %bb.0: @@ -206,8 +196,6 @@ define <2 x float> @vfmin_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %vb, i3 ret <2 x float> %v } -declare <4 x float> @llvm.vp.minnum.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32) - define <4 x float> @vfmin_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v4f32: ; CHECK: # %bb.0: @@ -228,8 +216,6 @@ define <4 x float> @vfmin_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %vb, i3 ret <4 x float> %v } -declare <8 x float> @llvm.vp.minnum.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32) - define <8 x float> @vfmin_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v8f32: ; CHECK: # %bb.0: @@ -250,8 +236,6 @@ define <8 x float> @vfmin_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %vb, i3 ret <8 x float> %v } -declare <16 x float> @llvm.vp.minnum.v16f32(<16 x float>, <16 x float>, <16 x i1>, i32) - define <16 x float> @vfmin_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v16f32: ; CHECK: # %bb.0: @@ -272,8 +256,6 @@ define <16 x float> @vfmin_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %vb ret <16 x float> %v } -declare <2 x double> @llvm.vp.minnum.v2f64(<2 x double>, <2 x double>, <2 x i1>, i32) - define <2 x double> @vfmin_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v2f64: ; CHECK: # %bb.0: @@ -294,8 +276,6 @@ define <2 x double> @vfmin_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %vb, ret <2 x double> %v } -declare <4 x double> @llvm.vp.minnum.v4f64(<4 x double>, <4 x double>, <4 x i1>, i32) - define <4 x double> @vfmin_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v4f64: ; CHECK: # %bb.0: @@ -316,8 +296,6 @@ define <4 x double> @vfmin_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %vb, ret <4 x double> %v } -declare <8 x double> 
@llvm.vp.minnum.v8f64(<8 x double>, <8 x double>, <8 x i1>, i32) - define <8 x double> @vfmin_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v8f64: ; CHECK: # %bb.0: @@ -338,8 +316,6 @@ define <8 x double> @vfmin_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %vb, ret <8 x double> %v } -declare <15 x double> @llvm.vp.minnum.v15f64(<15 x double>, <15 x double>, <15 x i1>, i32) - define <15 x double> @vfmin_vv_v15f64(<15 x double> %va, <15 x double> %vb, <15 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v15f64: ; CHECK: # %bb.0: @@ -360,8 +336,6 @@ define <15 x double> @vfmin_vv_v15f64_unmasked(<15 x double> %va, <15 x double> ret <15 x double> %v } -declare <16 x double> @llvm.vp.minnum.v16f64(<16 x double>, <16 x double>, <16 x i1>, i32) - define <16 x double> @vfmin_vv_v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v16f64: ; CHECK: # %bb.0: @@ -382,8 +356,6 @@ define <16 x double> @vfmin_vv_v16f64_unmasked(<16 x double> %va, <16 x double> ret <16 x double> %v } -declare <32 x double> @llvm.vp.minnum.v32f64(<32 x double>, <32 x double>, <32 x i1>, i32) - define <32 x double> @vfmin_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v32f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin.ll index 90afe36a36c0f..c9bb99d6cb3d6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare <2 x half> @llvm.minnum.v2f16(<2 x half>, <2 x half>) - define <2 x half> @vfmin_v2f16_vv(<2 x half> %a, <2 x half> %b) { ; ZVFH-LABEL: vfmin_v2f16_vv: ; ZVFH: # %bb.0: @@ -81,8 +79,6 @@ define 
<2 x half> @vfmin_v2f16_fv(<2 x half> %a, half %b) { ret <2 x half> %v } -declare <4 x half> @llvm.minnum.v4f16(<4 x half>, <4 x half>) - define <4 x half> @vfmin_v4f16_vv(<4 x half> %a, <4 x half> %b) { ; ZVFH-LABEL: vfmin_v4f16_vv: ; ZVFH: # %bb.0: @@ -154,8 +150,6 @@ define <4 x half> @vfmin_v4f16_fv(<4 x half> %a, half %b) { ret <4 x half> %v } -declare <8 x half> @llvm.minnum.v8f16(<8 x half>, <8 x half>) - define <8 x half> @vfmin_v8f16_vv(<8 x half> %a, <8 x half> %b) { ; ZVFH-LABEL: vfmin_v8f16_vv: ; ZVFH: # %bb.0: @@ -227,8 +221,6 @@ define <8 x half> @vfmin_v8f16_fv(<8 x half> %a, half %b) { ret <8 x half> %v } -declare <16 x half> @llvm.minnum.v16f16(<16 x half>, <16 x half>) - define <16 x half> @vfmin_v16f16_vv(<16 x half> %a, <16 x half> %b) { ; ZVFH-LABEL: vfmin_v16f16_vv: ; ZVFH: # %bb.0: @@ -300,8 +292,6 @@ define <16 x half> @vfmin_v16f16_fv(<16 x half> %a, half %b) { ret <16 x half> %v } -declare <2 x float> @llvm.minnum.v2f32(<2 x float>, <2 x float>) - define <2 x float> @vfmin_v2f32_vv(<2 x float> %a, <2 x float> %b) { ; CHECK-LABEL: vfmin_v2f32_vv: ; CHECK: # %bb.0: @@ -336,8 +326,6 @@ define <2 x float> @vfmin_v2f32_fv(<2 x float> %a, float %b) { ret <2 x float> %v } -declare <4 x float> @llvm.minnum.v4f32(<4 x float>, <4 x float>) - define <4 x float> @vfmin_v4f32_vv(<4 x float> %a, <4 x float> %b) { ; CHECK-LABEL: vfmin_v4f32_vv: ; CHECK: # %bb.0: @@ -372,8 +360,6 @@ define <4 x float> @vfmin_v4f32_fv(<4 x float> %a, float %b) { ret <4 x float> %v } -declare <8 x float> @llvm.minnum.v8f32(<8 x float>, <8 x float>) - define <8 x float> @vfmin_v8f32_vv(<8 x float> %a, <8 x float> %b) { ; CHECK-LABEL: vfmin_v8f32_vv: ; CHECK: # %bb.0: @@ -408,8 +394,6 @@ define <8 x float> @vfmin_v8f32_fv(<8 x float> %a, float %b) { ret <8 x float> %v } -declare <16 x float> @llvm.minnum.v16f32(<16 x float>, <16 x float>) - define <16 x float> @vfmin_v16f32_vv(<16 x float> %a, <16 x float> %b) { ; CHECK-LABEL: vfmin_v16f32_vv: ; CHECK: # %bb.0: @@ -444,8 
+428,6 @@ define <16 x float> @vfmin_v16f32_fv(<16 x float> %a, float %b) { ret <16 x float> %v } -declare <2 x double> @llvm.minnum.v2f64(<2 x double>, <2 x double>) - define <2 x double> @vfmin_v2f64_vv(<2 x double> %a, <2 x double> %b) { ; CHECK-LABEL: vfmin_v2f64_vv: ; CHECK: # %bb.0: @@ -480,8 +462,6 @@ define <2 x double> @vfmin_v2f64_fv(<2 x double> %a, double %b) { ret <2 x double> %v } -declare <4 x double> @llvm.minnum.v4f64(<4 x double>, <4 x double>) - define <4 x double> @vfmin_v4f64_vv(<4 x double> %a, <4 x double> %b) { ; CHECK-LABEL: vfmin_v4f64_vv: ; CHECK: # %bb.0: @@ -516,8 +496,6 @@ define <4 x double> @vfmin_v4f64_fv(<4 x double> %a, double %b) { ret <4 x double> %v } -declare <8 x double> @llvm.minnum.v8f64(<8 x double>, <8 x double>) - define <8 x double> @vfmin_v8f64_vv(<8 x double> %a, <8 x double> %b) { ; CHECK-LABEL: vfmin_v8f64_vv: ; CHECK: # %bb.0: @@ -552,8 +530,6 @@ define <8 x double> @vfmin_v8f64_fv(<8 x double> %a, double %b) { ret <8 x double> %v } -declare <16 x double> @llvm.minnum.v16f64(<16 x double>, <16 x double>) - define <16 x double> @vfmin_v16f64_vv(<16 x double> %a, <16 x double> %b) { ; CHECK-LABEL: vfmin_v16f64_vv: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmsac-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmsac-vp.ll index 99fc035235671..f8478c13b3aee 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmsac-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmsac-vp.ll @@ -4,11 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare <2 x half> @llvm.vp.fma.v2f16(<2 x half>, <2 x half>, <2 x half>, <2 x i1>, i32) -declare <2 x half> @llvm.vp.fneg.v2f16(<2 x half>, <2 x i1>, i32) -declare <2 x half> @llvm.vp.merge.v2f16(<2 x i1>, <2 x half>, <2 x half>, i32) -declare <2 x half> @llvm.vp.select.v2f16(<2 x i1>, <2 x half>, <2 x half>, i32) - define <2 x half> @vfmsac_vv_v2f16(<2 x half> %a, <2 x 
half> %b, <2 x half> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsac_vv_v2f16: ; CHECK: # %bb.0: @@ -123,11 +118,6 @@ define <2 x half> @vfmsac_vf_v2f16_commute_ta(<2 x half> %a, half %b, <2 x half> ret <2 x half> %u } -declare <4 x half> @llvm.vp.fma.v4f16(<4 x half>, <4 x half>, <4 x half>, <4 x i1>, i32) -declare <4 x half> @llvm.vp.fneg.v4f16(<4 x half>, <4 x i1>, i32) -declare <4 x half> @llvm.vp.merge.v4f16(<4 x i1>, <4 x half>, <4 x half>, i32) -declare <4 x half> @llvm.vp.select.v4f16(<4 x i1>, <4 x half>, <4 x half>, i32) - define <4 x half> @vfmsac_vv_v4f16(<4 x half> %a, <4 x half> %b, <4 x half> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsac_vv_v4f16: ; CHECK: # %bb.0: @@ -242,11 +232,6 @@ define <4 x half> @vfmsac_vf_v4f16_commute_ta(<4 x half> %a, half %b, <4 x half> ret <4 x half> %u } -declare <8 x half> @llvm.vp.fma.v8f16(<8 x half>, <8 x half>, <8 x half>, <8 x i1>, i32) -declare <8 x half> @llvm.vp.fneg.v8f16(<8 x half>, <8 x i1>, i32) -declare <8 x half> @llvm.vp.merge.v8f16(<8 x i1>, <8 x half>, <8 x half>, i32) -declare <8 x half> @llvm.vp.select.v8f16(<8 x i1>, <8 x half>, <8 x half>, i32) - define <8 x half> @vfmsac_vv_v8f16(<8 x half> %a, <8 x half> %b, <8 x half> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsac_vv_v8f16: ; CHECK: # %bb.0: @@ -361,11 +346,6 @@ define <8 x half> @vfmsac_vf_v8f16_commute_ta(<8 x half> %a, half %b, <8 x half> ret <8 x half> %u } -declare <16 x half> @llvm.vp.fma.v16f16(<16 x half>, <16 x half>, <16 x half>, <16 x i1>, i32) -declare <16 x half> @llvm.vp.fneg.v16f16(<16 x half>, <16 x i1>, i32) -declare <16 x half> @llvm.vp.merge.v16f16(<16 x i1>, <16 x half>, <16 x half>, i32) -declare <16 x half> @llvm.vp.select.v16f16(<16 x i1>, <16 x half>, <16 x half>, i32) - define <16 x half> @vfmsac_vv_v16f16(<16 x half> %a, <16 x half> %b, <16 x half> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsac_vv_v16f16: ; CHECK: # %bb.0: @@ -480,11 +460,6 @@ define <16 x half> 
@vfmsac_vf_v16f16_commute_ta(<16 x half> %a, half %b, <16 x h ret <16 x half> %u } -declare <32 x half> @llvm.vp.fma.v32f16(<32 x half>, <32 x half>, <32 x half>, <32 x i1>, i32) -declare <32 x half> @llvm.vp.fneg.v32f16(<32 x half>, <32 x i1>, i32) -declare <32 x half> @llvm.vp.merge.v32f16(<32 x i1>, <32 x half>, <32 x half>, i32) -declare <32 x half> @llvm.vp.select.v32f16(<32 x i1>, <32 x half>, <32 x half>, i32) - define <32 x half> @vfmsac_vv_v32f16(<32 x half> %a, <32 x half> %b, <32 x half> %c, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsac_vv_v32f16: ; CHECK: # %bb.0: @@ -599,11 +574,6 @@ define <32 x half> @vfmsac_vf_v32f16_commute_ta(<32 x half> %a, half %b, <32 x h ret <32 x half> %u } -declare <2 x float> @llvm.vp.fma.v2f32(<2 x float>, <2 x float>, <2 x float>, <2 x i1>, i32) -declare <2 x float> @llvm.vp.fneg.v2f32(<2 x float>, <2 x i1>, i32) -declare <2 x float> @llvm.vp.merge.v2f32(<2 x i1>, <2 x float>, <2 x float>, i32) -declare <2 x float> @llvm.vp.select.v2f32(<2 x i1>, <2 x float>, <2 x float>, i32) - define <2 x float> @vfmsac_vv_v2f32(<2 x float> %a, <2 x float> %b, <2 x float> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsac_vv_v2f32: ; CHECK: # %bb.0: @@ -718,11 +688,6 @@ define <2 x float> @vfmsac_vf_v2f32_commute_ta(<2 x float> %a, float %b, <2 x fl ret <2 x float> %u } -declare <4 x float> @llvm.vp.fma.v4f32(<4 x float>, <4 x float>, <4 x float>, <4 x i1>, i32) -declare <4 x float> @llvm.vp.fneg.v4f32(<4 x float>, <4 x i1>, i32) -declare <4 x float> @llvm.vp.merge.v4f32(<4 x i1>, <4 x float>, <4 x float>, i32) -declare <4 x float> @llvm.vp.select.v4f32(<4 x i1>, <4 x float>, <4 x float>, i32) - define <4 x float> @vfmsac_vv_v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsac_vv_v4f32: ; CHECK: # %bb.0: @@ -837,11 +802,6 @@ define <4 x float> @vfmsac_vf_v4f32_commute_ta(<4 x float> %a, float %b, <4 x fl ret <4 x float> %u } -declare <8 x float> 
@llvm.vp.fma.v8f32(<8 x float>, <8 x float>, <8 x float>, <8 x i1>, i32) -declare <8 x float> @llvm.vp.fneg.v8f32(<8 x float>, <8 x i1>, i32) -declare <8 x float> @llvm.vp.merge.v8f32(<8 x i1>, <8 x float>, <8 x float>, i32) -declare <8 x float> @llvm.vp.select.v8f32(<8 x i1>, <8 x float>, <8 x float>, i32) - define <8 x float> @vfmsac_vv_v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsac_vv_v8f32: ; CHECK: # %bb.0: @@ -956,11 +916,6 @@ define <8 x float> @vfmsac_vf_v8f32_commute_ta(<8 x float> %a, float %b, <8 x fl ret <8 x float> %u } -declare <16 x float> @llvm.vp.fma.v16f32(<16 x float>, <16 x float>, <16 x float>, <16 x i1>, i32) -declare <16 x float> @llvm.vp.fneg.v16f32(<16 x float>, <16 x i1>, i32) -declare <16 x float> @llvm.vp.merge.v16f32(<16 x i1>, <16 x float>, <16 x float>, i32) -declare <16 x float> @llvm.vp.select.v16f32(<16 x i1>, <16 x float>, <16 x float>, i32) - define <16 x float> @vfmsac_vv_v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsac_vv_v16f32: ; CHECK: # %bb.0: @@ -1075,11 +1030,6 @@ define <16 x float> @vfmsac_vf_v16f32_commute_ta(<16 x float> %a, float %b, <16 ret <16 x float> %u } -declare <2 x double> @llvm.vp.fma.v2f64(<2 x double>, <2 x double>, <2 x double>, <2 x i1>, i32) -declare <2 x double> @llvm.vp.fneg.v2f64(<2 x double>, <2 x i1>, i32) -declare <2 x double> @llvm.vp.merge.v2f64(<2 x i1>, <2 x double>, <2 x double>, i32) -declare <2 x double> @llvm.vp.select.v2f64(<2 x i1>, <2 x double>, <2 x double>, i32) - define <2 x double> @vfmsac_vv_v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsac_vv_v2f64: ; CHECK: # %bb.0: @@ -1194,11 +1144,6 @@ define <2 x double> @vfmsac_vf_v2f64_commute_ta(<2 x double> %a, double %b, <2 x ret <2 x double> %u } -declare <4 x double> @llvm.vp.fma.v4f64(<4 x double>, <4 x double>, <4 x double>, <4 x i1>, i32) -declare 
<4 x double> @llvm.vp.fneg.v4f64(<4 x double>, <4 x i1>, i32) -declare <4 x double> @llvm.vp.merge.v4f64(<4 x i1>, <4 x double>, <4 x double>, i32) -declare <4 x double> @llvm.vp.select.v4f64(<4 x i1>, <4 x double>, <4 x double>, i32) - define <4 x double> @vfmsac_vv_v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsac_vv_v4f64: ; CHECK: # %bb.0: @@ -1313,11 +1258,6 @@ define <4 x double> @vfmsac_vf_v4f64_commute_ta(<4 x double> %a, double %b, <4 x ret <4 x double> %u } -declare <8 x double> @llvm.vp.fma.v8f64(<8 x double>, <8 x double>, <8 x double>, <8 x i1>, i32) -declare <8 x double> @llvm.vp.fneg.v8f64(<8 x double>, <8 x i1>, i32) -declare <8 x double> @llvm.vp.merge.v8f64(<8 x i1>, <8 x double>, <8 x double>, i32) -declare <8 x double> @llvm.vp.select.v8f64(<8 x i1>, <8 x double>, <8 x double>, i32) - define <8 x double> @vfmsac_vv_v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsac_vv_v8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmsub-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmsub-constrained-sdnode.ll index 268494bf337e1..fd733c8dac518 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmsub-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmsub-constrained-sdnode.ll @@ -7,8 +7,6 @@ ; This tests a mix of vfmsac and vfmsub by using different operand orders to ; trigger commuting in TwoAddressInstructionPass. 
-declare <2 x half> @llvm.experimental.constrained.fma.v2f16(<2 x half>, <2 x half>, <2 x half>, metadata, metadata) - define <2 x half> @vfmsub_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x half> %vc) strictfp { ; CHECK-LABEL: vfmsub_vv_v2f16: ; CHECK: # %bb.0: @@ -33,8 +31,6 @@ define <2 x half> @vfmsub_vf_v2f16(<2 x half> %va, <2 x half> %vb, half %c) stri ret <2 x half> %vd } -declare <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half>, <4 x half>, <4 x half>, metadata, metadata) - define <4 x half> @vfmsub_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x half> %vc) strictfp { ; CHECK-LABEL: vfmsub_vv_v4f16: ; CHECK: # %bb.0: @@ -59,8 +55,6 @@ define <4 x half> @vfmsub_vf_v4f16(<4 x half> %va, <4 x half> %vb, half %c) stri ret <4 x half> %vd } -declare <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half>, <8 x half>, <8 x half>, metadata, metadata) - define <8 x half> @vfmsub_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x half> %vc) strictfp { ; CHECK-LABEL: vfmsub_vv_v8f16: ; CHECK: # %bb.0: @@ -85,8 +79,6 @@ define <8 x half> @vfmsub_vf_v8f16(<8 x half> %va, <8 x half> %vb, half %c) stri ret <8 x half> %vd } -declare <16 x half> @llvm.experimental.constrained.fma.v16f16(<16 x half>, <16 x half>, <16 x half>, metadata, metadata) - define <16 x half> @vfmsub_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x half> %vc) strictfp { ; CHECK-LABEL: vfmsub_vv_v16f16: ; CHECK: # %bb.0: @@ -111,8 +103,6 @@ define <16 x half> @vfmsub_vf_v16f16(<16 x half> %va, <16 x half> %vb, half %c) ret <16 x half> %vd } -declare <32 x half> @llvm.experimental.constrained.fma.v32f16(<32 x half>, <32 x half>, <32 x half>, metadata, metadata) - define <32 x half> @vfmsub_vv_v32f16(<32 x half> %va, <32 x half> %vb, <32 x half> %vc) strictfp { ; CHECK-LABEL: vfmsub_vv_v32f16: ; CHECK: # %bb.0: @@ -139,8 +129,6 @@ define <32 x half> @vfmsub_vf_v32f16(<32 x half> %va, <32 x half> %vb, half %c) ret <32 x half> %vd } -declare <2 x float> 
@llvm.experimental.constrained.fma.v2f32(<2 x float>, <2 x float>, <2 x float>, metadata, metadata) - define <2 x float> @vfmsub_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x float> %vc) strictfp { ; CHECK-LABEL: vfmsub_vv_v2f32: ; CHECK: # %bb.0: @@ -165,8 +153,6 @@ define <2 x float> @vfmsub_vf_v2f32(<2 x float> %va, <2 x float> %vb, float %c) ret <2 x float> %vd } -declare <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float>, <4 x float>, <4 x float>, metadata, metadata) - define <4 x float> @vfmsub_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x float> %vc) strictfp { ; CHECK-LABEL: vfmsub_vv_v4f32: ; CHECK: # %bb.0: @@ -191,8 +177,6 @@ define <4 x float> @vfmsub_vf_v4f32(<4 x float> %va, <4 x float> %vb, float %c) ret <4 x float> %vd } -declare <8 x float> @llvm.experimental.constrained.fma.v8f32(<8 x float>, <8 x float>, <8 x float>, metadata, metadata) - define <8 x float> @vfmsub_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x float> %vc) strictfp { ; CHECK-LABEL: vfmsub_vv_v8f32: ; CHECK: # %bb.0: @@ -217,8 +201,6 @@ define <8 x float> @vfmsub_vf_v8f32(<8 x float> %va, <8 x float> %vb, float %c) ret <8 x float> %vd } -declare <16 x float> @llvm.experimental.constrained.fma.v16f32(<16 x float>, <16 x float>, <16 x float>, metadata, metadata) - define <16 x float> @vfmsub_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x float> %vc) strictfp { ; CHECK-LABEL: vfmsub_vv_v16f32: ; CHECK: # %bb.0: @@ -243,8 +225,6 @@ define <16 x float> @vfmsub_vf_v16f32(<16 x float> %va, <16 x float> %vb, float ret <16 x float> %vd } -declare <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double>, <2 x double>, <2 x double>, metadata, metadata) - define <2 x double> @vfmsub_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x double> %vc) strictfp { ; CHECK-LABEL: vfmsub_vv_v2f64: ; CHECK: # %bb.0: @@ -269,8 +249,6 @@ define <2 x double> @vfmsub_vf_v2f64(<2 x double> %va, <2 x double> %vb, double ret <2 x double> %vd } -declare <4 x double> 
@llvm.experimental.constrained.fma.v4f64(<4 x double>, <4 x double>, <4 x double>, metadata, metadata) - define <4 x double> @vfmsub_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x double> %vc) strictfp { ; CHECK-LABEL: vfmsub_vv_v4f64: ; CHECK: # %bb.0: @@ -295,8 +273,6 @@ define <4 x double> @vfmsub_vf_v4f64(<4 x double> %va, <4 x double> %vb, double ret <4 x double> %vd } -declare <8 x double> @llvm.experimental.constrained.fma.v8f64(<8 x double>, <8 x double>, <8 x double>, metadata, metadata) - define <8 x double> @vfmsub_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x double> %vc) strictfp { ; CHECK-LABEL: vfmsub_vv_v8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-constrained-sdnode.ll index c8148a5e8d49c..f6b2327f5a41c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-constrained-sdnode.ll @@ -4,7 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare <1 x half> @llvm.experimental.constrained.fmul.v1f16(<1 x half>, <1 x half>, metadata, metadata) define <1 x half> @vfmul_vv_v1f16(<1 x half> %va, <1 x half> %vb) strictfp { ; CHECK-LABEL: vfmul_vv_v1f16: ; CHECK: # %bb.0: # %entry @@ -28,7 +27,6 @@ define <1 x half> @vfmul_vf_v1f16(<1 x half> %va, half %b) strictfp { ret <1 x half> %vc } -declare <2 x half> @llvm.experimental.constrained.fmul.v2f16(<2 x half>, <2 x half>, metadata, metadata) define <2 x half> @vfmul_vv_v2f16(<2 x half> %va, <2 x half> %vb) strictfp { ; CHECK-LABEL: vfmul_vv_v2f16: ; CHECK: # %bb.0: # %entry @@ -52,7 +50,6 @@ define <2 x half> @vfmul_vf_v2f16(<2 x half> %va, half %b) strictfp { ret <2 x half> %vc } -declare <4 x half> @llvm.experimental.constrained.fmul.v4f16(<4 x half>, <4 x half>, metadata, metadata) define <4 x half> @vfmul_vv_v4f16(<4 x half> %va, <4 
x half> %vb) strictfp { ; CHECK-LABEL: vfmul_vv_v4f16: ; CHECK: # %bb.0: # %entry @@ -76,7 +73,6 @@ define <4 x half> @vfmul_vf_v4f16(<4 x half> %va, half %b) strictfp { ret <4 x half> %vc } -declare <8 x half> @llvm.experimental.constrained.fmul.v8f16(<8 x half>, <8 x half>, metadata, metadata) define <8 x half> @vfmul_vv_v8f16(<8 x half> %va, <8 x half> %vb) strictfp { ; CHECK-LABEL: vfmul_vv_v8f16: ; CHECK: # %bb.0: # %entry @@ -100,7 +96,6 @@ define <8 x half> @vfmul_vf_v8f16(<8 x half> %va, half %b) strictfp { ret <8 x half> %vc } -declare <16 x half> @llvm.experimental.constrained.fmul.v16f16(<16 x half>, <16 x half>, metadata, metadata) define <16 x half> @vfmul_vv_v16f16(<16 x half> %va, <16 x half> %vb) strictfp { ; CHECK-LABEL: vfmul_vv_v16f16: ; CHECK: # %bb.0: # %entry @@ -124,7 +119,6 @@ define <16 x half> @vfmul_vf_v16f16(<16 x half> %va, half %b) strictfp { ret <16 x half> %vc } -declare <32 x half> @llvm.experimental.constrained.fmul.v32f16(<32 x half>, <32 x half>, metadata, metadata) define <32 x half> @vfmul_vv_v32f16(<32 x half> %va, <32 x half> %vb) strictfp { ; CHECK-LABEL: vfmul_vv_v32f16: ; CHECK: # %bb.0: # %entry @@ -150,7 +144,6 @@ define <32 x half> @vfmul_vf_v32f16(<32 x half> %va, half %b) strictfp { ret <32 x half> %vc } -declare <1 x float> @llvm.experimental.constrained.fmul.v1f32(<1 x float>, <1 x float>, metadata, metadata) define <1 x float> @vfmul_vv_v1f32(<1 x float> %va, <1 x float> %vb) strictfp { ; CHECK-LABEL: vfmul_vv_v1f32: ; CHECK: # %bb.0: # %entry @@ -174,7 +167,6 @@ define <1 x float> @vfmul_vf_v1f32(<1 x float> %va, float %b) strictfp { ret <1 x float> %vc } -declare <2 x float> @llvm.experimental.constrained.fmul.v2f32(<2 x float>, <2 x float>, metadata, metadata) define <2 x float> @vfmul_vv_v2f32(<2 x float> %va, <2 x float> %vb) strictfp { ; CHECK-LABEL: vfmul_vv_v2f32: ; CHECK: # %bb.0: # %entry @@ -198,7 +190,6 @@ define <2 x float> @vfmul_vf_v2f32(<2 x float> %va, float %b) strictfp { ret <2 x float> %vc } 
-declare <4 x float> @llvm.experimental.constrained.fmul.v4f32(<4 x float>, <4 x float>, metadata, metadata) define <4 x float> @vfmul_vv_v4f32(<4 x float> %va, <4 x float> %vb) strictfp { ; CHECK-LABEL: vfmul_vv_v4f32: ; CHECK: # %bb.0: # %entry @@ -222,7 +213,6 @@ define <4 x float> @vfmul_vf_v4f32(<4 x float> %va, float %b) strictfp { ret <4 x float> %vc } -declare <8 x float> @llvm.experimental.constrained.fmul.v8f32(<8 x float>, <8 x float>, metadata, metadata) define <8 x float> @vfmul_vv_v8f32(<8 x float> %va, <8 x float> %vb) strictfp { ; CHECK-LABEL: vfmul_vv_v8f32: ; CHECK: # %bb.0: # %entry @@ -246,7 +236,6 @@ define <8 x float> @vfmul_vf_v8f32(<8 x float> %va, float %b) strictfp { ret <8 x float> %vc } -declare <16 x float> @llvm.experimental.constrained.fmul.v16f32(<16 x float>, <16 x float>, metadata, metadata) define <16 x float> @vfmul_vv_v16f32(<16 x float> %va, <16 x float> %vb) strictfp { ; CHECK-LABEL: vfmul_vv_v16f32: ; CHECK: # %bb.0: # %entry @@ -270,7 +259,6 @@ define <16 x float> @vfmul_vf_v16f32(<16 x float> %va, float %b) strictfp { ret <16 x float> %vc } -declare <1 x double> @llvm.experimental.constrained.fmul.v1f64(<1 x double>, <1 x double>, metadata, metadata) define <1 x double> @vfmul_vv_v1f64(<1 x double> %va, <1 x double> %vb) strictfp { ; CHECK-LABEL: vfmul_vv_v1f64: ; CHECK: # %bb.0: # %entry @@ -294,7 +282,6 @@ define <1 x double> @vfmul_vf_v1f64(<1 x double> %va, double %b) strictfp { ret <1 x double> %vc } -declare <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double>, <2 x double>, metadata, metadata) define <2 x double> @vfmul_vv_v2f64(<2 x double> %va, <2 x double> %vb) strictfp { ; CHECK-LABEL: vfmul_vv_v2f64: ; CHECK: # %bb.0: # %entry @@ -318,7 +305,6 @@ define <2 x double> @vfmul_vf_v2f64(<2 x double> %va, double %b) strictfp { ret <2 x double> %vc } -declare <4 x double> @llvm.experimental.constrained.fmul.v4f64(<4 x double>, <4 x double>, metadata, metadata) define <4 x double> @vfmul_vv_v4f64(<4 x 
double> %va, <4 x double> %vb) strictfp { ; CHECK-LABEL: vfmul_vv_v4f64: ; CHECK: # %bb.0: # %entry @@ -342,7 +328,6 @@ define <4 x double> @vfmul_vf_v4f64(<4 x double> %va, double %b) strictfp { ret <4 x double> %vc } -declare <8 x double> @llvm.experimental.constrained.fmul.v8f64(<8 x double>, <8 x double>, metadata, metadata) define <8 x double> @vfmul_vv_v8f64(<8 x double> %va, <8 x double> %vb) strictfp { ; CHECK-LABEL: vfmul_vv_v8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-vp.ll index 3c0819e549552..167327fdbd571 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare <2 x half> @llvm.vp.fmul.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32) - define <2 x half> @vfmul_vv_v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmul_vv_v2f16: ; ZVFH: # %bb.0: @@ -104,8 +102,6 @@ define <2 x half> @vfmul_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext ret <2 x half> %v } -declare <3 x half> @llvm.vp.fmul.v3f16(<3 x half>, <3 x half>, <3 x i1>, i32) - define <3 x half> @vfmul_vv_v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmul_vv_v3f16: ; ZVFH: # %bb.0: @@ -127,8 +123,6 @@ define <3 x half> @vfmul_vv_v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i3 ret <3 x half> %v } -declare <4 x half> @llvm.vp.fmul.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32) - define <4 x half> @vfmul_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmul_vv_v4f16: ; ZVFH: # %bb.0: @@ -223,8 +217,6 @@ define <4 x half> @vfmul_vf_v4f16_unmasked(<4 x half> %va, half %b, i32 zeroext ret <4 x half> %v } -declare <8 x half> 
@llvm.vp.fmul.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32) - define <8 x half> @vfmul_vv_v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmul_vv_v8f16: ; ZVFH: # %bb.0: @@ -319,8 +311,6 @@ define <8 x half> @vfmul_vf_v8f16_unmasked(<8 x half> %va, half %b, i32 zeroext ret <8 x half> %v } -declare <16 x half> @llvm.vp.fmul.v16f16(<16 x half>, <16 x half>, <16 x i1>, i32) - define <16 x half> @vfmul_vv_v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmul_vv_v16f16: ; ZVFH: # %bb.0: @@ -415,8 +405,6 @@ define <16 x half> @vfmul_vf_v16f16_unmasked(<16 x half> %va, half %b, i32 zeroe ret <16 x half> %v } -declare <2 x float> @llvm.vp.fmul.v2f32(<2 x float>, <2 x float>, <2 x i1>, i32) - define <2 x float> @vfmul_vv_v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_v2f32: ; CHECK: # %bb.0: @@ -461,8 +449,6 @@ define <2 x float> @vfmul_vf_v2f32_unmasked(<2 x float> %va, float %b, i32 zeroe ret <2 x float> %v } -declare <4 x float> @llvm.vp.fmul.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32) - define <4 x float> @vfmul_vv_v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_v4f32: ; CHECK: # %bb.0: @@ -507,8 +493,6 @@ define <4 x float> @vfmul_vf_v4f32_unmasked(<4 x float> %va, float %b, i32 zeroe ret <4 x float> %v } -declare <8 x float> @llvm.vp.fmul.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32) - define <8 x float> @vfmul_vv_v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_v8f32: ; CHECK: # %bb.0: @@ -553,8 +537,6 @@ define <8 x float> @vfmul_vf_v8f32_unmasked(<8 x float> %va, float %b, i32 zeroe ret <8 x float> %v } -declare <16 x float> @llvm.vp.fmul.v16f32(<16 x float>, <16 x float>, <16 x i1>, i32) - define <16 x float> @vfmul_vv_v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_v16f32: ; CHECK: # %bb.0: 
@@ -599,8 +581,6 @@ define <16 x float> @vfmul_vf_v16f32_unmasked(<16 x float> %va, float %b, i32 ze ret <16 x float> %v } -declare <2 x double> @llvm.vp.fmul.v2f64(<2 x double>, <2 x double>, <2 x i1>, i32) - define <2 x double> @vfmul_vv_v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_v2f64: ; CHECK: # %bb.0: @@ -645,8 +625,6 @@ define <2 x double> @vfmul_vf_v2f64_unmasked(<2 x double> %va, double %b, i32 ze ret <2 x double> %v } -declare <4 x double> @llvm.vp.fmul.v4f64(<4 x double>, <4 x double>, <4 x i1>, i32) - define <4 x double> @vfmul_vv_v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_v4f64: ; CHECK: # %bb.0: @@ -691,8 +669,6 @@ define <4 x double> @vfmul_vf_v4f64_unmasked(<4 x double> %va, double %b, i32 ze ret <4 x double> %v } -declare <8 x double> @llvm.vp.fmul.v8f64(<8 x double>, <8 x double>, <8 x i1>, i32) - define <8 x double> @vfmul_vv_v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_v8f64: ; CHECK: # %bb.0: @@ -737,8 +713,6 @@ define <8 x double> @vfmul_vf_v8f64_unmasked(<8 x double> %va, double %b, i32 ze ret <8 x double> %v } -declare <16 x double> @llvm.vp.fmul.v16f64(<16 x double>, <16 x double>, <16 x i1>, i32) - define <16 x double> @vfmul_vv_v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_v16f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll index cc911d06d8d58..a9857880b5942 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare <2 x half> @llvm.vp.fmuladd.v2f16(<2 x half>, <2 x half>, <2 x half>, <2 x i1>, i32) - define 
<2 x half> @vfma_vv_v2f16(<2 x half> %va, <2 x half> %b, <2 x half> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v2f16: ; CHECK: # %bb.0: @@ -51,8 +49,6 @@ define <2 x half> @vfma_vf_v2f16_unmasked(<2 x half> %va, half %b, <2 x half> %v ret <2 x half> %v } -declare <4 x half> @llvm.vp.fmuladd.v4f16(<4 x half>, <4 x half>, <4 x half>, <4 x i1>, i32) - define <4 x half> @vfma_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x half> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v4f16: ; CHECK: # %bb.0: @@ -98,8 +94,6 @@ define <4 x half> @vfma_vf_v4f16_unmasked(<4 x half> %va, half %b, <4 x half> %v ret <4 x half> %v } -declare <8 x half> @llvm.vp.fmuladd.v8f16(<8 x half>, <8 x half>, <8 x half>, <8 x i1>, i32) - define <8 x half> @vfma_vv_v8f16(<8 x half> %va, <8 x half> %b, <8 x half> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v8f16: ; CHECK: # %bb.0: @@ -145,8 +139,6 @@ define <8 x half> @vfma_vf_v8f16_unmasked(<8 x half> %va, half %b, <8 x half> %v ret <8 x half> %v } -declare <16 x half> @llvm.vp.fmuladd.v16f16(<16 x half>, <16 x half>, <16 x half>, <16 x i1>, i32) - define <16 x half> @vfma_vv_v16f16(<16 x half> %va, <16 x half> %b, <16 x half> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v16f16: ; CHECK: # %bb.0: @@ -192,8 +184,6 @@ define <16 x half> @vfma_vf_v16f16_unmasked(<16 x half> %va, half %b, <16 x half ret <16 x half> %v } -declare <2 x float> @llvm.vp.fmuladd.v2f32(<2 x float>, <2 x float>, <2 x float>, <2 x i1>, i32) - define <2 x float> @vfma_vv_v2f32(<2 x float> %va, <2 x float> %b, <2 x float> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v2f32: ; CHECK: # %bb.0: @@ -239,8 +229,6 @@ define <2 x float> @vfma_vf_v2f32_unmasked(<2 x float> %va, float %b, <2 x float ret <2 x float> %v } -declare <4 x float> @llvm.vp.fmuladd.v4f32(<4 x float>, <4 x float>, <4 x float>, <4 x i1>, i32) - define <4 x float> @vfma_vv_v4f32(<4 x float> %va, <4 x float> %b, <4 x float> %c, <4 x i1> %m, 
i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v4f32: ; CHECK: # %bb.0: @@ -286,8 +274,6 @@ define <4 x float> @vfma_vf_v4f32_unmasked(<4 x float> %va, float %b, <4 x float ret <4 x float> %v } -declare <8 x float> @llvm.vp.fmuladd.v8f32(<8 x float>, <8 x float>, <8 x float>, <8 x i1>, i32) - define <8 x float> @vfma_vv_v8f32(<8 x float> %va, <8 x float> %b, <8 x float> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v8f32: ; CHECK: # %bb.0: @@ -333,8 +319,6 @@ define <8 x float> @vfma_vf_v8f32_unmasked(<8 x float> %va, float %b, <8 x float ret <8 x float> %v } -declare <16 x float> @llvm.vp.fmuladd.v16f32(<16 x float>, <16 x float>, <16 x float>, <16 x i1>, i32) - define <16 x float> @vfma_vv_v16f32(<16 x float> %va, <16 x float> %b, <16 x float> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v16f32: ; CHECK: # %bb.0: @@ -380,8 +364,6 @@ define <16 x float> @vfma_vf_v16f32_unmasked(<16 x float> %va, float %b, <16 x f ret <16 x float> %v } -declare <2 x double> @llvm.vp.fmuladd.v2f64(<2 x double>, <2 x double>, <2 x double>, <2 x i1>, i32) - define <2 x double> @vfma_vv_v2f64(<2 x double> %va, <2 x double> %b, <2 x double> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v2f64: ; CHECK: # %bb.0: @@ -427,8 +409,6 @@ define <2 x double> @vfma_vf_v2f64_unmasked(<2 x double> %va, double %b, <2 x do ret <2 x double> %v } -declare <4 x double> @llvm.vp.fmuladd.v4f64(<4 x double>, <4 x double>, <4 x double>, <4 x i1>, i32) - define <4 x double> @vfma_vv_v4f64(<4 x double> %va, <4 x double> %b, <4 x double> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v4f64: ; CHECK: # %bb.0: @@ -474,8 +454,6 @@ define <4 x double> @vfma_vf_v4f64_unmasked(<4 x double> %va, double %b, <4 x do ret <4 x double> %v } -declare <8 x double> @llvm.vp.fmuladd.v8f64(<8 x double>, <8 x double>, <8 x double>, <8 x i1>, i32) - define <8 x double> @vfma_vv_v8f64(<8 x double> %va, <8 x double> %b, <8 x double> %c, <8 x i1> %m, i32 zeroext %evl) { ; 
CHECK-LABEL: vfma_vv_v8f64: ; CHECK: # %bb.0: @@ -521,8 +499,6 @@ define <8 x double> @vfma_vf_v8f64_unmasked(<8 x double> %va, double %b, <8 x do ret <8 x double> %v } -declare <15 x double> @llvm.vp.fmuladd.v15f64(<15 x double>, <15 x double>, <15 x double>, <15 x i1>, i32) - define <15 x double> @vfma_vv_v15f64(<15 x double> %va, <15 x double> %b, <15 x double> %c, <15 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v15f64: ; CHECK: # %bb.0: @@ -548,8 +524,6 @@ define <15 x double> @vfma_vv_v15f64_unmasked(<15 x double> %va, <15 x double> % ret <15 x double> %v } -declare <16 x double> @llvm.vp.fmuladd.v16f64(<16 x double>, <16 x double>, <16 x double>, <16 x i1>, i32) - define <16 x double> @vfma_vv_v16f64(<16 x double> %va, <16 x double> %b, <16 x double> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v16f64: ; CHECK: # %bb.0: @@ -599,8 +573,6 @@ define <16 x double> @vfma_vf_v16f64_unmasked(<16 x double> %va, double %b, <16 ret <16 x double> %v } -declare <32 x double> @llvm.vp.fmuladd.v32f64(<32 x double>, <32 x double>, <32 x double>, <32 x i1>, i32) - define <32 x double> @vfma_vv_v32f64(<32 x double> %va, <32 x double> %b, <32 x double> %c, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v32f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll index dede0e707d929..84a89b23bc3b5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll @@ -204,8 +204,6 @@ define <16 x bfloat> @vfneg_vv_v16bf16_unmasked(<16 x bfloat> %va, i32 zeroext % ret <16 x bfloat> %v } -declare <2 x half> @llvm.vp.fneg.v2f16(<2 x half>, <2 x i1>, i32) - define <2 x half> @vfneg_vv_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfneg_vv_v2f16: ; ZVFH: # %bb.0: @@ -254,8 +252,6 @@ define <2 x half> @vfneg_vv_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) { ret <2 x half> %v } 
-declare <4 x half> @llvm.vp.fneg.v4f16(<4 x half>, <4 x i1>, i32) - define <4 x half> @vfneg_vv_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfneg_vv_v4f16: ; ZVFH: # %bb.0: @@ -304,8 +300,6 @@ define <4 x half> @vfneg_vv_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) { ret <4 x half> %v } -declare <8 x half> @llvm.vp.fneg.v8f16(<8 x half>, <8 x i1>, i32) - define <8 x half> @vfneg_vv_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfneg_vv_v8f16: ; ZVFH: # %bb.0: @@ -354,8 +348,6 @@ define <8 x half> @vfneg_vv_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) { ret <8 x half> %v } -declare <16 x half> @llvm.vp.fneg.v16f16(<16 x half>, <16 x i1>, i32) - define <16 x half> @vfneg_vv_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfneg_vv_v16f16: ; ZVFH: # %bb.0: @@ -404,8 +396,6 @@ define <16 x half> @vfneg_vv_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) ret <16 x half> %v } -declare <2 x float> @llvm.vp.fneg.v2f32(<2 x float>, <2 x i1>, i32) - define <2 x float> @vfneg_vv_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_v2f32: ; CHECK: # %bb.0: @@ -426,8 +416,6 @@ define <2 x float> @vfneg_vv_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) { ret <2 x float> %v } -declare <4 x float> @llvm.vp.fneg.v4f32(<4 x float>, <4 x i1>, i32) - define <4 x float> @vfneg_vv_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_v4f32: ; CHECK: # %bb.0: @@ -448,8 +436,6 @@ define <4 x float> @vfneg_vv_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) { ret <4 x float> %v } -declare <8 x float> @llvm.vp.fneg.v8f32(<8 x float>, <8 x i1>, i32) - define <8 x float> @vfneg_vv_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_v8f32: ; CHECK: # %bb.0: @@ -470,8 +456,6 @@ define <8 x float> @vfneg_vv_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) { ret <8 x float> %v } -declare <16 x float> @llvm.vp.fneg.v16f32(<16 x 
float>, <16 x i1>, i32) - define <16 x float> @vfneg_vv_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_v16f32: ; CHECK: # %bb.0: @@ -492,8 +476,6 @@ define <16 x float> @vfneg_vv_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl ret <16 x float> %v } -declare <2 x double> @llvm.vp.fneg.v2f64(<2 x double>, <2 x i1>, i32) - define <2 x double> @vfneg_vv_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_v2f64: ; CHECK: # %bb.0: @@ -514,8 +496,6 @@ define <2 x double> @vfneg_vv_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl) ret <2 x double> %v } -declare <4 x double> @llvm.vp.fneg.v4f64(<4 x double>, <4 x i1>, i32) - define <4 x double> @vfneg_vv_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_v4f64: ; CHECK: # %bb.0: @@ -536,8 +516,6 @@ define <4 x double> @vfneg_vv_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) ret <4 x double> %v } -declare <8 x double> @llvm.vp.fneg.v8f64(<8 x double>, <8 x i1>, i32) - define <8 x double> @vfneg_vv_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_v8f64: ; CHECK: # %bb.0: @@ -558,8 +536,6 @@ define <8 x double> @vfneg_vv_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl) ret <8 x double> %v } -declare <15 x double> @llvm.vp.fneg.v15f64(<15 x double>, <15 x i1>, i32) - define <15 x double> @vfneg_vv_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_v15f64: ; CHECK: # %bb.0: @@ -580,8 +556,6 @@ define <15 x double> @vfneg_vv_v15f64_unmasked(<15 x double> %va, i32 zeroext %e ret <15 x double> %v } -declare <16 x double> @llvm.vp.fneg.v16f64(<16 x double>, <16 x i1>, i32) - define <16 x double> @vfneg_vv_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_v16f64: ; CHECK: # %bb.0: @@ -602,8 +576,6 @@ define <16 x double> @vfneg_vv_v16f64_unmasked(<16 x double> %va, i32 zeroext %e ret <16 x double> %v } -declare <32 x double> 
@llvm.vp.fneg.v32f64(<32 x double>, <32 x i1>, i32) - define <32 x double> @vfneg_vv_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_v32f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmacc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmacc-vp.ll index 4ab94444b1b89..3bcf7496868d0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmacc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmacc-vp.ll @@ -4,11 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare <2 x half> @llvm.vp.fma.v2f16(<2 x half>, <2 x half>, <2 x half>, <2 x i1>, i32) -declare <2 x half> @llvm.vp.fneg.v2f16(<2 x half>, <2 x i1>, i32) -declare <2 x half> @llvm.vp.merge.v2f16(<2 x i1>, <2 x half>, <2 x half>, i32) -declare <2 x half> @llvm.vp.select.v2f16(<2 x i1>, <2 x half>, <2 x half>, i32) - define <2 x half> @vfnmacc_vv_v2f16(<2 x half> %a, <2 x half> %b, <2 x half> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_v2f16: ; CHECK: # %bb.0: @@ -131,11 +126,6 @@ define <2 x half> @vfnmacc_vf_v2f16_commute_ta(<2 x half> %a, half %b, <2 x half ret <2 x half> %u } -declare <4 x half> @llvm.vp.fma.v4f16(<4 x half>, <4 x half>, <4 x half>, <4 x i1>, i32) -declare <4 x half> @llvm.vp.fneg.v4f16(<4 x half>, <4 x i1>, i32) -declare <4 x half> @llvm.vp.merge.v4f16(<4 x i1>, <4 x half>, <4 x half>, i32) -declare <4 x half> @llvm.vp.select.v4f16(<4 x i1>, <4 x half>, <4 x half>, i32) - define <4 x half> @vfnmacc_vv_v4f16(<4 x half> %a, <4 x half> %b, <4 x half> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_v4f16: ; CHECK: # %bb.0: @@ -258,11 +248,6 @@ define <4 x half> @vfnmacc_vf_v4f16_commute_ta(<4 x half> %a, half %b, <4 x half ret <4 x half> %u } -declare <8 x half> @llvm.vp.fma.v8f16(<8 x half>, <8 x half>, <8 x half>, <8 x i1>, i32) -declare <8 x half> @llvm.vp.fneg.v8f16(<8 x half>, <8 x i1>, i32) 
-declare <8 x half> @llvm.vp.merge.v8f16(<8 x i1>, <8 x half>, <8 x half>, i32) -declare <8 x half> @llvm.vp.select.v8f16(<8 x i1>, <8 x half>, <8 x half>, i32) - define <8 x half> @vfnmacc_vv_v8f16(<8 x half> %a, <8 x half> %b, <8 x half> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_v8f16: ; CHECK: # %bb.0: @@ -385,11 +370,6 @@ define <8 x half> @vfnmacc_vf_v8f16_commute_ta(<8 x half> %a, half %b, <8 x half ret <8 x half> %u } -declare <16 x half> @llvm.vp.fma.v16f16(<16 x half>, <16 x half>, <16 x half>, <16 x i1>, i32) -declare <16 x half> @llvm.vp.fneg.v16f16(<16 x half>, <16 x i1>, i32) -declare <16 x half> @llvm.vp.merge.v16f16(<16 x i1>, <16 x half>, <16 x half>, i32) -declare <16 x half> @llvm.vp.select.v16f16(<16 x i1>, <16 x half>, <16 x half>, i32) - define <16 x half> @vfnmacc_vv_v16f16(<16 x half> %a, <16 x half> %b, <16 x half> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_v16f16: ; CHECK: # %bb.0: @@ -512,11 +492,6 @@ define <16 x half> @vfnmacc_vf_v16f16_commute_ta(<16 x half> %a, half %b, <16 x ret <16 x half> %u } -declare <32 x half> @llvm.vp.fma.v32f16(<32 x half>, <32 x half>, <32 x half>, <32 x i1>, i32) -declare <32 x half> @llvm.vp.fneg.v32f16(<32 x half>, <32 x i1>, i32) -declare <32 x half> @llvm.vp.merge.v32f16(<32 x i1>, <32 x half>, <32 x half>, i32) -declare <32 x half> @llvm.vp.select.v32f16(<32 x i1>, <32 x half>, <32 x half>, i32) - define <32 x half> @vfnmacc_vv_v32f16(<32 x half> %a, <32 x half> %b, <32 x half> %c, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_v32f16: ; CHECK: # %bb.0: @@ -639,11 +614,6 @@ define <32 x half> @vfnmacc_vf_v32f16_commute_ta(<32 x half> %a, half %b, <32 x ret <32 x half> %u } -declare <2 x float> @llvm.vp.fma.v2f32(<2 x float>, <2 x float>, <2 x float>, <2 x i1>, i32) -declare <2 x float> @llvm.vp.fneg.v2f32(<2 x float>, <2 x i1>, i32) -declare <2 x float> @llvm.vp.merge.v2f32(<2 x i1>, <2 x float>, <2 x float>, i32) -declare <2 x float> 
@llvm.vp.select.v2f32(<2 x i1>, <2 x float>, <2 x float>, i32) - define <2 x float> @vfnmacc_vv_v2f32(<2 x float> %a, <2 x float> %b, <2 x float> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_v2f32: ; CHECK: # %bb.0: @@ -766,11 +736,6 @@ define <2 x float> @vfnmacc_vf_v2f32_commute_ta(<2 x float> %a, float %b, <2 x f ret <2 x float> %u } -declare <4 x float> @llvm.vp.fma.v4f32(<4 x float>, <4 x float>, <4 x float>, <4 x i1>, i32) -declare <4 x float> @llvm.vp.fneg.v4f32(<4 x float>, <4 x i1>, i32) -declare <4 x float> @llvm.vp.merge.v4f32(<4 x i1>, <4 x float>, <4 x float>, i32) -declare <4 x float> @llvm.vp.select.v4f32(<4 x i1>, <4 x float>, <4 x float>, i32) - define <4 x float> @vfnmacc_vv_v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_v4f32: ; CHECK: # %bb.0: @@ -893,11 +858,6 @@ define <4 x float> @vfnmacc_vf_v4f32_commute_ta(<4 x float> %a, float %b, <4 x f ret <4 x float> %u } -declare <8 x float> @llvm.vp.fma.v8f32(<8 x float>, <8 x float>, <8 x float>, <8 x i1>, i32) -declare <8 x float> @llvm.vp.fneg.v8f32(<8 x float>, <8 x i1>, i32) -declare <8 x float> @llvm.vp.merge.v8f32(<8 x i1>, <8 x float>, <8 x float>, i32) -declare <8 x float> @llvm.vp.select.v8f32(<8 x i1>, <8 x float>, <8 x float>, i32) - define <8 x float> @vfnmacc_vv_v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_v8f32: ; CHECK: # %bb.0: @@ -1020,11 +980,6 @@ define <8 x float> @vfnmacc_vf_v8f32_commute_ta(<8 x float> %a, float %b, <8 x f ret <8 x float> %u } -declare <16 x float> @llvm.vp.fma.v16f32(<16 x float>, <16 x float>, <16 x float>, <16 x i1>, i32) -declare <16 x float> @llvm.vp.fneg.v16f32(<16 x float>, <16 x i1>, i32) -declare <16 x float> @llvm.vp.merge.v16f32(<16 x i1>, <16 x float>, <16 x float>, i32) -declare <16 x float> @llvm.vp.select.v16f32(<16 x i1>, <16 x float>, <16 x float>, i32) - define <16 x float> 
@vfnmacc_vv_v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_v16f32: ; CHECK: # %bb.0: @@ -1147,11 +1102,6 @@ define <16 x float> @vfnmacc_vf_v16f32_commute_ta(<16 x float> %a, float %b, <16 ret <16 x float> %u } -declare <2 x double> @llvm.vp.fma.v2f64(<2 x double>, <2 x double>, <2 x double>, <2 x i1>, i32) -declare <2 x double> @llvm.vp.fneg.v2f64(<2 x double>, <2 x i1>, i32) -declare <2 x double> @llvm.vp.merge.v2f64(<2 x i1>, <2 x double>, <2 x double>, i32) -declare <2 x double> @llvm.vp.select.v2f64(<2 x i1>, <2 x double>, <2 x double>, i32) - define <2 x double> @vfnmacc_vv_v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_v2f64: ; CHECK: # %bb.0: @@ -1274,11 +1224,6 @@ define <2 x double> @vfnmacc_vf_v2f64_commute_ta(<2 x double> %a, double %b, <2 ret <2 x double> %u } -declare <4 x double> @llvm.vp.fma.v4f64(<4 x double>, <4 x double>, <4 x double>, <4 x i1>, i32) -declare <4 x double> @llvm.vp.fneg.v4f64(<4 x double>, <4 x i1>, i32) -declare <4 x double> @llvm.vp.merge.v4f64(<4 x i1>, <4 x double>, <4 x double>, i32) -declare <4 x double> @llvm.vp.select.v4f64(<4 x i1>, <4 x double>, <4 x double>, i32) - define <4 x double> @vfnmacc_vv_v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_v4f64: ; CHECK: # %bb.0: @@ -1401,11 +1346,6 @@ define <4 x double> @vfnmacc_vf_v4f64_commute_ta(<4 x double> %a, double %b, <4 ret <4 x double> %u } -declare <8 x double> @llvm.vp.fma.v8f64(<8 x double>, <8 x double>, <8 x double>, <8 x i1>, i32) -declare <8 x double> @llvm.vp.fneg.v8f64(<8 x double>, <8 x i1>, i32) -declare <8 x double> @llvm.vp.merge.v8f64(<8 x i1>, <8 x double>, <8 x double>, i32) -declare <8 x double> @llvm.vp.select.v8f64(<8 x i1>, <8 x double>, <8 x double>, i32) - define <8 x double> @vfnmacc_vv_v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %c, 
<8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_v8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmadd-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmadd-constrained-sdnode.ll index afc89717596b2..48a3c9d695b56 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmadd-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmadd-constrained-sdnode.ll @@ -7,8 +7,6 @@ ; This tests a mix of vfnmacc and vfnmadd by using different operand orders to ; trigger commuting in TwoAddressInstructionPass. -declare <2 x half> @llvm.experimental.constrained.fma.v2f16(<2 x half>, <2 x half>, <2 x half>, metadata, metadata) - define <2 x half> @vfnmsub_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x half> %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_v2f16: ; CHECK: # %bb.0: @@ -35,8 +33,6 @@ define <2 x half> @vfnmsub_vf_v2f16(<2 x half> %va, <2 x half> %vb, half %c) str ret <2 x half> %vd } -declare <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half>, <4 x half>, <4 x half>, metadata, metadata) - define <4 x half> @vfnmsub_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x half> %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_v4f16: ; CHECK: # %bb.0: @@ -63,8 +59,6 @@ define <4 x half> @vfnmsub_vf_v4f16(<4 x half> %va, <4 x half> %vb, half %c) str ret <4 x half> %vd } -declare <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half>, <8 x half>, <8 x half>, metadata, metadata) - define <8 x half> @vfnmsub_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x half> %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_v8f16: ; CHECK: # %bb.0: @@ -91,8 +85,6 @@ define <8 x half> @vfnmsub_vf_v8f16(<8 x half> %va, <8 x half> %vb, half %c) str ret <8 x half> %vd } -declare <16 x half> @llvm.experimental.constrained.fma.v16f16(<16 x half>, <16 x half>, <16 x half>, metadata, metadata) - define <16 x half> @vfnmsub_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x half> %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_v16f16: 
; CHECK: # %bb.0: @@ -119,8 +111,6 @@ define <16 x half> @vfnmsub_vf_v16f16(<16 x half> %va, <16 x half> %vb, half %c) ret <16 x half> %vd } -declare <32 x half> @llvm.experimental.constrained.fma.v32f16(<32 x half>, <32 x half>, <32 x half>, metadata, metadata) - define <32 x half> @vfnmsub_vv_v32f16(<32 x half> %va, <32 x half> %vb, <32 x half> %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_v32f16: ; CHECK: # %bb.0: @@ -149,8 +139,6 @@ define <32 x half> @vfnmsub_vf_v32f16(<32 x half> %va, <32 x half> %vb, half %c) ret <32 x half> %vd } -declare <2 x float> @llvm.experimental.constrained.fma.v2f32(<2 x float>, <2 x float>, <2 x float>, metadata, metadata) - define <2 x float> @vfnmsub_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x float> %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_v2f32: ; CHECK: # %bb.0: @@ -177,8 +165,6 @@ define <2 x float> @vfnmsub_vf_v2f32(<2 x float> %va, <2 x float> %vb, float %c) ret <2 x float> %vd } -declare <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float>, <4 x float>, <4 x float>, metadata, metadata) - define <4 x float> @vfnmsub_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x float> %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_v4f32: ; CHECK: # %bb.0: @@ -205,8 +191,6 @@ define <4 x float> @vfnmsub_vf_v4f32(<4 x float> %va, <4 x float> %vb, float %c) ret <4 x float> %vd } -declare <8 x float> @llvm.experimental.constrained.fma.v8f32(<8 x float>, <8 x float>, <8 x float>, metadata, metadata) - define <8 x float> @vfnmsub_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x float> %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_v8f32: ; CHECK: # %bb.0: @@ -233,8 +217,6 @@ define <8 x float> @vfnmsub_vf_v8f32(<8 x float> %va, <8 x float> %vb, float %c) ret <8 x float> %vd } -declare <16 x float> @llvm.experimental.constrained.fma.v16f32(<16 x float>, <16 x float>, <16 x float>, metadata, metadata) - define <16 x float> @vfnmsub_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x float> %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_v16f32: ; 
CHECK: # %bb.0: @@ -261,8 +243,6 @@ define <16 x float> @vfnmsub_vf_v16f32(<16 x float> %va, <16 x float> %vb, float ret <16 x float> %vd } -declare <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double>, <2 x double>, <2 x double>, metadata, metadata) - define <2 x double> @vfnmsub_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x double> %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_v2f64: ; CHECK: # %bb.0: @@ -289,8 +269,6 @@ define <2 x double> @vfnmsub_vf_v2f64(<2 x double> %va, <2 x double> %vb, double ret <2 x double> %vd } -declare <4 x double> @llvm.experimental.constrained.fma.v4f64(<4 x double>, <4 x double>, <4 x double>, metadata, metadata) - define <4 x double> @vfnmsub_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x double> %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_v4f64: ; CHECK: # %bb.0: @@ -317,8 +295,6 @@ define <4 x double> @vfnmsub_vf_v4f64(<4 x double> %va, <4 x double> %vb, double ret <4 x double> %vd } -declare <8 x double> @llvm.experimental.constrained.fma.v8f64(<8 x double>, <8 x double>, <8 x double>, metadata, metadata) - define <8 x double> @vfnmsub_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x double> %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_v8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmsac-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmsac-vp.ll index 4d9b002cc785c..6ecddefa70119 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmsac-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmsac-vp.ll @@ -4,11 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare <2 x half> @llvm.vp.fma.v2f16(<2 x half>, <2 x half>, <2 x half>, <2 x i1>, i32) -declare <2 x half> @llvm.vp.fneg.v2f16(<2 x half>, <2 x i1>, i32) -declare <2 x half> @llvm.vp.merge.v2f16(<2 x i1>, <2 x half>, <2 x half>, i32) -declare <2 x half> @llvm.vp.select.v2f16(<2 x i1>, <2 x half>, <2 x half>, i32) - define <2 x half> 
@vfnmsac_vv_v2f16(<2 x half> %a, <2 x half> %b, <2 x half> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_v2f16: ; CHECK: # %bb.0: @@ -123,11 +118,6 @@ define <2 x half> @vfnmsac_vf_v2f16_commute_ta(<2 x half> %a, half %b, <2 x half ret <2 x half> %u } -declare <4 x half> @llvm.vp.fma.v4f16(<4 x half>, <4 x half>, <4 x half>, <4 x i1>, i32) -declare <4 x half> @llvm.vp.fneg.v4f16(<4 x half>, <4 x i1>, i32) -declare <4 x half> @llvm.vp.merge.v4f16(<4 x i1>, <4 x half>, <4 x half>, i32) -declare <4 x half> @llvm.vp.select.v4f16(<4 x i1>, <4 x half>, <4 x half>, i32) - define <4 x half> @vfnmsac_vv_v4f16(<4 x half> %a, <4 x half> %b, <4 x half> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_v4f16: ; CHECK: # %bb.0: @@ -242,11 +232,6 @@ define <4 x half> @vfnmsac_vf_v4f16_commute_ta(<4 x half> %a, half %b, <4 x half ret <4 x half> %u } -declare <8 x half> @llvm.vp.fma.v8f16(<8 x half>, <8 x half>, <8 x half>, <8 x i1>, i32) -declare <8 x half> @llvm.vp.fneg.v8f16(<8 x half>, <8 x i1>, i32) -declare <8 x half> @llvm.vp.merge.v8f16(<8 x i1>, <8 x half>, <8 x half>, i32) -declare <8 x half> @llvm.vp.select.v8f16(<8 x i1>, <8 x half>, <8 x half>, i32) - define <8 x half> @vfnmsac_vv_v8f16(<8 x half> %a, <8 x half> %b, <8 x half> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_v8f16: ; CHECK: # %bb.0: @@ -361,11 +346,6 @@ define <8 x half> @vfnmsac_vf_v8f16_commute_ta(<8 x half> %a, half %b, <8 x half ret <8 x half> %u } -declare <16 x half> @llvm.vp.fma.v16f16(<16 x half>, <16 x half>, <16 x half>, <16 x i1>, i32) -declare <16 x half> @llvm.vp.fneg.v16f16(<16 x half>, <16 x i1>, i32) -declare <16 x half> @llvm.vp.merge.v16f16(<16 x i1>, <16 x half>, <16 x half>, i32) -declare <16 x half> @llvm.vp.select.v16f16(<16 x i1>, <16 x half>, <16 x half>, i32) - define <16 x half> @vfnmsac_vv_v16f16(<16 x half> %a, <16 x half> %b, <16 x half> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_v16f16: ; CHECK: # %bb.0: @@ 
-480,11 +460,6 @@ define <16 x half> @vfnmsac_vf_v16f16_commute_ta(<16 x half> %a, half %b, <16 x ret <16 x half> %u } -declare <32 x half> @llvm.vp.fma.v26f16(<32 x half>, <32 x half>, <32 x half>, <32 x i1>, i32) -declare <32 x half> @llvm.vp.fneg.v26f16(<32 x half>, <32 x i1>, i32) -declare <32 x half> @llvm.vp.merge.v26f16(<32 x i1>, <32 x half>, <32 x half>, i32) -declare <32 x half> @llvm.vp.select.v26f16(<32 x i1>, <32 x half>, <32 x half>, i32) - define <32 x half> @vfnmsac_vv_v26f16(<32 x half> %a, <32 x half> %b, <32 x half> %c, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_v26f16: ; CHECK: # %bb.0: @@ -599,11 +574,6 @@ define <32 x half> @vfnmsac_vf_v26f16_commute_ta(<32 x half> %a, half %b, <32 x ret <32 x half> %u } -declare <2 x float> @llvm.vp.fma.v2f32(<2 x float>, <2 x float>, <2 x float>, <2 x i1>, i32) -declare <2 x float> @llvm.vp.fneg.v2f32(<2 x float>, <2 x i1>, i32) -declare <2 x float> @llvm.vp.merge.v2f32(<2 x i1>, <2 x float>, <2 x float>, i32) -declare <2 x float> @llvm.vp.select.v2f32(<2 x i1>, <2 x float>, <2 x float>, i32) - define <2 x float> @vfnmsac_vv_v2f32(<2 x float> %a, <2 x float> %b, <2 x float> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_v2f32: ; CHECK: # %bb.0: @@ -718,11 +688,6 @@ define <2 x float> @vfnmsac_vf_v2f32_commute_ta(<2 x float> %a, float %b, <2 x f ret <2 x float> %u } -declare <4 x float> @llvm.vp.fma.v4f32(<4 x float>, <4 x float>, <4 x float>, <4 x i1>, i32) -declare <4 x float> @llvm.vp.fneg.v4f32(<4 x float>, <4 x i1>, i32) -declare <4 x float> @llvm.vp.merge.v4f32(<4 x i1>, <4 x float>, <4 x float>, i32) -declare <4 x float> @llvm.vp.select.v4f32(<4 x i1>, <4 x float>, <4 x float>, i32) - define <4 x float> @vfnmsac_vv_v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_v4f32: ; CHECK: # %bb.0: @@ -837,11 +802,6 @@ define <4 x float> @vfnmsac_vf_v4f32_commute_ta(<4 x float> %a, float %b, <4 x f ret <4 x float> %u 
} -declare <8 x float> @llvm.vp.fma.v8f32(<8 x float>, <8 x float>, <8 x float>, <8 x i1>, i32) -declare <8 x float> @llvm.vp.fneg.v8f32(<8 x float>, <8 x i1>, i32) -declare <8 x float> @llvm.vp.merge.v8f32(<8 x i1>, <8 x float>, <8 x float>, i32) -declare <8 x float> @llvm.vp.select.v8f32(<8 x i1>, <8 x float>, <8 x float>, i32) - define <8 x float> @vfnmsac_vv_v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_v8f32: ; CHECK: # %bb.0: @@ -956,11 +916,6 @@ define <8 x float> @vfnmsac_vf_v8f32_commute_ta(<8 x float> %a, float %b, <8 x f ret <8 x float> %u } -declare <16 x float> @llvm.vp.fma.v16f32(<16 x float>, <16 x float>, <16 x float>, <16 x i1>, i32) -declare <16 x float> @llvm.vp.fneg.v16f32(<16 x float>, <16 x i1>, i32) -declare <16 x float> @llvm.vp.merge.v16f32(<16 x i1>, <16 x float>, <16 x float>, i32) -declare <16 x float> @llvm.vp.select.v16f32(<16 x i1>, <16 x float>, <16 x float>, i32) - define <16 x float> @vfnmsac_vv_v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_v16f32: ; CHECK: # %bb.0: @@ -1075,11 +1030,6 @@ define <16 x float> @vfnmsac_vf_v16f32_commute_ta(<16 x float> %a, float %b, <16 ret <16 x float> %u } -declare <2 x double> @llvm.vp.fma.v2f64(<2 x double>, <2 x double>, <2 x double>, <2 x i1>, i32) -declare <2 x double> @llvm.vp.fneg.v2f64(<2 x double>, <2 x i1>, i32) -declare <2 x double> @llvm.vp.merge.v2f64(<2 x i1>, <2 x double>, <2 x double>, i32) -declare <2 x double> @llvm.vp.select.v2f64(<2 x i1>, <2 x double>, <2 x double>, i32) - define <2 x double> @vfnmsac_vv_v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_v2f64: ; CHECK: # %bb.0: @@ -1194,11 +1144,6 @@ define <2 x double> @vfnmsac_vf_v2f64_commute_ta(<2 x double> %a, double %b, <2 ret <2 x double> %u } -declare <4 x double> @llvm.vp.fma.v4f64(<4 x double>, <4 x double>, <4 x 
double>, <4 x i1>, i32) -declare <4 x double> @llvm.vp.fneg.v4f64(<4 x double>, <4 x i1>, i32) -declare <4 x double> @llvm.vp.merge.v4f64(<4 x i1>, <4 x double>, <4 x double>, i32) -declare <4 x double> @llvm.vp.select.v4f64(<4 x i1>, <4 x double>, <4 x double>, i32) - define <4 x double> @vfnmsac_vv_v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_v4f64: ; CHECK: # %bb.0: @@ -1313,11 +1258,6 @@ define <4 x double> @vfnmsac_vf_v4f64_commute_ta(<4 x double> %a, double %b, <4 ret <4 x double> %u } -declare <8 x double> @llvm.vp.fma.v8f64(<8 x double>, <8 x double>, <8 x double>, <8 x i1>, i32) -declare <8 x double> @llvm.vp.fneg.v8f64(<8 x double>, <8 x i1>, i32) -declare <8 x double> @llvm.vp.merge.v8f64(<8 x i1>, <8 x double>, <8 x double>, i32) -declare <8 x double> @llvm.vp.select.v8f64(<8 x i1>, <8 x double>, <8 x double>, i32) - define <8 x double> @vfnmsac_vv_v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_v8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmsub-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmsub-constrained-sdnode.ll index d9863bb36c739..9c9ca4375faf1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmsub-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmsub-constrained-sdnode.ll @@ -7,8 +7,6 @@ ; This tests a mix of vfnmsac and vfnmsub by using different operand orders to ; trigger commuting in TwoAddressInstructionPass. 
-declare <2 x half> @llvm.experimental.constrained.fma.v2f16(<2 x half>, <2 x half>, <2 x half>, metadata, metadata) - define <2 x half> @vfnmsub_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x half> %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_v2f16: ; CHECK: # %bb.0: @@ -33,8 +31,6 @@ define <2 x half> @vfnmsub_vf_v2f16(<2 x half> %va, <2 x half> %vb, half %c) str ret <2 x half> %vd } -declare <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half>, <4 x half>, <4 x half>, metadata, metadata) - define <4 x half> @vfnmsub_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x half> %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_v4f16: ; CHECK: # %bb.0: @@ -59,8 +55,6 @@ define <4 x half> @vfnmsub_vf_v4f16(<4 x half> %va, <4 x half> %vb, half %c) str ret <4 x half> %vd } -declare <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half>, <8 x half>, <8 x half>, metadata, metadata) - define <8 x half> @vfnmsub_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x half> %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_v8f16: ; CHECK: # %bb.0: @@ -85,8 +79,6 @@ define <8 x half> @vfnmsub_vf_v8f16(<8 x half> %va, <8 x half> %vb, half %c) str ret <8 x half> %vd } -declare <16 x half> @llvm.experimental.constrained.fma.v16f16(<16 x half>, <16 x half>, <16 x half>, metadata, metadata) - define <16 x half> @vfnmsub_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x half> %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_v16f16: ; CHECK: # %bb.0: @@ -111,8 +103,6 @@ define <16 x half> @vfnmsub_vf_v16f16(<16 x half> %va, <16 x half> %vb, half %c) ret <16 x half> %vd } -declare <32 x half> @llvm.experimental.constrained.fma.v32f16(<32 x half>, <32 x half>, <32 x half>, metadata, metadata) - define <32 x half> @vfnmsub_vv_v32f16(<32 x half> %va, <32 x half> %vb, <32 x half> %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_v32f16: ; CHECK: # %bb.0: @@ -139,8 +129,6 @@ define <32 x half> @vfnmsub_vf_v32f16(<32 x half> %va, <32 x half> %vb, half %c) ret <32 x half> %vd } -declare <2 x float> 
@llvm.experimental.constrained.fma.v2f32(<2 x float>, <2 x float>, <2 x float>, metadata, metadata) - define <2 x float> @vfnmsub_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x float> %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_v2f32: ; CHECK: # %bb.0: @@ -165,8 +153,6 @@ define <2 x float> @vfnmsub_vf_v2f32(<2 x float> %va, <2 x float> %vb, float %c) ret <2 x float> %vd } -declare <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float>, <4 x float>, <4 x float>, metadata, metadata) - define <4 x float> @vfnmsub_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x float> %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_v4f32: ; CHECK: # %bb.0: @@ -191,8 +177,6 @@ define <4 x float> @vfnmsub_vf_v4f32(<4 x float> %va, <4 x float> %vb, float %c) ret <4 x float> %vd } -declare <8 x float> @llvm.experimental.constrained.fma.v8f32(<8 x float>, <8 x float>, <8 x float>, metadata, metadata) - define <8 x float> @vfnmsub_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x float> %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_v8f32: ; CHECK: # %bb.0: @@ -217,8 +201,6 @@ define <8 x float> @vfnmsub_vf_v8f32(<8 x float> %va, <8 x float> %vb, float %c) ret <8 x float> %vd } -declare <16 x float> @llvm.experimental.constrained.fma.v16f32(<16 x float>, <16 x float>, <16 x float>, metadata, metadata) - define <16 x float> @vfnmsub_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x float> %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_v16f32: ; CHECK: # %bb.0: @@ -243,8 +225,6 @@ define <16 x float> @vfnmsub_vf_v16f32(<16 x float> %va, <16 x float> %vb, float ret <16 x float> %vd } -declare <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double>, <2 x double>, <2 x double>, metadata, metadata) - define <2 x double> @vfnmsub_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x double> %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_v2f64: ; CHECK: # %bb.0: @@ -269,8 +249,6 @@ define <2 x double> @vfnmsub_vf_v2f64(<2 x double> %va, <2 x double> %vb, double ret <2 x double> %vd } -declare <4 x 
double> @llvm.experimental.constrained.fma.v4f64(<4 x double>, <4 x double>, <4 x double>, metadata, metadata) - define <4 x double> @vfnmsub_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x double> %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_v4f64: ; CHECK: # %bb.0: @@ -295,8 +273,6 @@ define <4 x double> @vfnmsub_vf_v4f64(<4 x double> %va, <4 x double> %vb, double ret <4 x double> %vd } -declare <8 x double> @llvm.experimental.constrained.fma.v8f64(<8 x double>, <8 x double>, <8 x double>, metadata, metadata) - define <8 x double> @vfnmsub_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x double> %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_v8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfpext-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfpext-constrained-sdnode.ll index b8a6be40b3f32..91475222f7cff 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfpext-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfpext-constrained-sdnode.ll @@ -4,7 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v,+zvfbfmin -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare <2 x float> @llvm.experimental.constrained.fpext.v2f32.v2f16(<2 x half>, metadata) define <2 x float> @vfpext_v2f16_v2f32(<2 x half> %va) strictfp { ; CHECK-LABEL: vfpext_v2f16_v2f32: ; CHECK: # %bb.0: @@ -16,7 +15,6 @@ define <2 x float> @vfpext_v2f16_v2f32(<2 x half> %va) strictfp { ret <2 x float> %evec } -declare <2 x double> @llvm.experimental.constrained.fpext.v2f64.v2f16(<2 x half>, metadata) define <2 x double> @vfpext_v2f16_v2f64(<2 x half> %va) strictfp { ; CHECK-LABEL: vfpext_v2f16_v2f64: ; CHECK: # %bb.0: @@ -29,7 +27,6 @@ define <2 x double> @vfpext_v2f16_v2f64(<2 x half> %va) strictfp { ret <2 x double> %evec } -declare <4 x float> @llvm.experimental.constrained.fpext.v4f32.v4f16(<4 x half>, metadata) define <4 x float> @vfpext_v4f16_v4f32(<4 x half> %va) strictfp { ; CHECK-LABEL: 
vfpext_v4f16_v4f32: ; CHECK: # %bb.0: @@ -41,7 +38,6 @@ define <4 x float> @vfpext_v4f16_v4f32(<4 x half> %va) strictfp { ret <4 x float> %evec } -declare <4 x double> @llvm.experimental.constrained.fpext.v4f64.v4f16(<4 x half>, metadata) define <4 x double> @vfpext_v4f16_v4f64(<4 x half> %va) strictfp { ; CHECK-LABEL: vfpext_v4f16_v4f64: ; CHECK: # %bb.0: @@ -54,7 +50,6 @@ define <4 x double> @vfpext_v4f16_v4f64(<4 x half> %va) strictfp { ret <4 x double> %evec } -declare <8 x float> @llvm.experimental.constrained.fpext.v8f32.v8f16(<8 x half>, metadata) define <8 x float> @vfpext_v8f16_v8f32(<8 x half> %va) strictfp { ; CHECK-LABEL: vfpext_v8f16_v8f32: ; CHECK: # %bb.0: @@ -66,7 +61,6 @@ define <8 x float> @vfpext_v8f16_v8f32(<8 x half> %va) strictfp { ret <8 x float> %evec } -declare <8 x double> @llvm.experimental.constrained.fpext.v8f64.v8f16(<8 x half>, metadata) define <8 x double> @vfpext_v8f16_v8f64(<8 x half> %va) strictfp { ; CHECK-LABEL: vfpext_v8f16_v8f64: ; CHECK: # %bb.0: @@ -79,7 +73,6 @@ define <8 x double> @vfpext_v8f16_v8f64(<8 x half> %va) strictfp { ret <8 x double> %evec } -declare <2 x double> @llvm.experimental.constrained.fpext.v2f64.v2f32(<2 x float>, metadata) define <2 x double> @vfpext_v2f32_v2f64(<2 x float> %va) strictfp { ; CHECK-LABEL: vfpext_v2f32_v2f64: ; CHECK: # %bb.0: @@ -91,7 +84,6 @@ define <2 x double> @vfpext_v2f32_v2f64(<2 x float> %va) strictfp { ret <2 x double> %evec } -declare <4 x double> @llvm.experimental.constrained.fpext.v4f64.v4f32(<4 x float>, metadata) define <4 x double> @vfpext_v4f32_v4f64(<4 x float> %va) strictfp { ; CHECK-LABEL: vfpext_v4f32_v4f64: ; CHECK: # %bb.0: @@ -103,7 +95,6 @@ define <4 x double> @vfpext_v4f32_v4f64(<4 x float> %va) strictfp { ret <4 x double> %evec } -declare <8 x double> @llvm.experimental.constrained.fpext.v8f64.v8f32(<8 x float>, metadata) define <8 x double> @vfpext_v8f32_v8f64(<8 x float> %va) strictfp { ; CHECK-LABEL: vfpext_v8f32_v8f64: ; CHECK: # %bb.0: @@ -115,7 +106,6 @@ 
define <8 x double> @vfpext_v8f32_v8f64(<8 x float> %va) strictfp { ret <8 x double> %evec } -declare <2 x float> @llvm.experimental.constrained.fpext.v2f32.v2bf16(<2 x bfloat>, metadata) define <2 x float> @vfpext_v2bf16_v2f32(<2 x bfloat> %va) strictfp { ; CHECK-LABEL: vfpext_v2bf16_v2f32: ; CHECK: # %bb.0: @@ -127,7 +117,6 @@ define <2 x float> @vfpext_v2bf16_v2f32(<2 x bfloat> %va) strictfp { ret <2 x float> %evec } -declare <2 x double> @llvm.experimental.constrained.fpext.v2f64.v2bf16(<2 x bfloat>, metadata) define <2 x double> @vfpext_v2bf16_v2f64(<2 x bfloat> %va) strictfp { ; CHECK-LABEL: vfpext_v2bf16_v2f64: ; CHECK: # %bb.0: @@ -140,7 +129,6 @@ define <2 x double> @vfpext_v2bf16_v2f64(<2 x bfloat> %va) strictfp { ret <2 x double> %evec } -declare <4 x float> @llvm.experimental.constrained.fpext.v4f32.v4bf16(<4 x bfloat>, metadata) define <4 x float> @vfpext_v4bf16_v4f32(<4 x bfloat> %va) strictfp { ; CHECK-LABEL: vfpext_v4bf16_v4f32: ; CHECK: # %bb.0: @@ -152,7 +140,6 @@ define <4 x float> @vfpext_v4bf16_v4f32(<4 x bfloat> %va) strictfp { ret <4 x float> %evec } -declare <4 x double> @llvm.experimental.constrained.fpext.v4f64.v4bf16(<4 x bfloat>, metadata) define <4 x double> @vfpext_v4bf16_v4f64(<4 x bfloat> %va) strictfp { ; CHECK-LABEL: vfpext_v4bf16_v4f64: ; CHECK: # %bb.0: @@ -165,7 +152,6 @@ define <4 x double> @vfpext_v4bf16_v4f64(<4 x bfloat> %va) strictfp { ret <4 x double> %evec } -declare <8 x float> @llvm.experimental.constrained.fpext.v8f32.v8bf16(<8 x bfloat>, metadata) define <8 x float> @vfpext_v8bf16_v8f32(<8 x bfloat> %va) strictfp { ; CHECK-LABEL: vfpext_v8bf16_v8f32: ; CHECK: # %bb.0: @@ -177,7 +163,6 @@ define <8 x float> @vfpext_v8bf16_v8f32(<8 x bfloat> %va) strictfp { ret <8 x float> %evec } -declare <8 x double> @llvm.experimental.constrained.fpext.v8f64.v8bf16(<8 x bfloat>, metadata) define <8 x double> @vfpext_v8bf16_v8f64(<8 x bfloat> %va) strictfp { ; CHECK-LABEL: vfpext_v8bf16_v8f64: ; CHECK: # %bb.0: diff --git 
a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfptoi-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfptoi-constrained-sdnode.ll index ac58a597a0812..77a67f1619dd0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfptoi-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfptoi-constrained-sdnode.ll @@ -4,7 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 -declare <1 x i1> @llvm.experimental.constrained.fptosi.v1i1.v1f16(<1 x half>, metadata) define <1 x i1> @vfptosi_v1f16_v1i1(<1 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v1f16_v1i1: ; CHECK: # %bb.0: @@ -17,7 +16,6 @@ define <1 x i1> @vfptosi_v1f16_v1i1(<1 x half> %va) strictfp { ret <1 x i1> %evec } -declare <1 x i1> @llvm.experimental.constrained.fptoui.v1i1.v1f16(<1 x half>, metadata) define <1 x i1> @vfptoui_v1f16_v1i1(<1 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v1f16_v1i1: ; CHECK: # %bb.0: @@ -30,7 +28,6 @@ define <1 x i1> @vfptoui_v1f16_v1i1(<1 x half> %va) strictfp { ret <1 x i1> %evec } -declare <1 x i7> @llvm.experimental.constrained.fptosi.v1i7.v1f16(<1 x half>, metadata) define <1 x i7> @vfptosi_v1f16_v1i7(<1 x half> %va) strictfp { ; RV32-LABEL: vfptosi_v1f16_v1i7: ; RV32: # %bb.0: @@ -49,7 +46,6 @@ define <1 x i7> @vfptosi_v1f16_v1i7(<1 x half> %va) strictfp { ret <1 x i7> %evec } -declare <1 x i7> @llvm.experimental.constrained.fptoui.v1i7.v1f16(<1 x half>, metadata) define <1 x i7> @vfptoui_v1f16_v1i7(<1 x half> %va) strictfp { ; RV32-LABEL: vfptoui_v1f16_v1i7: ; RV32: # %bb.0: @@ -68,7 +64,6 @@ define <1 x i7> @vfptoui_v1f16_v1i7(<1 x half> %va) strictfp { ret <1 x i7> %evec } -declare <1 x i8> @llvm.experimental.constrained.fptosi.v1i8.v1f16(<1 x half>, metadata) define <1 x i8> @vfptosi_v1f16_v1i8(<1 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v1f16_v1i8: ; CHECK: # %bb.0: @@ -80,7 +75,6 @@ define <1 x i8> @vfptosi_v1f16_v1i8(<1 x 
half> %va) strictfp { ret <1 x i8> %evec } -declare <1 x i8> @llvm.experimental.constrained.fptoui.v1i8.v1f16(<1 x half>, metadata) define <1 x i8> @vfptoui_v1f16_v1i8(<1 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v1f16_v1i8: ; CHECK: # %bb.0: @@ -92,7 +86,6 @@ define <1 x i8> @vfptoui_v1f16_v1i8(<1 x half> %va) strictfp { ret <1 x i8> %evec } -declare <1 x i16> @llvm.experimental.constrained.fptosi.v1i16.v1f16(<1 x half>, metadata) define <1 x i16> @vfptosi_v1f16_v1i16(<1 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v1f16_v1i16: ; CHECK: # %bb.0: @@ -103,7 +96,6 @@ define <1 x i16> @vfptosi_v1f16_v1i16(<1 x half> %va) strictfp { ret <1 x i16> %evec } -declare <1 x i16> @llvm.experimental.constrained.fptoui.v1i16.v1f16(<1 x half>, metadata) define <1 x i16> @vfptoui_v1f16_v1i16(<1 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v1f16_v1i16: ; CHECK: # %bb.0: @@ -114,7 +106,6 @@ define <1 x i16> @vfptoui_v1f16_v1i16(<1 x half> %va) strictfp { ret <1 x i16> %evec } -declare <1 x i32> @llvm.experimental.constrained.fptosi.v1i32.v1f16(<1 x half>, metadata) define <1 x i32> @vfptosi_v1f16_v1i32(<1 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v1f16_v1i32: ; CHECK: # %bb.0: @@ -126,7 +117,6 @@ define <1 x i32> @vfptosi_v1f16_v1i32(<1 x half> %va) strictfp { ret <1 x i32> %evec } -declare <1 x i32> @llvm.experimental.constrained.fptoui.v1i32.v1f16(<1 x half>, metadata) define <1 x i32> @vfptoui_v1f16_v1i32(<1 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v1f16_v1i32: ; CHECK: # %bb.0: @@ -138,7 +128,6 @@ define <1 x i32> @vfptoui_v1f16_v1i32(<1 x half> %va) strictfp { ret <1 x i32> %evec } -declare <1 x i64> @llvm.experimental.constrained.fptosi.v1i64.v1f16(<1 x half>, metadata) define <1 x i64> @vfptosi_v1f16_v1i64(<1 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v1f16_v1i64: ; CHECK: # %bb.0: @@ -151,7 +140,6 @@ define <1 x i64> @vfptosi_v1f16_v1i64(<1 x half> %va) strictfp { ret <1 x i64> %evec } -declare <1 x i64> 
@llvm.experimental.constrained.fptoui.v1i64.v1f16(<1 x half>, metadata) define <1 x i64> @vfptoui_v1f16_v1i64(<1 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v1f16_v1i64: ; CHECK: # %bb.0: @@ -164,7 +152,6 @@ define <1 x i64> @vfptoui_v1f16_v1i64(<1 x half> %va) strictfp { ret <1 x i64> %evec } -declare <2 x i1> @llvm.experimental.constrained.fptosi.v2i1.v2f16(<2 x half>, metadata) define <2 x i1> @vfptosi_v2f16_v2i1(<2 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v2f16_v2i1: ; CHECK: # %bb.0: @@ -177,7 +164,6 @@ define <2 x i1> @vfptosi_v2f16_v2i1(<2 x half> %va) strictfp { ret <2 x i1> %evec } -declare <2 x i1> @llvm.experimental.constrained.fptoui.v2i1.v2f16(<2 x half>, metadata) define <2 x i1> @vfptoui_v2f16_v2i1(<2 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v2f16_v2i1: ; CHECK: # %bb.0: @@ -190,7 +176,6 @@ define <2 x i1> @vfptoui_v2f16_v2i1(<2 x half> %va) strictfp { ret <2 x i1> %evec } -declare <2 x i8> @llvm.experimental.constrained.fptosi.v2i8.v2f16(<2 x half>, metadata) define <2 x i8> @vfptosi_v2f16_v2i8(<2 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v2f16_v2i8: ; CHECK: # %bb.0: @@ -202,7 +187,6 @@ define <2 x i8> @vfptosi_v2f16_v2i8(<2 x half> %va) strictfp { ret <2 x i8> %evec } -declare <2 x i8> @llvm.experimental.constrained.fptoui.v2i8.v2f16(<2 x half>, metadata) define <2 x i8> @vfptoui_v2f16_v2i8(<2 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v2f16_v2i8: ; CHECK: # %bb.0: @@ -214,7 +198,6 @@ define <2 x i8> @vfptoui_v2f16_v2i8(<2 x half> %va) strictfp { ret <2 x i8> %evec } -declare <2 x i16> @llvm.experimental.constrained.fptosi.v2i16.v2f16(<2 x half>, metadata) define <2 x i16> @vfptosi_v2f16_v2i16(<2 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v2f16_v2i16: ; CHECK: # %bb.0: @@ -225,7 +208,6 @@ define <2 x i16> @vfptosi_v2f16_v2i16(<2 x half> %va) strictfp { ret <2 x i16> %evec } -declare <2 x i16> @llvm.experimental.constrained.fptoui.v2i16.v2f16(<2 x half>, metadata) define <2 x i16> @vfptoui_v2f16_v2i16(<2 x half> %va) 
strictfp { ; CHECK-LABEL: vfptoui_v2f16_v2i16: ; CHECK: # %bb.0: @@ -236,7 +218,6 @@ define <2 x i16> @vfptoui_v2f16_v2i16(<2 x half> %va) strictfp { ret <2 x i16> %evec } -declare <2 x i32> @llvm.experimental.constrained.fptosi.v2i32.v2f16(<2 x half>, metadata) define <2 x i32> @vfptosi_v2f16_v2i32(<2 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v2f16_v2i32: ; CHECK: # %bb.0: @@ -248,7 +229,6 @@ define <2 x i32> @vfptosi_v2f16_v2i32(<2 x half> %va) strictfp { ret <2 x i32> %evec } -declare <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f16(<2 x half>, metadata) define <2 x i32> @vfptoui_v2f16_v2i32(<2 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v2f16_v2i32: ; CHECK: # %bb.0: @@ -260,7 +240,6 @@ define <2 x i32> @vfptoui_v2f16_v2i32(<2 x half> %va) strictfp { ret <2 x i32> %evec } -declare <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f16(<2 x half>, metadata) define <2 x i64> @vfptosi_v2f16_v2i64(<2 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v2f16_v2i64: ; CHECK: # %bb.0: @@ -273,7 +252,6 @@ define <2 x i64> @vfptosi_v2f16_v2i64(<2 x half> %va) strictfp { ret <2 x i64> %evec } -declare <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f16(<2 x half>, metadata) define <2 x i64> @vfptoui_v2f16_v2i64(<2 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v2f16_v2i64: ; CHECK: # %bb.0: @@ -286,7 +264,6 @@ define <2 x i64> @vfptoui_v2f16_v2i64(<2 x half> %va) strictfp { ret <2 x i64> %evec } -declare <4 x i1> @llvm.experimental.constrained.fptosi.v4i1.v4f16(<4 x half>, metadata) define <4 x i1> @vfptosi_v4f16_v4i1(<4 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v4f16_v4i1: ; CHECK: # %bb.0: @@ -299,7 +276,6 @@ define <4 x i1> @vfptosi_v4f16_v4i1(<4 x half> %va) strictfp { ret <4 x i1> %evec } -declare <4 x i1> @llvm.experimental.constrained.fptoui.v4i1.v4f16(<4 x half>, metadata) define <4 x i1> @vfptoui_v4f16_v4i1(<4 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v4f16_v4i1: ; CHECK: # %bb.0: @@ -312,7 +288,6 @@ define <4 x i1> 
@vfptoui_v4f16_v4i1(<4 x half> %va) strictfp { ret <4 x i1> %evec } -declare <4 x i8> @llvm.experimental.constrained.fptosi.v4i8.v4f16(<4 x half>, metadata) define <4 x i8> @vfptosi_v4f16_v4i8(<4 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v4f16_v4i8: ; CHECK: # %bb.0: @@ -324,7 +299,6 @@ define <4 x i8> @vfptosi_v4f16_v4i8(<4 x half> %va) strictfp { ret <4 x i8> %evec } -declare <4 x i8> @llvm.experimental.constrained.fptoui.v4i8.v4f16(<4 x half>, metadata) define <4 x i8> @vfptoui_v4f16_v4i8(<4 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v4f16_v4i8: ; CHECK: # %bb.0: @@ -336,7 +310,6 @@ define <4 x i8> @vfptoui_v4f16_v4i8(<4 x half> %va) strictfp { ret <4 x i8> %evec } -declare <4 x i16> @llvm.experimental.constrained.fptosi.v4i16.v4f16(<4 x half>, metadata) define <4 x i16> @vfptosi_v4f16_v4i16(<4 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v4f16_v4i16: ; CHECK: # %bb.0: @@ -347,7 +320,6 @@ define <4 x i16> @vfptosi_v4f16_v4i16(<4 x half> %va) strictfp { ret <4 x i16> %evec } -declare <4 x i16> @llvm.experimental.constrained.fptoui.v4i16.v4f16(<4 x half>, metadata) define <4 x i16> @vfptoui_v4f16_v4i16(<4 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v4f16_v4i16: ; CHECK: # %bb.0: @@ -358,7 +330,6 @@ define <4 x i16> @vfptoui_v4f16_v4i16(<4 x half> %va) strictfp { ret <4 x i16> %evec } -declare <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f16(<4 x half>, metadata) define <4 x i32> @vfptosi_v4f16_v4i32(<4 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v4f16_v4i32: ; CHECK: # %bb.0: @@ -370,7 +341,6 @@ define <4 x i32> @vfptosi_v4f16_v4i32(<4 x half> %va) strictfp { ret <4 x i32> %evec } -declare <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f16(<4 x half>, metadata) define <4 x i32> @vfptoui_v4f16_v4i32(<4 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v4f16_v4i32: ; CHECK: # %bb.0: @@ -382,7 +352,6 @@ define <4 x i32> @vfptoui_v4f16_v4i32(<4 x half> %va) strictfp { ret <4 x i32> %evec } -declare <4 x i64> 
@llvm.experimental.constrained.fptosi.v4i64.v4f16(<4 x half>, metadata) define <4 x i64> @vfptosi_v4f16_v4i64(<4 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v4f16_v4i64: ; CHECK: # %bb.0: @@ -395,7 +364,6 @@ define <4 x i64> @vfptosi_v4f16_v4i64(<4 x half> %va) strictfp { ret <4 x i64> %evec } -declare <4 x i64> @llvm.experimental.constrained.fptoui.v4i64.v4f16(<4 x half>, metadata) define <4 x i64> @vfptoui_v4f16_v4i64(<4 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v4f16_v4i64: ; CHECK: # %bb.0: @@ -408,7 +376,6 @@ define <4 x i64> @vfptoui_v4f16_v4i64(<4 x half> %va) strictfp { ret <4 x i64> %evec } -declare <8 x i1> @llvm.experimental.constrained.fptosi.v8i1.v8f16(<8 x half>, metadata) define <8 x i1> @vfptosi_v8f16_v8i1(<8 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v8f16_v8i1: ; CHECK: # %bb.0: @@ -421,7 +388,6 @@ define <8 x i1> @vfptosi_v8f16_v8i1(<8 x half> %va) strictfp { ret <8 x i1> %evec } -declare <8 x i1> @llvm.experimental.constrained.fptoui.v8i1.v8f16(<8 x half>, metadata) define <8 x i1> @vfptoui_v8f16_v8i1(<8 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v8f16_v8i1: ; CHECK: # %bb.0: @@ -434,7 +400,6 @@ define <8 x i1> @vfptoui_v8f16_v8i1(<8 x half> %va) strictfp { ret <8 x i1> %evec } -declare <8 x i8> @llvm.experimental.constrained.fptosi.v8i8.v8f16(<8 x half>, metadata) define <8 x i8> @vfptosi_v8f16_v8i8(<8 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v8f16_v8i8: ; CHECK: # %bb.0: @@ -446,7 +411,6 @@ define <8 x i8> @vfptosi_v8f16_v8i8(<8 x half> %va) strictfp { ret <8 x i8> %evec } -declare <8 x i8> @llvm.experimental.constrained.fptoui.v8i8.v8f16(<8 x half>, metadata) define <8 x i8> @vfptoui_v8f16_v8i8(<8 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v8f16_v8i8: ; CHECK: # %bb.0: @@ -458,7 +422,6 @@ define <8 x i8> @vfptoui_v8f16_v8i8(<8 x half> %va) strictfp { ret <8 x i8> %evec } -declare <8 x i16> @llvm.experimental.constrained.fptosi.v8i16.v8f16(<8 x half>, metadata) define <8 x i16> @vfptosi_v8f16_v8i16(<8 x half> %va) 
strictfp { ; CHECK-LABEL: vfptosi_v8f16_v8i16: ; CHECK: # %bb.0: @@ -469,7 +432,6 @@ define <8 x i16> @vfptosi_v8f16_v8i16(<8 x half> %va) strictfp { ret <8 x i16> %evec } -declare <8 x i16> @llvm.experimental.constrained.fptoui.v8i16.v8f16(<8 x half>, metadata) define <8 x i16> @vfptoui_v8f16_v8i16(<8 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v8f16_v8i16: ; CHECK: # %bb.0: @@ -480,7 +442,6 @@ define <8 x i16> @vfptoui_v8f16_v8i16(<8 x half> %va) strictfp { ret <8 x i16> %evec } -declare <8 x i32> @llvm.experimental.constrained.fptosi.v8i32.v8f16(<8 x half>, metadata) define <8 x i32> @vfptosi_v8f16_v8i32(<8 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v8f16_v8i32: ; CHECK: # %bb.0: @@ -492,7 +453,6 @@ define <8 x i32> @vfptosi_v8f16_v8i32(<8 x half> %va) strictfp { ret <8 x i32> %evec } -declare <8 x i32> @llvm.experimental.constrained.fptoui.v8i32.v8f16(<8 x half>, metadata) define <8 x i32> @vfptoui_v8f16_v8i32(<8 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v8f16_v8i32: ; CHECK: # %bb.0: @@ -504,7 +464,6 @@ define <8 x i32> @vfptoui_v8f16_v8i32(<8 x half> %va) strictfp { ret <8 x i32> %evec } -declare <8 x i64> @llvm.experimental.constrained.fptosi.v8i64.v8f16(<8 x half>, metadata) define <8 x i64> @vfptosi_v8f16_v8i64(<8 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v8f16_v8i64: ; CHECK: # %bb.0: @@ -517,7 +476,6 @@ define <8 x i64> @vfptosi_v8f16_v8i64(<8 x half> %va) strictfp { ret <8 x i64> %evec } -declare <8 x i64> @llvm.experimental.constrained.fptoui.v8i64.v8f16(<8 x half>, metadata) define <8 x i64> @vfptoui_v8f16_v8i64(<8 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v8f16_v8i64: ; CHECK: # %bb.0: @@ -530,7 +488,6 @@ define <8 x i64> @vfptoui_v8f16_v8i64(<8 x half> %va) strictfp { ret <8 x i64> %evec } -declare <16 x i1> @llvm.experimental.constrained.fptosi.v16i1.v16f16(<16 x half>, metadata) define <16 x i1> @vfptosi_v16f16_v16i1(<16 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v16f16_v16i1: ; CHECK: # %bb.0: @@ -543,7 +500,6 @@ define 
<16 x i1> @vfptosi_v16f16_v16i1(<16 x half> %va) strictfp { ret <16 x i1> %evec } -declare <16 x i1> @llvm.experimental.constrained.fptoui.v16i1.v16f16(<16 x half>, metadata) define <16 x i1> @vfptoui_v16f16_v16i1(<16 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v16f16_v16i1: ; CHECK: # %bb.0: @@ -556,7 +512,6 @@ define <16 x i1> @vfptoui_v16f16_v16i1(<16 x half> %va) strictfp { ret <16 x i1> %evec } -declare <16 x i8> @llvm.experimental.constrained.fptosi.v16i8.v16f16(<16 x half>, metadata) define <16 x i8> @vfptosi_v16f16_v16i8(<16 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v16f16_v16i8: ; CHECK: # %bb.0: @@ -568,7 +523,6 @@ define <16 x i8> @vfptosi_v16f16_v16i8(<16 x half> %va) strictfp { ret <16 x i8> %evec } -declare <16 x i8> @llvm.experimental.constrained.fptoui.v16i8.v16f16(<16 x half>, metadata) define <16 x i8> @vfptoui_v16f16_v16i8(<16 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v16f16_v16i8: ; CHECK: # %bb.0: @@ -580,7 +534,6 @@ define <16 x i8> @vfptoui_v16f16_v16i8(<16 x half> %va) strictfp { ret <16 x i8> %evec } -declare <16 x i16> @llvm.experimental.constrained.fptosi.v16i16.v16f16(<16 x half>, metadata) define <16 x i16> @vfptosi_v16f16_v16i16(<16 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v16f16_v16i16: ; CHECK: # %bb.0: @@ -591,7 +544,6 @@ define <16 x i16> @vfptosi_v16f16_v16i16(<16 x half> %va) strictfp { ret <16 x i16> %evec } -declare <16 x i16> @llvm.experimental.constrained.fptoui.v16i16.v16f16(<16 x half>, metadata) define <16 x i16> @vfptoui_v16f16_v16i16(<16 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v16f16_v16i16: ; CHECK: # %bb.0: @@ -602,7 +554,6 @@ define <16 x i16> @vfptoui_v16f16_v16i16(<16 x half> %va) strictfp { ret <16 x i16> %evec } -declare <16 x i32> @llvm.experimental.constrained.fptosi.v16i32.v16f16(<16 x half>, metadata) define <16 x i32> @vfptosi_v16f16_v16i32(<16 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v16f16_v16i32: ; CHECK: # %bb.0: @@ -614,7 +565,6 @@ define <16 x i32> 
@vfptosi_v16f16_v16i32(<16 x half> %va) strictfp { ret <16 x i32> %evec } -declare <16 x i32> @llvm.experimental.constrained.fptoui.v16i32.v16f16(<16 x half>, metadata) define <16 x i32> @vfptoui_v16f16_v16i32(<16 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v16f16_v16i32: ; CHECK: # %bb.0: @@ -626,7 +576,6 @@ define <16 x i32> @vfptoui_v16f16_v16i32(<16 x half> %va) strictfp { ret <16 x i32> %evec } -declare <32 x i1> @llvm.experimental.constrained.fptosi.v32i1.v32f16(<32 x half>, metadata) define <32 x i1> @vfptosi_v32f16_v32i1(<32 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v32f16_v32i1: ; CHECK: # %bb.0: @@ -640,7 +589,6 @@ define <32 x i1> @vfptosi_v32f16_v32i1(<32 x half> %va) strictfp { ret <32 x i1> %evec } -declare <32 x i1> @llvm.experimental.constrained.fptoui.v32i1.v32f16(<32 x half>, metadata) define <32 x i1> @vfptoui_v32f16_v32i1(<32 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v32f16_v32i1: ; CHECK: # %bb.0: @@ -654,7 +602,6 @@ define <32 x i1> @vfptoui_v32f16_v32i1(<32 x half> %va) strictfp { ret <32 x i1> %evec } -declare <32 x i8> @llvm.experimental.constrained.fptosi.v32i8.v32f16(<32 x half>, metadata) define <32 x i8> @vfptosi_v32f16_v32i8(<32 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v32f16_v32i8: ; CHECK: # %bb.0: @@ -667,7 +614,6 @@ define <32 x i8> @vfptosi_v32f16_v32i8(<32 x half> %va) strictfp { ret <32 x i8> %evec } -declare <32 x i8> @llvm.experimental.constrained.fptoui.v32i8.v32f16(<32 x half>, metadata) define <32 x i8> @vfptoui_v32f16_v32i8(<32 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v32f16_v32i8: ; CHECK: # %bb.0: @@ -680,7 +626,6 @@ define <32 x i8> @vfptoui_v32f16_v32i8(<32 x half> %va) strictfp { ret <32 x i8> %evec } -declare <32 x i16> @llvm.experimental.constrained.fptosi.v32i16.v32f16(<32 x half>, metadata) define <32 x i16> @vfptosi_v32f16_v32i16(<32 x half> %va) strictfp { ; CHECK-LABEL: vfptosi_v32f16_v32i16: ; CHECK: # %bb.0: @@ -692,7 +637,6 @@ define <32 x i16> @vfptosi_v32f16_v32i16(<32 x half> %va) 
strictfp { ret <32 x i16> %evec } -declare <32 x i16> @llvm.experimental.constrained.fptoui.v32i16.v32f16(<32 x half>, metadata) define <32 x i16> @vfptoui_v32f16_v32i16(<32 x half> %va) strictfp { ; CHECK-LABEL: vfptoui_v32f16_v32i16: ; CHECK: # %bb.0: @@ -704,7 +648,6 @@ define <32 x i16> @vfptoui_v32f16_v32i16(<32 x half> %va) strictfp { ret <32 x i16> %evec } -declare <1 x i1> @llvm.experimental.constrained.fptosi.v1i1.v1f32(<1 x float>, metadata) define <1 x i1> @vfptosi_v1f32_v1i1(<1 x float> %va) strictfp { ; CHECK-LABEL: vfptosi_v1f32_v1i1: ; CHECK: # %bb.0: @@ -717,7 +660,6 @@ define <1 x i1> @vfptosi_v1f32_v1i1(<1 x float> %va) strictfp { ret <1 x i1> %evec } -declare <1 x i1> @llvm.experimental.constrained.fptoui.v1i1.v1f32(<1 x float>, metadata) define <1 x i1> @vfptoui_v1f32_v1i1(<1 x float> %va) strictfp { ; CHECK-LABEL: vfptoui_v1f32_v1i1: ; CHECK: # %bb.0: @@ -730,7 +672,6 @@ define <1 x i1> @vfptoui_v1f32_v1i1(<1 x float> %va) strictfp { ret <1 x i1> %evec } -declare <1 x i8> @llvm.experimental.constrained.fptosi.v1i8.v1f32(<1 x float>, metadata) define <1 x i8> @vfptosi_v1f32_v1i8(<1 x float> %va) strictfp { ; CHECK-LABEL: vfptosi_v1f32_v1i8: ; CHECK: # %bb.0: @@ -743,7 +684,6 @@ define <1 x i8> @vfptosi_v1f32_v1i8(<1 x float> %va) strictfp { ret <1 x i8> %evec } -declare <1 x i8> @llvm.experimental.constrained.fptoui.v1i8.v1f32(<1 x float>, metadata) define <1 x i8> @vfptoui_v1f32_v1i8(<1 x float> %va) strictfp { ; CHECK-LABEL: vfptoui_v1f32_v1i8: ; CHECK: # %bb.0: @@ -756,7 +696,6 @@ define <1 x i8> @vfptoui_v1f32_v1i8(<1 x float> %va) strictfp { ret <1 x i8> %evec } -declare <1 x i16> @llvm.experimental.constrained.fptosi.v1i16.v1f32(<1 x float>, metadata) define <1 x i16> @vfptosi_v1f32_v1i16(<1 x float> %va) strictfp { ; CHECK-LABEL: vfptosi_v1f32_v1i16: ; CHECK: # %bb.0: @@ -768,7 +707,6 @@ define <1 x i16> @vfptosi_v1f32_v1i16(<1 x float> %va) strictfp { ret <1 x i16> %evec } -declare <1 x i16> 
@llvm.experimental.constrained.fptoui.v1i16.v1f32(<1 x float>, metadata) define <1 x i16> @vfptoui_v1f32_v1i16(<1 x float> %va) strictfp { ; CHECK-LABEL: vfptoui_v1f32_v1i16: ; CHECK: # %bb.0: @@ -780,7 +718,6 @@ define <1 x i16> @vfptoui_v1f32_v1i16(<1 x float> %va) strictfp { ret <1 x i16> %evec } -declare <1 x i32> @llvm.experimental.constrained.fptosi.v1i32.v1f32(<1 x float>, metadata) define <1 x i32> @vfptosi_v1f32_v1i32(<1 x float> %va) strictfp { ; CHECK-LABEL: vfptosi_v1f32_v1i32: ; CHECK: # %bb.0: @@ -791,7 +728,6 @@ define <1 x i32> @vfptosi_v1f32_v1i32(<1 x float> %va) strictfp { ret <1 x i32> %evec } -declare <1 x i32> @llvm.experimental.constrained.fptoui.v1i32.v1f32(<1 x float>, metadata) define <1 x i32> @vfptoui_v1f32_v1i32(<1 x float> %va) strictfp { ; CHECK-LABEL: vfptoui_v1f32_v1i32: ; CHECK: # %bb.0: @@ -802,7 +738,6 @@ define <1 x i32> @vfptoui_v1f32_v1i32(<1 x float> %va) strictfp { ret <1 x i32> %evec } -declare <1 x i64> @llvm.experimental.constrained.fptosi.v1i64.v1f32(<1 x float>, metadata) define <1 x i64> @vfptosi_v1f32_v1i64(<1 x float> %va) strictfp { ; CHECK-LABEL: vfptosi_v1f32_v1i64: ; CHECK: # %bb.0: @@ -814,7 +749,6 @@ define <1 x i64> @vfptosi_v1f32_v1i64(<1 x float> %va) strictfp { ret <1 x i64> %evec } -declare <1 x i64> @llvm.experimental.constrained.fptoui.v1i64.v1f32(<1 x float>, metadata) define <1 x i64> @vfptoui_v1f32_v1i64(<1 x float> %va) strictfp { ; CHECK-LABEL: vfptoui_v1f32_v1i64: ; CHECK: # %bb.0: @@ -826,7 +760,6 @@ define <1 x i64> @vfptoui_v1f32_v1i64(<1 x float> %va) strictfp { ret <1 x i64> %evec } -declare <2 x i1> @llvm.experimental.constrained.fptosi.v2i1.v2f32(<2 x float>, metadata) define <2 x i1> @vfptosi_v2f32_v2i1(<2 x float> %va) strictfp { ; CHECK-LABEL: vfptosi_v2f32_v2i1: ; CHECK: # %bb.0: @@ -839,7 +772,6 @@ define <2 x i1> @vfptosi_v2f32_v2i1(<2 x float> %va) strictfp { ret <2 x i1> %evec } -declare <2 x i1> @llvm.experimental.constrained.fptoui.v2i1.v2f32(<2 x float>, metadata) define <2 x i1> 
@vfptoui_v2f32_v2i1(<2 x float> %va) strictfp { ; CHECK-LABEL: vfptoui_v2f32_v2i1: ; CHECK: # %bb.0: @@ -852,7 +784,6 @@ define <2 x i1> @vfptoui_v2f32_v2i1(<2 x float> %va) strictfp { ret <2 x i1> %evec } -declare <2 x i8> @llvm.experimental.constrained.fptosi.v2i8.v2f32(<2 x float>, metadata) define <2 x i8> @vfptosi_v2f32_v2i8(<2 x float> %va) strictfp { ; CHECK-LABEL: vfptosi_v2f32_v2i8: ; CHECK: # %bb.0: @@ -865,7 +796,6 @@ define <2 x i8> @vfptosi_v2f32_v2i8(<2 x float> %va) strictfp { ret <2 x i8> %evec } -declare <2 x i8> @llvm.experimental.constrained.fptoui.v2i8.v2f32(<2 x float>, metadata) define <2 x i8> @vfptoui_v2f32_v2i8(<2 x float> %va) strictfp { ; CHECK-LABEL: vfptoui_v2f32_v2i8: ; CHECK: # %bb.0: @@ -878,7 +808,6 @@ define <2 x i8> @vfptoui_v2f32_v2i8(<2 x float> %va) strictfp { ret <2 x i8> %evec } -declare <2 x i16> @llvm.experimental.constrained.fptosi.v2i16.v2f32(<2 x float>, metadata) define <2 x i16> @vfptosi_v2f32_v2i16(<2 x float> %va) strictfp { ; CHECK-LABEL: vfptosi_v2f32_v2i16: ; CHECK: # %bb.0: @@ -890,7 +819,6 @@ define <2 x i16> @vfptosi_v2f32_v2i16(<2 x float> %va) strictfp { ret <2 x i16> %evec } -declare <2 x i16> @llvm.experimental.constrained.fptoui.v2i16.v2f32(<2 x float>, metadata) define <2 x i16> @vfptoui_v2f32_v2i16(<2 x float> %va) strictfp { ; CHECK-LABEL: vfptoui_v2f32_v2i16: ; CHECK: # %bb.0: @@ -902,7 +830,6 @@ define <2 x i16> @vfptoui_v2f32_v2i16(<2 x float> %va) strictfp { ret <2 x i16> %evec } -declare <2 x i32> @llvm.experimental.constrained.fptosi.v2i32.v2f32(<2 x float>, metadata) define <2 x i32> @vfptosi_v2f32_v2i32(<2 x float> %va) strictfp { ; CHECK-LABEL: vfptosi_v2f32_v2i32: ; CHECK: # %bb.0: @@ -913,7 +840,6 @@ define <2 x i32> @vfptosi_v2f32_v2i32(<2 x float> %va) strictfp { ret <2 x i32> %evec } -declare <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f32(<2 x float>, metadata) define <2 x i32> @vfptoui_v2f32_v2i32(<2 x float> %va) strictfp { ; CHECK-LABEL: vfptoui_v2f32_v2i32: ; CHECK: # 
%bb.0: @@ -924,7 +850,6 @@ define <2 x i32> @vfptoui_v2f32_v2i32(<2 x float> %va) strictfp { ret <2 x i32> %evec } -declare <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f32(<2 x float>, metadata) define <2 x i64> @vfptosi_v2f32_v2i64(<2 x float> %va) strictfp { ; CHECK-LABEL: vfptosi_v2f32_v2i64: ; CHECK: # %bb.0: @@ -936,7 +861,6 @@ define <2 x i64> @vfptosi_v2f32_v2i64(<2 x float> %va) strictfp { ret <2 x i64> %evec } -declare <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f32(<2 x float>, metadata) define <2 x i64> @vfptoui_v2f32_v2i64(<2 x float> %va) strictfp { ; CHECK-LABEL: vfptoui_v2f32_v2i64: ; CHECK: # %bb.0: @@ -948,7 +872,6 @@ define <2 x i64> @vfptoui_v2f32_v2i64(<2 x float> %va) strictfp { ret <2 x i64> %evec } -declare <4 x i1> @llvm.experimental.constrained.fptosi.v4i1.v4f32(<4 x float>, metadata) define <4 x i1> @vfptosi_v4f32_v4i1(<4 x float> %va) strictfp { ; CHECK-LABEL: vfptosi_v4f32_v4i1: ; CHECK: # %bb.0: @@ -961,7 +884,6 @@ define <4 x i1> @vfptosi_v4f32_v4i1(<4 x float> %va) strictfp { ret <4 x i1> %evec } -declare <4 x i1> @llvm.experimental.constrained.fptoui.v4i1.v4f32(<4 x float>, metadata) define <4 x i1> @vfptoui_v4f32_v4i1(<4 x float> %va) strictfp { ; CHECK-LABEL: vfptoui_v4f32_v4i1: ; CHECK: # %bb.0: @@ -974,7 +896,6 @@ define <4 x i1> @vfptoui_v4f32_v4i1(<4 x float> %va) strictfp { ret <4 x i1> %evec } -declare <4 x i8> @llvm.experimental.constrained.fptosi.v4i8.v4f32(<4 x float>, metadata) define <4 x i8> @vfptosi_v4f32_v4i8(<4 x float> %va) strictfp { ; CHECK-LABEL: vfptosi_v4f32_v4i8: ; CHECK: # %bb.0: @@ -987,7 +908,6 @@ define <4 x i8> @vfptosi_v4f32_v4i8(<4 x float> %va) strictfp { ret <4 x i8> %evec } -declare <4 x i8> @llvm.experimental.constrained.fptoui.v4i8.v4f32(<4 x float>, metadata) define <4 x i8> @vfptoui_v4f32_v4i8(<4 x float> %va) strictfp { ; CHECK-LABEL: vfptoui_v4f32_v4i8: ; CHECK: # %bb.0: @@ -1000,7 +920,6 @@ define <4 x i8> @vfptoui_v4f32_v4i8(<4 x float> %va) strictfp { ret <4 x i8> 
%evec } -declare <4 x i16> @llvm.experimental.constrained.fptosi.v4i16.v4f32(<4 x float>, metadata) define <4 x i16> @vfptosi_v4f32_v4i16(<4 x float> %va) strictfp { ; CHECK-LABEL: vfptosi_v4f32_v4i16: ; CHECK: # %bb.0: @@ -1012,7 +931,6 @@ define <4 x i16> @vfptosi_v4f32_v4i16(<4 x float> %va) strictfp { ret <4 x i16> %evec } -declare <4 x i16> @llvm.experimental.constrained.fptoui.v4i16.v4f32(<4 x float>, metadata) define <4 x i16> @vfptoui_v4f32_v4i16(<4 x float> %va) strictfp { ; CHECK-LABEL: vfptoui_v4f32_v4i16: ; CHECK: # %bb.0: @@ -1024,7 +942,6 @@ define <4 x i16> @vfptoui_v4f32_v4i16(<4 x float> %va) strictfp { ret <4 x i16> %evec } -declare <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f32(<4 x float>, metadata) define <4 x i32> @vfptosi_v4f32_v4i32(<4 x float> %va) strictfp { ; CHECK-LABEL: vfptosi_v4f32_v4i32: ; CHECK: # %bb.0: @@ -1035,7 +952,6 @@ define <4 x i32> @vfptosi_v4f32_v4i32(<4 x float> %va) strictfp { ret <4 x i32> %evec } -declare <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f32(<4 x float>, metadata) define <4 x i32> @vfptoui_v4f32_v4i32(<4 x float> %va) strictfp { ; CHECK-LABEL: vfptoui_v4f32_v4i32: ; CHECK: # %bb.0: @@ -1046,7 +962,6 @@ define <4 x i32> @vfptoui_v4f32_v4i32(<4 x float> %va) strictfp { ret <4 x i32> %evec } -declare <4 x i64> @llvm.experimental.constrained.fptosi.v4i64.v4f32(<4 x float>, metadata) define <4 x i64> @vfptosi_v4f32_v4i64(<4 x float> %va) strictfp { ; CHECK-LABEL: vfptosi_v4f32_v4i64: ; CHECK: # %bb.0: @@ -1058,7 +973,6 @@ define <4 x i64> @vfptosi_v4f32_v4i64(<4 x float> %va) strictfp { ret <4 x i64> %evec } -declare <4 x i64> @llvm.experimental.constrained.fptoui.v4i64.v4f32(<4 x float>, metadata) define <4 x i64> @vfptoui_v4f32_v4i64(<4 x float> %va) strictfp { ; CHECK-LABEL: vfptoui_v4f32_v4i64: ; CHECK: # %bb.0: @@ -1070,7 +984,6 @@ define <4 x i64> @vfptoui_v4f32_v4i64(<4 x float> %va) strictfp { ret <4 x i64> %evec } -declare <8 x i1> 
@llvm.experimental.constrained.fptosi.v8i1.v8f32(<8 x float>, metadata) define <8 x i1> @vfptosi_v8f32_v8i1(<8 x float> %va) strictfp { ; CHECK-LABEL: vfptosi_v8f32_v8i1: ; CHECK: # %bb.0: @@ -1083,7 +996,6 @@ define <8 x i1> @vfptosi_v8f32_v8i1(<8 x float> %va) strictfp { ret <8 x i1> %evec } -declare <8 x i1> @llvm.experimental.constrained.fptoui.v8i1.v8f32(<8 x float>, metadata) define <8 x i1> @vfptoui_v8f32_v8i1(<8 x float> %va) strictfp { ; CHECK-LABEL: vfptoui_v8f32_v8i1: ; CHECK: # %bb.0: @@ -1096,7 +1008,6 @@ define <8 x i1> @vfptoui_v8f32_v8i1(<8 x float> %va) strictfp { ret <8 x i1> %evec } -declare <8 x i8> @llvm.experimental.constrained.fptosi.v8i8.v8f32(<8 x float>, metadata) define <8 x i8> @vfptosi_v8f32_v8i8(<8 x float> %va) strictfp { ; CHECK-LABEL: vfptosi_v8f32_v8i8: ; CHECK: # %bb.0: @@ -1109,7 +1020,6 @@ define <8 x i8> @vfptosi_v8f32_v8i8(<8 x float> %va) strictfp { ret <8 x i8> %evec } -declare <8 x i8> @llvm.experimental.constrained.fptoui.v8i8.v8f32(<8 x float>, metadata) define <8 x i8> @vfptoui_v8f32_v8i8(<8 x float> %va) strictfp { ; CHECK-LABEL: vfptoui_v8f32_v8i8: ; CHECK: # %bb.0: @@ -1122,7 +1032,6 @@ define <8 x i8> @vfptoui_v8f32_v8i8(<8 x float> %va) strictfp { ret <8 x i8> %evec } -declare <8 x i16> @llvm.experimental.constrained.fptosi.v8i16.v8f32(<8 x float>, metadata) define <8 x i16> @vfptosi_v8f32_v8i16(<8 x float> %va) strictfp { ; CHECK-LABEL: vfptosi_v8f32_v8i16: ; CHECK: # %bb.0: @@ -1134,7 +1043,6 @@ define <8 x i16> @vfptosi_v8f32_v8i16(<8 x float> %va) strictfp { ret <8 x i16> %evec } -declare <8 x i16> @llvm.experimental.constrained.fptoui.v8i16.v8f32(<8 x float>, metadata) define <8 x i16> @vfptoui_v8f32_v8i16(<8 x float> %va) strictfp { ; CHECK-LABEL: vfptoui_v8f32_v8i16: ; CHECK: # %bb.0: @@ -1146,7 +1054,6 @@ define <8 x i16> @vfptoui_v8f32_v8i16(<8 x float> %va) strictfp { ret <8 x i16> %evec } -declare <8 x i32> @llvm.experimental.constrained.fptosi.v8i32.v8f32(<8 x float>, metadata) define <8 x i32> 
@vfptosi_v8f32_v8i32(<8 x float> %va) strictfp { ; CHECK-LABEL: vfptosi_v8f32_v8i32: ; CHECK: # %bb.0: @@ -1157,7 +1064,6 @@ define <8 x i32> @vfptosi_v8f32_v8i32(<8 x float> %va) strictfp { ret <8 x i32> %evec } -declare <8 x i32> @llvm.experimental.constrained.fptoui.v8i32.v8f32(<8 x float>, metadata) define <8 x i32> @vfptoui_v8f32_v8i32(<8 x float> %va) strictfp { ; CHECK-LABEL: vfptoui_v8f32_v8i32: ; CHECK: # %bb.0: @@ -1168,7 +1074,6 @@ define <8 x i32> @vfptoui_v8f32_v8i32(<8 x float> %va) strictfp { ret <8 x i32> %evec } -declare <8 x i64> @llvm.experimental.constrained.fptosi.v8i64.v8f32(<8 x float>, metadata) define <8 x i64> @vfptosi_v8f32_v8i64(<8 x float> %va) strictfp { ; CHECK-LABEL: vfptosi_v8f32_v8i64: ; CHECK: # %bb.0: @@ -1180,7 +1085,6 @@ define <8 x i64> @vfptosi_v8f32_v8i64(<8 x float> %va) strictfp { ret <8 x i64> %evec } -declare <8 x i64> @llvm.experimental.constrained.fptoui.v8i64.v8f32(<8 x float>, metadata) define <8 x i64> @vfptoui_v8f32_v8i64(<8 x float> %va) strictfp { ; CHECK-LABEL: vfptoui_v8f32_v8i64: ; CHECK: # %bb.0: @@ -1192,7 +1096,6 @@ define <8 x i64> @vfptoui_v8f32_v8i64(<8 x float> %va) strictfp { ret <8 x i64> %evec } -declare <16 x i1> @llvm.experimental.constrained.fptosi.v16i1.v16f32(<16 x float>, metadata) define <16 x i1> @vfptosi_v16f32_v16i1(<16 x float> %va) strictfp { ; CHECK-LABEL: vfptosi_v16f32_v16i1: ; CHECK: # %bb.0: @@ -1205,7 +1108,6 @@ define <16 x i1> @vfptosi_v16f32_v16i1(<16 x float> %va) strictfp { ret <16 x i1> %evec } -declare <16 x i1> @llvm.experimental.constrained.fptoui.v16i1.v16f32(<16 x float>, metadata) define <16 x i1> @vfptoui_v16f32_v16i1(<16 x float> %va) strictfp { ; CHECK-LABEL: vfptoui_v16f32_v16i1: ; CHECK: # %bb.0: @@ -1218,7 +1120,6 @@ define <16 x i1> @vfptoui_v16f32_v16i1(<16 x float> %va) strictfp { ret <16 x i1> %evec } -declare <16 x i8> @llvm.experimental.constrained.fptosi.v16i8.v16f32(<16 x float>, metadata) define <16 x i8> @vfptosi_v16f32_v16i8(<16 x float> %va) strictfp { 
; CHECK-LABEL: vfptosi_v16f32_v16i8: ; CHECK: # %bb.0: @@ -1231,7 +1132,6 @@ define <16 x i8> @vfptosi_v16f32_v16i8(<16 x float> %va) strictfp { ret <16 x i8> %evec } -declare <16 x i8> @llvm.experimental.constrained.fptoui.v16i8.v16f32(<16 x float>, metadata) define <16 x i8> @vfptoui_v16f32_v16i8(<16 x float> %va) strictfp { ; CHECK-LABEL: vfptoui_v16f32_v16i8: ; CHECK: # %bb.0: @@ -1244,7 +1144,6 @@ define <16 x i8> @vfptoui_v16f32_v16i8(<16 x float> %va) strictfp { ret <16 x i8> %evec } -declare <16 x i16> @llvm.experimental.constrained.fptosi.v16i16.v16f32(<16 x float>, metadata) define <16 x i16> @vfptosi_v16f32_v16i16(<16 x float> %va) strictfp { ; CHECK-LABEL: vfptosi_v16f32_v16i16: ; CHECK: # %bb.0: @@ -1256,7 +1155,6 @@ define <16 x i16> @vfptosi_v16f32_v16i16(<16 x float> %va) strictfp { ret <16 x i16> %evec } -declare <16 x i16> @llvm.experimental.constrained.fptoui.v16i16.v16f32(<16 x float>, metadata) define <16 x i16> @vfptoui_v16f32_v16i16(<16 x float> %va) strictfp { ; CHECK-LABEL: vfptoui_v16f32_v16i16: ; CHECK: # %bb.0: @@ -1268,7 +1166,6 @@ define <16 x i16> @vfptoui_v16f32_v16i16(<16 x float> %va) strictfp { ret <16 x i16> %evec } -declare <16 x i32> @llvm.experimental.constrained.fptosi.v16i32.v16f32(<16 x float>, metadata) define <16 x i32> @vfptosi_v16f32_v16i32(<16 x float> %va) strictfp { ; CHECK-LABEL: vfptosi_v16f32_v16i32: ; CHECK: # %bb.0: @@ -1279,7 +1176,6 @@ define <16 x i32> @vfptosi_v16f32_v16i32(<16 x float> %va) strictfp { ret <16 x i32> %evec } -declare <16 x i32> @llvm.experimental.constrained.fptoui.v16i32.v16f32(<16 x float>, metadata) define <16 x i32> @vfptoui_v16f32_v16i32(<16 x float> %va) strictfp { ; CHECK-LABEL: vfptoui_v16f32_v16i32: ; CHECK: # %bb.0: @@ -1290,7 +1186,6 @@ define <16 x i32> @vfptoui_v16f32_v16i32(<16 x float> %va) strictfp { ret <16 x i32> %evec } -declare <1 x i1> @llvm.experimental.constrained.fptosi.v1i1.v1f64(<1 x double>, metadata) define <1 x i1> @vfptosi_v1f64_v1i1(<1 x double> %va) strictfp { 
; CHECK-LABEL: vfptosi_v1f64_v1i1: ; CHECK: # %bb.0: @@ -1303,7 +1198,6 @@ define <1 x i1> @vfptosi_v1f64_v1i1(<1 x double> %va) strictfp { ret <1 x i1> %evec } -declare <1 x i1> @llvm.experimental.constrained.fptoui.v1i1.v1f64(<1 x double>, metadata) define <1 x i1> @vfptoui_v1f64_v1i1(<1 x double> %va) strictfp { ; CHECK-LABEL: vfptoui_v1f64_v1i1: ; CHECK: # %bb.0: @@ -1316,7 +1210,6 @@ define <1 x i1> @vfptoui_v1f64_v1i1(<1 x double> %va) strictfp { ret <1 x i1> %evec } -declare <1 x i8> @llvm.experimental.constrained.fptosi.v1i8.v1f64(<1 x double>, metadata) define <1 x i8> @vfptosi_v1f64_v1i8(<1 x double> %va) strictfp { ; CHECK-LABEL: vfptosi_v1f64_v1i8: ; CHECK: # %bb.0: @@ -1331,7 +1224,6 @@ define <1 x i8> @vfptosi_v1f64_v1i8(<1 x double> %va) strictfp { ret <1 x i8> %evec } -declare <1 x i8> @llvm.experimental.constrained.fptoui.v1i8.v1f64(<1 x double>, metadata) define <1 x i8> @vfptoui_v1f64_v1i8(<1 x double> %va) strictfp { ; CHECK-LABEL: vfptoui_v1f64_v1i8: ; CHECK: # %bb.0: @@ -1346,7 +1238,6 @@ define <1 x i8> @vfptoui_v1f64_v1i8(<1 x double> %va) strictfp { ret <1 x i8> %evec } -declare <1 x i16> @llvm.experimental.constrained.fptosi.v1i16.v1f64(<1 x double>, metadata) define <1 x i16> @vfptosi_v1f64_v1i16(<1 x double> %va) strictfp { ; CHECK-LABEL: vfptosi_v1f64_v1i16: ; CHECK: # %bb.0: @@ -1359,7 +1250,6 @@ define <1 x i16> @vfptosi_v1f64_v1i16(<1 x double> %va) strictfp { ret <1 x i16> %evec } -declare <1 x i16> @llvm.experimental.constrained.fptoui.v1i16.v1f64(<1 x double>, metadata) define <1 x i16> @vfptoui_v1f64_v1i16(<1 x double> %va) strictfp { ; CHECK-LABEL: vfptoui_v1f64_v1i16: ; CHECK: # %bb.0: @@ -1372,7 +1262,6 @@ define <1 x i16> @vfptoui_v1f64_v1i16(<1 x double> %va) strictfp { ret <1 x i16> %evec } -declare <1 x i32> @llvm.experimental.constrained.fptosi.v1i32.v1f64(<1 x double>, metadata) define <1 x i32> @vfptosi_v1f64_v1i32(<1 x double> %va) strictfp { ; CHECK-LABEL: vfptosi_v1f64_v1i32: ; CHECK: # %bb.0: @@ -1384,7 +1273,6 @@ 
define <1 x i32> @vfptosi_v1f64_v1i32(<1 x double> %va) strictfp { ret <1 x i32> %evec } -declare <1 x i32> @llvm.experimental.constrained.fptoui.v1i32.v1f64(<1 x double>, metadata) define <1 x i32> @vfptoui_v1f64_v1i32(<1 x double> %va) strictfp { ; CHECK-LABEL: vfptoui_v1f64_v1i32: ; CHECK: # %bb.0: @@ -1396,7 +1284,6 @@ define <1 x i32> @vfptoui_v1f64_v1i32(<1 x double> %va) strictfp { ret <1 x i32> %evec } -declare <1 x i64> @llvm.experimental.constrained.fptosi.v1i64.v1f64(<1 x double>, metadata) define <1 x i64> @vfptosi_v1f64_v1i64(<1 x double> %va) strictfp { ; CHECK-LABEL: vfptosi_v1f64_v1i64: ; CHECK: # %bb.0: @@ -1407,7 +1294,6 @@ define <1 x i64> @vfptosi_v1f64_v1i64(<1 x double> %va) strictfp { ret <1 x i64> %evec } -declare <1 x i64> @llvm.experimental.constrained.fptoui.v1i64.v1f64(<1 x double>, metadata) define <1 x i64> @vfptoui_v1f64_v1i64(<1 x double> %va) strictfp { ; CHECK-LABEL: vfptoui_v1f64_v1i64: ; CHECK: # %bb.0: @@ -1418,7 +1304,6 @@ define <1 x i64> @vfptoui_v1f64_v1i64(<1 x double> %va) strictfp { ret <1 x i64> %evec } -declare <2 x i1> @llvm.experimental.constrained.fptosi.v2i1.v2f64(<2 x double>, metadata) define <2 x i1> @vfptosi_v2f64_v2i1(<2 x double> %va) strictfp { ; CHECK-LABEL: vfptosi_v2f64_v2i1: ; CHECK: # %bb.0: @@ -1431,7 +1316,6 @@ define <2 x i1> @vfptosi_v2f64_v2i1(<2 x double> %va) strictfp { ret <2 x i1> %evec } -declare <2 x i1> @llvm.experimental.constrained.fptoui.v2i1.v2f64(<2 x double>, metadata) define <2 x i1> @vfptoui_v2f64_v2i1(<2 x double> %va) strictfp { ; CHECK-LABEL: vfptoui_v2f64_v2i1: ; CHECK: # %bb.0: @@ -1444,7 +1328,6 @@ define <2 x i1> @vfptoui_v2f64_v2i1(<2 x double> %va) strictfp { ret <2 x i1> %evec } -declare <2 x i8> @llvm.experimental.constrained.fptosi.v2i8.v2f64(<2 x double>, metadata) define <2 x i8> @vfptosi_v2f64_v2i8(<2 x double> %va) strictfp { ; CHECK-LABEL: vfptosi_v2f64_v2i8: ; CHECK: # %bb.0: @@ -1459,7 +1342,6 @@ define <2 x i8> @vfptosi_v2f64_v2i8(<2 x double> %va) strictfp { ret 
<2 x i8> %evec } -declare <2 x i8> @llvm.experimental.constrained.fptoui.v2i8.v2f64(<2 x double>, metadata) define <2 x i8> @vfptoui_v2f64_v2i8(<2 x double> %va) strictfp { ; CHECK-LABEL: vfptoui_v2f64_v2i8: ; CHECK: # %bb.0: @@ -1474,7 +1356,6 @@ define <2 x i8> @vfptoui_v2f64_v2i8(<2 x double> %va) strictfp { ret <2 x i8> %evec } -declare <2 x i16> @llvm.experimental.constrained.fptosi.v2i16.v2f64(<2 x double>, metadata) define <2 x i16> @vfptosi_v2f64_v2i16(<2 x double> %va) strictfp { ; CHECK-LABEL: vfptosi_v2f64_v2i16: ; CHECK: # %bb.0: @@ -1487,7 +1368,6 @@ define <2 x i16> @vfptosi_v2f64_v2i16(<2 x double> %va) strictfp { ret <2 x i16> %evec } -declare <2 x i16> @llvm.experimental.constrained.fptoui.v2i16.v2f64(<2 x double>, metadata) define <2 x i16> @vfptoui_v2f64_v2i16(<2 x double> %va) strictfp { ; CHECK-LABEL: vfptoui_v2f64_v2i16: ; CHECK: # %bb.0: @@ -1500,7 +1380,6 @@ define <2 x i16> @vfptoui_v2f64_v2i16(<2 x double> %va) strictfp { ret <2 x i16> %evec } -declare <2 x i32> @llvm.experimental.constrained.fptosi.v2i32.v2f64(<2 x double>, metadata) define <2 x i32> @vfptosi_v2f64_v2i32(<2 x double> %va) strictfp { ; CHECK-LABEL: vfptosi_v2f64_v2i32: ; CHECK: # %bb.0: @@ -1512,7 +1391,6 @@ define <2 x i32> @vfptosi_v2f64_v2i32(<2 x double> %va) strictfp { ret <2 x i32> %evec } -declare <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f64(<2 x double>, metadata) define <2 x i32> @vfptoui_v2f64_v2i32(<2 x double> %va) strictfp { ; CHECK-LABEL: vfptoui_v2f64_v2i32: ; CHECK: # %bb.0: @@ -1524,7 +1402,6 @@ define <2 x i32> @vfptoui_v2f64_v2i32(<2 x double> %va) strictfp { ret <2 x i32> %evec } -declare <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f64(<2 x double>, metadata) define <2 x i64> @vfptosi_v2f64_v2i64(<2 x double> %va) strictfp { ; CHECK-LABEL: vfptosi_v2f64_v2i64: ; CHECK: # %bb.0: @@ -1535,7 +1412,6 @@ define <2 x i64> @vfptosi_v2f64_v2i64(<2 x double> %va) strictfp { ret <2 x i64> %evec } -declare <2 x i64> 
@llvm.experimental.constrained.fptoui.v2i64.v2f64(<2 x double>, metadata) define <2 x i64> @vfptoui_v2f64_v2i64(<2 x double> %va) strictfp { ; CHECK-LABEL: vfptoui_v2f64_v2i64: ; CHECK: # %bb.0: @@ -1546,7 +1422,6 @@ define <2 x i64> @vfptoui_v2f64_v2i64(<2 x double> %va) strictfp { ret <2 x i64> %evec } -declare <4 x i1> @llvm.experimental.constrained.fptosi.v4i1.v4f64(<4 x double>, metadata) define <4 x i1> @vfptosi_v4f64_v4i1(<4 x double> %va) strictfp { ; CHECK-LABEL: vfptosi_v4f64_v4i1: ; CHECK: # %bb.0: @@ -1559,7 +1434,6 @@ define <4 x i1> @vfptosi_v4f64_v4i1(<4 x double> %va) strictfp { ret <4 x i1> %evec } -declare <4 x i1> @llvm.experimental.constrained.fptoui.v4i1.v4f64(<4 x double>, metadata) define <4 x i1> @vfptoui_v4f64_v4i1(<4 x double> %va) strictfp { ; CHECK-LABEL: vfptoui_v4f64_v4i1: ; CHECK: # %bb.0: @@ -1572,7 +1446,6 @@ define <4 x i1> @vfptoui_v4f64_v4i1(<4 x double> %va) strictfp { ret <4 x i1> %evec } -declare <4 x i8> @llvm.experimental.constrained.fptosi.v4i8.v4f64(<4 x double>, metadata) define <4 x i8> @vfptosi_v4f64_v4i8(<4 x double> %va) strictfp { ; CHECK-LABEL: vfptosi_v4f64_v4i8: ; CHECK: # %bb.0: @@ -1587,7 +1460,6 @@ define <4 x i8> @vfptosi_v4f64_v4i8(<4 x double> %va) strictfp { ret <4 x i8> %evec } -declare <4 x i8> @llvm.experimental.constrained.fptoui.v4i8.v4f64(<4 x double>, metadata) define <4 x i8> @vfptoui_v4f64_v4i8(<4 x double> %va) strictfp { ; CHECK-LABEL: vfptoui_v4f64_v4i8: ; CHECK: # %bb.0: @@ -1602,7 +1474,6 @@ define <4 x i8> @vfptoui_v4f64_v4i8(<4 x double> %va) strictfp { ret <4 x i8> %evec } -declare <4 x i16> @llvm.experimental.constrained.fptosi.v4i16.v4f64(<4 x double>, metadata) define <4 x i16> @vfptosi_v4f64_v4i16(<4 x double> %va) strictfp { ; CHECK-LABEL: vfptosi_v4f64_v4i16: ; CHECK: # %bb.0: @@ -1615,7 +1486,6 @@ define <4 x i16> @vfptosi_v4f64_v4i16(<4 x double> %va) strictfp { ret <4 x i16> %evec } -declare <4 x i16> @llvm.experimental.constrained.fptoui.v4i16.v4f64(<4 x double>, metadata) define 
<4 x i16> @vfptoui_v4f64_v4i16(<4 x double> %va) strictfp { ; CHECK-LABEL: vfptoui_v4f64_v4i16: ; CHECK: # %bb.0: @@ -1628,7 +1498,6 @@ define <4 x i16> @vfptoui_v4f64_v4i16(<4 x double> %va) strictfp { ret <4 x i16> %evec } -declare <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f64(<4 x double>, metadata) define <4 x i32> @vfptosi_v4f64_v4i32(<4 x double> %va) strictfp { ; CHECK-LABEL: vfptosi_v4f64_v4i32: ; CHECK: # %bb.0: @@ -1640,7 +1509,6 @@ define <4 x i32> @vfptosi_v4f64_v4i32(<4 x double> %va) strictfp { ret <4 x i32> %evec } -declare <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f64(<4 x double>, metadata) define <4 x i32> @vfptoui_v4f64_v4i32(<4 x double> %va) strictfp { ; CHECK-LABEL: vfptoui_v4f64_v4i32: ; CHECK: # %bb.0: @@ -1652,7 +1520,6 @@ define <4 x i32> @vfptoui_v4f64_v4i32(<4 x double> %va) strictfp { ret <4 x i32> %evec } -declare <4 x i64> @llvm.experimental.constrained.fptosi.v4i64.v4f64(<4 x double>, metadata) define <4 x i64> @vfptosi_v4f64_v4i64(<4 x double> %va) strictfp { ; CHECK-LABEL: vfptosi_v4f64_v4i64: ; CHECK: # %bb.0: @@ -1663,7 +1530,6 @@ define <4 x i64> @vfptosi_v4f64_v4i64(<4 x double> %va) strictfp { ret <4 x i64> %evec } -declare <4 x i64> @llvm.experimental.constrained.fptoui.v4i64.v4f64(<4 x double>, metadata) define <4 x i64> @vfptoui_v4f64_v4i64(<4 x double> %va) strictfp { ; CHECK-LABEL: vfptoui_v4f64_v4i64: ; CHECK: # %bb.0: @@ -1674,7 +1540,6 @@ define <4 x i64> @vfptoui_v4f64_v4i64(<4 x double> %va) strictfp { ret <4 x i64> %evec } -declare <8 x i1> @llvm.experimental.constrained.fptosi.v8i1.v8f64(<8 x double>, metadata) define <8 x i1> @vfptosi_v8f64_v8i1(<8 x double> %va) strictfp { ; CHECK-LABEL: vfptosi_v8f64_v8i1: ; CHECK: # %bb.0: @@ -1687,7 +1552,6 @@ define <8 x i1> @vfptosi_v8f64_v8i1(<8 x double> %va) strictfp { ret <8 x i1> %evec } -declare <8 x i1> @llvm.experimental.constrained.fptoui.v8i1.v8f64(<8 x double>, metadata) define <8 x i1> @vfptoui_v8f64_v8i1(<8 x double> %va) strictfp { ; 
CHECK-LABEL: vfptoui_v8f64_v8i1: ; CHECK: # %bb.0: @@ -1700,7 +1564,6 @@ define <8 x i1> @vfptoui_v8f64_v8i1(<8 x double> %va) strictfp { ret <8 x i1> %evec } -declare <8 x i8> @llvm.experimental.constrained.fptosi.v8i8.v8f64(<8 x double>, metadata) define <8 x i8> @vfptosi_v8f64_v8i8(<8 x double> %va) strictfp { ; CHECK-LABEL: vfptosi_v8f64_v8i8: ; CHECK: # %bb.0: @@ -1715,7 +1578,6 @@ define <8 x i8> @vfptosi_v8f64_v8i8(<8 x double> %va) strictfp { ret <8 x i8> %evec } -declare <8 x i8> @llvm.experimental.constrained.fptoui.v8i8.v8f64(<8 x double>, metadata) define <8 x i8> @vfptoui_v8f64_v8i8(<8 x double> %va) strictfp { ; CHECK-LABEL: vfptoui_v8f64_v8i8: ; CHECK: # %bb.0: @@ -1730,7 +1592,6 @@ define <8 x i8> @vfptoui_v8f64_v8i8(<8 x double> %va) strictfp { ret <8 x i8> %evec } -declare <8 x i16> @llvm.experimental.constrained.fptosi.v8i16.v8f64(<8 x double>, metadata) define <8 x i16> @vfptosi_v8f64_v8i16(<8 x double> %va) strictfp { ; CHECK-LABEL: vfptosi_v8f64_v8i16: ; CHECK: # %bb.0: @@ -1743,7 +1604,6 @@ define <8 x i16> @vfptosi_v8f64_v8i16(<8 x double> %va) strictfp { ret <8 x i16> %evec } -declare <8 x i16> @llvm.experimental.constrained.fptoui.v8i16.v8f64(<8 x double>, metadata) define <8 x i16> @vfptoui_v8f64_v8i16(<8 x double> %va) strictfp { ; CHECK-LABEL: vfptoui_v8f64_v8i16: ; CHECK: # %bb.0: @@ -1756,7 +1616,6 @@ define <8 x i16> @vfptoui_v8f64_v8i16(<8 x double> %va) strictfp { ret <8 x i16> %evec } -declare <8 x i32> @llvm.experimental.constrained.fptosi.v8i32.v8f64(<8 x double>, metadata) define <8 x i32> @vfptosi_v8f64_v8i32(<8 x double> %va) strictfp { ; CHECK-LABEL: vfptosi_v8f64_v8i32: ; CHECK: # %bb.0: @@ -1768,7 +1627,6 @@ define <8 x i32> @vfptosi_v8f64_v8i32(<8 x double> %va) strictfp { ret <8 x i32> %evec } -declare <8 x i32> @llvm.experimental.constrained.fptoui.v8i32.v8f64(<8 x double>, metadata) define <8 x i32> @vfptoui_v8f64_v8i32(<8 x double> %va) strictfp { ; CHECK-LABEL: vfptoui_v8f64_v8i32: ; CHECK: # %bb.0: @@ -1780,7 
+1638,6 @@ define <8 x i32> @vfptoui_v8f64_v8i32(<8 x double> %va) strictfp { ret <8 x i32> %evec } -declare <8 x i64> @llvm.experimental.constrained.fptosi.v8i64.v8f64(<8 x double>, metadata) define <8 x i64> @vfptosi_v8f64_v8i64(<8 x double> %va) strictfp { ; CHECK-LABEL: vfptosi_v8f64_v8i64: ; CHECK: # %bb.0: @@ -1791,7 +1648,6 @@ define <8 x i64> @vfptosi_v8f64_v8i64(<8 x double> %va) strictfp { ret <8 x i64> %evec } -declare <8 x i64> @llvm.experimental.constrained.fptoui.v8i64.v8f64(<8 x double>, metadata) define <8 x i64> @vfptoui_v8f64_v8i64(<8 x double> %va) strictfp { ; CHECK-LABEL: vfptoui_v8f64_v8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfptrunc-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfptrunc-constrained-sdnode.ll index 1f74691437ad2..1aa8d5509d191 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfptrunc-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfptrunc-constrained-sdnode.ll @@ -8,7 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v,+zvfbfmin -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare <2 x float> @llvm.experimental.constrained.fptrunc.v2f32.v2f64(<2 x double>, metadata, metadata) define <2 x float> @vfptrunc_v2f64_v2f32(<2 x double> %va) strictfp { ; CHECK-LABEL: vfptrunc_v2f64_v2f32: ; CHECK: # %bb.0: @@ -20,7 +19,6 @@ define <2 x float> @vfptrunc_v2f64_v2f32(<2 x double> %va) strictfp { ret <2 x float> %evec } -declare <2 x half> @llvm.experimental.constrained.fptrunc.v2f16.v2f64(<2 x double>, metadata, metadata) define <2 x half> @vfptrunc_v2f64_v2f16(<2 x double> %va) strictfp { ; CHECK-LABEL: vfptrunc_v2f64_v2f16: ; CHECK: # %bb.0: @@ -33,7 +31,6 @@ define <2 x half> @vfptrunc_v2f64_v2f16(<2 x double> %va) strictfp { ret <2 x half> %evec } -declare <2 x half> @llvm.experimental.constrained.fptrunc.v2f16.v2f32(<2 x float>, metadata, metadata) define <2 x half> @vfptrunc_v2f32_v2f16(<2 x float> %va) strictfp { 
; CHECK-LABEL: vfptrunc_v2f32_v2f16: ; CHECK: # %bb.0: @@ -45,7 +42,6 @@ define <2 x half> @vfptrunc_v2f32_v2f16(<2 x float> %va) strictfp { ret <2 x half> %evec } -declare <4 x float> @llvm.experimental.constrained.fptrunc.v4f32.v4f64(<4 x double>, metadata, metadata) define <4 x float> @vfptrunc_v4f64_v4f32(<4 x double> %va) strictfp { ; CHECK-LABEL: vfptrunc_v4f64_v4f32: ; CHECK: # %bb.0: @@ -57,7 +53,6 @@ define <4 x float> @vfptrunc_v4f64_v4f32(<4 x double> %va) strictfp { ret <4 x float> %evec } -declare <4 x half> @llvm.experimental.constrained.fptrunc.v4f16.v4f64(<4 x double>, metadata, metadata) define <4 x half> @vfptrunc_v4f64_v4f16(<4 x double> %va) strictfp { ; CHECK-LABEL: vfptrunc_v4f64_v4f16: ; CHECK: # %bb.0: @@ -70,7 +65,6 @@ define <4 x half> @vfptrunc_v4f64_v4f16(<4 x double> %va) strictfp { ret <4 x half> %evec } -declare <4 x half> @llvm.experimental.constrained.fptrunc.v4f16.v4f32(<4 x float>, metadata, metadata) define <4 x half> @vfptrunc_v4f32_v4f16(<4 x float> %va) strictfp { ; CHECK-LABEL: vfptrunc_v4f32_v4f16: ; CHECK: # %bb.0: @@ -82,7 +76,6 @@ define <4 x half> @vfptrunc_v4f32_v4f16(<4 x float> %va) strictfp { ret <4 x half> %evec } -declare <8 x float> @llvm.experimental.constrained.fptrunc.v8f32.v8f64(<8 x double>, metadata, metadata) define <8 x float> @vfptrunc_v8f64_v8f32(<8 x double> %va) strictfp { ; CHECK-LABEL: vfptrunc_v8f64_v8f32: ; CHECK: # %bb.0: @@ -94,7 +87,6 @@ define <8 x float> @vfptrunc_v8f64_v8f32(<8 x double> %va) strictfp { ret <8 x float> %evec } -declare <8 x half> @llvm.experimental.constrained.fptrunc.v8f16.v8f64(<8 x double>, metadata, metadata) define <8 x half> @vfptrunc_v8f64_v8f16(<8 x double> %va) strictfp { ; CHECK-LABEL: vfptrunc_v8f64_v8f16: ; CHECK: # %bb.0: @@ -107,7 +99,6 @@ define <8 x half> @vfptrunc_v8f64_v8f16(<8 x double> %va) strictfp { ret <8 x half> %evec } -declare <8 x half> @llvm.experimental.constrained.fptrunc.v8f16.v8f32(<8 x float>, metadata, metadata) define <8 x half> 
@vfptrunc_v8f32_v8f16(<8 x float> %va) strictfp { ; CHECK-LABEL: vfptrunc_v8f32_v8f16: ; CHECK: # %bb.0: @@ -119,7 +110,6 @@ define <8 x half> @vfptrunc_v8f32_v8f16(<8 x float> %va) strictfp { ret <8 x half> %evec } -declare <2 x bfloat> @llvm.experimental.constrained.fptrunc.v2bf16.v2f64(<2 x double>, metadata, metadata) define <2 x bfloat> @vfptrunc_v2f64_v2bf16(<2 x double> %va) strictfp { ; CHECK-LABEL: vfptrunc_v2f64_v2bf16: ; CHECK: # %bb.0: @@ -132,7 +122,6 @@ define <2 x bfloat> @vfptrunc_v2f64_v2bf16(<2 x double> %va) strictfp { ret <2 x bfloat> %evec } -declare <2 x bfloat> @llvm.experimental.constrained.fptrunc.v2bf16.v2f32(<2 x float>, metadata, metadata) define <2 x bfloat> @vfptrunc_v2f32_v2bf16(<2 x float> %va) strictfp { ; CHECK-LABEL: vfptrunc_v2f32_v2bf16: ; CHECK: # %bb.0: @@ -144,7 +133,6 @@ define <2 x bfloat> @vfptrunc_v2f32_v2bf16(<2 x float> %va) strictfp { ret <2 x bfloat> %evec } -declare <4 x bfloat> @llvm.experimental.constrained.fptrunc.v4bf16.v4f64(<4 x double>, metadata, metadata) define <4 x bfloat> @vfptrunc_v4f64_v4bf16(<4 x double> %va) strictfp { ; CHECK-LABEL: vfptrunc_v4f64_v4bf16: ; CHECK: # %bb.0: @@ -157,7 +145,6 @@ define <4 x bfloat> @vfptrunc_v4f64_v4bf16(<4 x double> %va) strictfp { ret <4 x bfloat> %evec } -declare <4 x bfloat> @llvm.experimental.constrained.fptrunc.v4bf16.v4f32(<4 x float>, metadata, metadata) define <4 x bfloat> @vfptrunc_v4f32_v4bf16(<4 x float> %va) strictfp { ; CHECK-LABEL: vfptrunc_v4f32_v4bf16: ; CHECK: # %bb.0: @@ -169,7 +156,6 @@ define <4 x bfloat> @vfptrunc_v4f32_v4bf16(<4 x float> %va) strictfp { ret <4 x bfloat> %evec } -declare <8 x bfloat> @llvm.experimental.constrained.fptrunc.v8bf16.v8f64(<8 x double>, metadata, metadata) define <8 x bfloat> @vfptrunc_v8f64_v8bf16(<8 x double> %va) strictfp { ; CHECK-LABEL: vfptrunc_v8f64_v8bf16: ; CHECK: # %bb.0: @@ -182,7 +168,6 @@ define <8 x bfloat> @vfptrunc_v8f64_v8bf16(<8 x double> %va) strictfp { ret <8 x bfloat> %evec } -declare <8 x bfloat> 
@llvm.experimental.constrained.fptrunc.v8bf16.v8f32(<8 x float>, metadata, metadata) define <8 x bfloat> @vfptrunc_v8f32_v8bf16(<8 x float> %va) strictfp { ; CHECK-LABEL: vfptrunc_v8f32_v8bf16: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrdiv-vp.ll index fb813d4381a7d..52e09f6f10a87 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrdiv-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrdiv-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare <2 x half> @llvm.vp.fdiv.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32) - define <2 x half> @vfrdiv_vf_v2f16(<2 x half> %va, half %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_v2f16: ; CHECK: # %bb.0: @@ -30,8 +28,6 @@ define <2 x half> @vfrdiv_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext ret <2 x half> %v } -declare <4 x half> @llvm.vp.fdiv.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32) - define <4 x half> @vfrdiv_vf_v4f16(<4 x half> %va, half %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_v4f16: ; CHECK: # %bb.0: @@ -56,8 +52,6 @@ define <4 x half> @vfrdiv_vf_v4f16_unmasked(<4 x half> %va, half %b, i32 zeroext ret <4 x half> %v } -declare <8 x half> @llvm.vp.fdiv.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32) - define <8 x half> @vfrdiv_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_v8f16: ; CHECK: # %bb.0: @@ -82,8 +76,6 @@ define <8 x half> @vfrdiv_vf_v8f16_unmasked(<8 x half> %va, half %b, i32 zeroext ret <8 x half> %v } -declare <16 x half> @llvm.vp.fdiv.v16f16(<16 x half>, <16 x half>, <16 x i1>, i32) - define <16 x half> @vfrdiv_vf_v16f16(<16 x half> %va, half %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_v16f16: ; CHECK: # %bb.0: @@ -108,8 +100,6 @@ define <16 x half> @vfrdiv_vf_v16f16_unmasked(<16 x half> %va, half %b, i32 zero 
ret <16 x half> %v } -declare <2 x float> @llvm.vp.fdiv.v2f32(<2 x float>, <2 x float>, <2 x i1>, i32) - define <2 x float> @vfrdiv_vf_v2f32(<2 x float> %va, float %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_v2f32: ; CHECK: # %bb.0: @@ -134,8 +124,6 @@ define <2 x float> @vfrdiv_vf_v2f32_unmasked(<2 x float> %va, float %b, i32 zero ret <2 x float> %v } -declare <4 x float> @llvm.vp.fdiv.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32) - define <4 x float> @vfrdiv_vf_v4f32(<4 x float> %va, float %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_v4f32: ; CHECK: # %bb.0: @@ -160,8 +148,6 @@ define <4 x float> @vfrdiv_vf_v4f32_unmasked(<4 x float> %va, float %b, i32 zero ret <4 x float> %v } -declare <8 x float> @llvm.vp.fdiv.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32) - define <8 x float> @vfrdiv_vf_v8f32(<8 x float> %va, float %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_v8f32: ; CHECK: # %bb.0: @@ -186,8 +172,6 @@ define <8 x float> @vfrdiv_vf_v8f32_unmasked(<8 x float> %va, float %b, i32 zero ret <8 x float> %v } -declare <16 x float> @llvm.vp.fdiv.v16f32(<16 x float>, <16 x float>, <16 x i1>, i32) - define <16 x float> @vfrdiv_vf_v16f32(<16 x float> %va, float %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_v16f32: ; CHECK: # %bb.0: @@ -212,8 +196,6 @@ define <16 x float> @vfrdiv_vf_v16f32_unmasked(<16 x float> %va, float %b, i32 z ret <16 x float> %v } -declare <2 x double> @llvm.vp.fdiv.v2f64(<2 x double>, <2 x double>, <2 x i1>, i32) - define <2 x double> @vfrdiv_vf_v2f64(<2 x double> %va, double %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_v2f64: ; CHECK: # %bb.0: @@ -238,8 +220,6 @@ define <2 x double> @vfrdiv_vf_v2f64_unmasked(<2 x double> %va, double %b, i32 z ret <2 x double> %v } -declare <4 x double> @llvm.vp.fdiv.v4f64(<4 x double>, <4 x double>, <4 x i1>, i32) - define <4 x double> @vfrdiv_vf_v4f64(<4 x double> %va, double %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vfrdiv_vf_v4f64: ; CHECK: # %bb.0: @@ -264,8 +244,6 @@ define <4 x double> @vfrdiv_vf_v4f64_unmasked(<4 x double> %va, double %b, i32 z ret <4 x double> %v } -declare <8 x double> @llvm.vp.fdiv.v8f64(<8 x double>, <8 x double>, <8 x i1>, i32) - define <8 x double> @vfrdiv_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_v8f64: ; CHECK: # %bb.0: @@ -290,8 +268,6 @@ define <8 x double> @vfrdiv_vf_v8f64_unmasked(<8 x double> %va, double %b, i32 z ret <8 x double> %v } -declare <16 x double> @llvm.vp.fdiv.v16f64(<16 x double>, <16 x double>, <16 x i1>, i32) - define <16 x double> @vfrdiv_vf_v16f64(<16 x double> %va, double %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_v16f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrsub-vp.ll index 63c2d1f2e7db3..30b840a2f6b4e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrsub-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrsub-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare <2 x half> @llvm.vp.fsub.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32) - define <2 x half> @vfrsub_vf_v2f16(<2 x half> %va, half %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_v2f16: ; CHECK: # %bb.0: @@ -30,8 +28,6 @@ define <2 x half> @vfrsub_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext ret <2 x half> %v } -declare <4 x half> @llvm.vp.fsub.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32) - define <4 x half> @vfrsub_vf_v4f16(<4 x half> %va, half %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_v4f16: ; CHECK: # %bb.0: @@ -56,8 +52,6 @@ define <4 x half> @vfrsub_vf_v4f16_unmasked(<4 x half> %va, half %b, i32 zeroext ret <4 x half> %v } -declare <8 x half> @llvm.vp.fsub.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32) - define <8 x half> @vfrsub_vf_v8f16(<8 x half> 
%va, half %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_v8f16: ; CHECK: # %bb.0: @@ -82,8 +76,6 @@ define <8 x half> @vfrsub_vf_v8f16_unmasked(<8 x half> %va, half %b, i32 zeroext ret <8 x half> %v } -declare <16 x half> @llvm.vp.fsub.v16f16(<16 x half>, <16 x half>, <16 x i1>, i32) - define <16 x half> @vfrsub_vf_v16f16(<16 x half> %va, half %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_v16f16: ; CHECK: # %bb.0: @@ -108,8 +100,6 @@ define <16 x half> @vfrsub_vf_v16f16_unmasked(<16 x half> %va, half %b, i32 zero ret <16 x half> %v } -declare <2 x float> @llvm.vp.fsub.v2f32(<2 x float>, <2 x float>, <2 x i1>, i32) - define <2 x float> @vfrsub_vf_v2f32(<2 x float> %va, float %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_v2f32: ; CHECK: # %bb.0: @@ -134,8 +124,6 @@ define <2 x float> @vfrsub_vf_v2f32_unmasked(<2 x float> %va, float %b, i32 zero ret <2 x float> %v } -declare <4 x float> @llvm.vp.fsub.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32) - define <4 x float> @vfrsub_vf_v4f32(<4 x float> %va, float %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_v4f32: ; CHECK: # %bb.0: @@ -160,8 +148,6 @@ define <4 x float> @vfrsub_vf_v4f32_unmasked(<4 x float> %va, float %b, i32 zero ret <4 x float> %v } -declare <8 x float> @llvm.vp.fsub.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32) - define <8 x float> @vfrsub_vf_v8f32(<8 x float> %va, float %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_v8f32: ; CHECK: # %bb.0: @@ -186,8 +172,6 @@ define <8 x float> @vfrsub_vf_v8f32_unmasked(<8 x float> %va, float %b, i32 zero ret <8 x float> %v } -declare <16 x float> @llvm.vp.fsub.v16f32(<16 x float>, <16 x float>, <16 x i1>, i32) - define <16 x float> @vfrsub_vf_v16f32(<16 x float> %va, float %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_v16f32: ; CHECK: # %bb.0: @@ -212,8 +196,6 @@ define <16 x float> @vfrsub_vf_v16f32_unmasked(<16 x float> %va, float %b, i32 z ret <16 x float> %v } -declare 
<2 x double> @llvm.vp.fsub.v2f64(<2 x double>, <2 x double>, <2 x i1>, i32) - define <2 x double> @vfrsub_vf_v2f64(<2 x double> %va, double %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_v2f64: ; CHECK: # %bb.0: @@ -238,8 +220,6 @@ define <2 x double> @vfrsub_vf_v2f64_unmasked(<2 x double> %va, double %b, i32 z ret <2 x double> %v } -declare <4 x double> @llvm.vp.fsub.v4f64(<4 x double>, <4 x double>, <4 x i1>, i32) - define <4 x double> @vfrsub_vf_v4f64(<4 x double> %va, double %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_v4f64: ; CHECK: # %bb.0: @@ -264,8 +244,6 @@ define <4 x double> @vfrsub_vf_v4f64_unmasked(<4 x double> %va, double %b, i32 z ret <4 x double> %v } -declare <8 x double> @llvm.vp.fsub.v8f64(<8 x double>, <8 x double>, <8 x i1>, i32) - define <8 x double> @vfrsub_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_v8f64: ; CHECK: # %bb.0: @@ -290,8 +268,6 @@ define <8 x double> @vfrsub_vf_v8f64_unmasked(<8 x double> %va, double %b, i32 z ret <8 x double> %v } -declare <16 x double> @llvm.vp.fsub.v16f64(<16 x double>, <16 x double>, <16 x i1>, i32) - define <16 x double> @vfrsub_vf_v16f64(<16 x double> %va, double %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_v16f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-constrained-sdnode.ll index 62d03e1ab588a..bdc061ef1732b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-constrained-sdnode.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare <2 x half> @llvm.experimental.constrained.sqrt.v2f16(<2 x half>, metadata, metadata) - define <2 x half> @vfsqrt_v2f16(<2 x half> %v) strictfp { ; CHECK-LABEL: vfsqrt_v2f16: ; CHECK: # %bb.0: 
@@ -16,8 +14,6 @@ define <2 x half> @vfsqrt_v2f16(<2 x half> %v) strictfp { ret <2 x half> %r } -declare <4 x half> @llvm.experimental.constrained.sqrt.v4f16(<4 x half>, metadata, metadata) - define <4 x half> @vfsqrt_v4f16(<4 x half> %v) strictfp { ; CHECK-LABEL: vfsqrt_v4f16: ; CHECK: # %bb.0: @@ -28,8 +24,6 @@ define <4 x half> @vfsqrt_v4f16(<4 x half> %v) strictfp { ret <4 x half> %r } -declare <8 x half> @llvm.experimental.constrained.sqrt.v8f16(<8 x half>, metadata, metadata) - define <8 x half> @vfsqrt_v8f16(<8 x half> %v) strictfp { ; CHECK-LABEL: vfsqrt_v8f16: ; CHECK: # %bb.0: @@ -40,8 +34,6 @@ define <8 x half> @vfsqrt_v8f16(<8 x half> %v) strictfp { ret <8 x half> %r } -declare <16 x half> @llvm.experimental.constrained.sqrt.v16f16(<16 x half>, metadata, metadata) - define <16 x half> @vfsqrt_v16f16(<16 x half> %v) strictfp { ; CHECK-LABEL: vfsqrt_v16f16: ; CHECK: # %bb.0: @@ -52,8 +44,6 @@ define <16 x half> @vfsqrt_v16f16(<16 x half> %v) strictfp { ret <16 x half> %r } -declare <32 x half> @llvm.experimental.constrained.sqrt.v32f16(<32 x half>, metadata, metadata) - define <32 x half> @vfsqrt_v32f16(<32 x half> %v) strictfp { ; CHECK-LABEL: vfsqrt_v32f16: ; CHECK: # %bb.0: @@ -65,8 +55,6 @@ define <32 x half> @vfsqrt_v32f16(<32 x half> %v) strictfp { ret <32 x half> %r } -declare <2 x float> @llvm.experimental.constrained.sqrt.v2f32(<2 x float>, metadata, metadata) - define <2 x float> @vfsqrt_v2f32(<2 x float> %v) strictfp { ; CHECK-LABEL: vfsqrt_v2f32: ; CHECK: # %bb.0: @@ -77,8 +65,6 @@ define <2 x float> @vfsqrt_v2f32(<2 x float> %v) strictfp { ret <2 x float> %r } -declare <4 x float> @llvm.experimental.constrained.sqrt.v4f32(<4 x float>, metadata, metadata) - define <4 x float> @vfsqrt_v4f32(<4 x float> %v) strictfp { ; CHECK-LABEL: vfsqrt_v4f32: ; CHECK: # %bb.0: @@ -89,8 +75,6 @@ define <4 x float> @vfsqrt_v4f32(<4 x float> %v) strictfp { ret <4 x float> %r } -declare <8 x float> @llvm.experimental.constrained.sqrt.v8f32(<8 x float>, metadata, 
metadata) - define <8 x float> @vfsqrt_v8f32(<8 x float> %v) strictfp { ; CHECK-LABEL: vfsqrt_v8f32: ; CHECK: # %bb.0: @@ -101,8 +85,6 @@ define <8 x float> @vfsqrt_v8f32(<8 x float> %v) strictfp { ret <8 x float> %r } -declare <16 x float> @llvm.experimental.constrained.sqrt.v16f32(<16 x float>, metadata, metadata) - define <16 x float> @vfsqrt_v16f32(<16 x float> %v) strictfp { ; CHECK-LABEL: vfsqrt_v16f32: ; CHECK: # %bb.0: @@ -113,8 +95,6 @@ define <16 x float> @vfsqrt_v16f32(<16 x float> %v) strictfp { ret <16 x float> %r } -declare <2 x double> @llvm.experimental.constrained.sqrt.v2f64(<2 x double>, metadata, metadata) - define <2 x double> @vfsqrt_v2f64(<2 x double> %v) strictfp { ; CHECK-LABEL: vfsqrt_v2f64: ; CHECK: # %bb.0: @@ -125,8 +105,6 @@ define <2 x double> @vfsqrt_v2f64(<2 x double> %v) strictfp { ret <2 x double> %r } -declare <4 x double> @llvm.experimental.constrained.sqrt.v4f64(<4 x double>, metadata, metadata) - define <4 x double> @vfsqrt_v4f64(<4 x double> %v) strictfp { ; CHECK-LABEL: vfsqrt_v4f64: ; CHECK: # %bb.0: @@ -137,8 +115,6 @@ define <4 x double> @vfsqrt_v4f64(<4 x double> %v) strictfp { ret <4 x double> %r } -declare <8 x double> @llvm.experimental.constrained.sqrt.v8f64(<8 x double>, metadata, metadata) - define <8 x double> @vfsqrt_v8f64(<8 x double> %v) strictfp { ; CHECK-LABEL: vfsqrt_v8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-vp.ll index 6244419de65b1..b431d4873fa1b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare <2 x half> @llvm.vp.sqrt.v2f16(<2 x half>, <2 x i1>, i32) - define <2 x half> @vfsqrt_vv_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: 
vfsqrt_vv_v2f16: ; ZVFH: # %bb.0: @@ -50,8 +48,6 @@ define <2 x half> @vfsqrt_vv_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) { ret <2 x half> %v } -declare <4 x half> @llvm.vp.sqrt.v4f16(<4 x half>, <4 x i1>, i32) - define <4 x half> @vfsqrt_vv_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsqrt_vv_v4f16: ; ZVFH: # %bb.0: @@ -92,8 +88,6 @@ define <4 x half> @vfsqrt_vv_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) { ret <4 x half> %v } -declare <8 x half> @llvm.vp.sqrt.v8f16(<8 x half>, <8 x i1>, i32) - define <8 x half> @vfsqrt_vv_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsqrt_vv_v8f16: ; ZVFH: # %bb.0: @@ -134,8 +128,6 @@ define <8 x half> @vfsqrt_vv_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) { ret <8 x half> %v } -declare <16 x half> @llvm.vp.sqrt.v16f16(<16 x half>, <16 x i1>, i32) - define <16 x half> @vfsqrt_vv_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsqrt_vv_v16f16: ; ZVFH: # %bb.0: @@ -176,8 +168,6 @@ define <16 x half> @vfsqrt_vv_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) ret <16 x half> %v } -declare <2 x float> @llvm.vp.sqrt.v2f32(<2 x float>, <2 x i1>, i32) - define <2 x float> @vfsqrt_vv_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_v2f32: ; CHECK: # %bb.0: @@ -198,8 +188,6 @@ define <2 x float> @vfsqrt_vv_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) ret <2 x float> %v } -declare <4 x float> @llvm.vp.sqrt.v4f32(<4 x float>, <4 x i1>, i32) - define <4 x float> @vfsqrt_vv_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_v4f32: ; CHECK: # %bb.0: @@ -220,8 +208,6 @@ define <4 x float> @vfsqrt_vv_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) ret <4 x float> %v } -declare <8 x float> @llvm.vp.sqrt.v8f32(<8 x float>, <8 x i1>, i32) - define <8 x float> @vfsqrt_vv_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_v8f32: ; CHECK: # %bb.0: @@ -242,8 
+228,6 @@ define <8 x float> @vfsqrt_vv_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) ret <8 x float> %v } -declare <16 x float> @llvm.vp.sqrt.v16f32(<16 x float>, <16 x i1>, i32) - define <16 x float> @vfsqrt_vv_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_v16f32: ; CHECK: # %bb.0: @@ -264,8 +248,6 @@ define <16 x float> @vfsqrt_vv_v16f32_unmasked(<16 x float> %va, i32 zeroext %ev ret <16 x float> %v } -declare <2 x double> @llvm.vp.sqrt.v2f64(<2 x double>, <2 x i1>, i32) - define <2 x double> @vfsqrt_vv_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_v2f64: ; CHECK: # %bb.0: @@ -286,8 +268,6 @@ define <2 x double> @vfsqrt_vv_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl ret <2 x double> %v } -declare <4 x double> @llvm.vp.sqrt.v4f64(<4 x double>, <4 x i1>, i32) - define <4 x double> @vfsqrt_vv_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_v4f64: ; CHECK: # %bb.0: @@ -308,8 +288,6 @@ define <4 x double> @vfsqrt_vv_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl ret <4 x double> %v } -declare <8 x double> @llvm.vp.sqrt.v8f64(<8 x double>, <8 x i1>, i32) - define <8 x double> @vfsqrt_vv_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_v8f64: ; CHECK: # %bb.0: @@ -330,8 +308,6 @@ define <8 x double> @vfsqrt_vv_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl ret <8 x double> %v } -declare <15 x double> @llvm.vp.sqrt.v15f64(<15 x double>, <15 x i1>, i32) - define <15 x double> @vfsqrt_vv_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_v15f64: ; CHECK: # %bb.0: @@ -352,8 +328,6 @@ define <15 x double> @vfsqrt_vv_v15f64_unmasked(<15 x double> %va, i32 zeroext % ret <15 x double> %v } -declare <16 x double> @llvm.vp.sqrt.v16f64(<16 x double>, <16 x i1>, i32) - define <16 x double> @vfsqrt_vv_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_v16f64: ; 
CHECK: # %bb.0: @@ -374,8 +348,6 @@ define <16 x double> @vfsqrt_vv_v16f64_unmasked(<16 x double> %va, i32 zeroext % ret <16 x double> %v } -declare <32 x double> @llvm.vp.sqrt.v32f64(<32 x double>, <32 x i1>, i32) - define <32 x double> @vfsqrt_vv_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_v32f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-constrained-sdnode.ll index e6001352a237b..32a0d2407c955 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-constrained-sdnode.ll @@ -4,7 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare <2 x half> @llvm.experimental.constrained.fsub.v2f16(<2 x half>, <2 x half>, metadata, metadata) define <2 x half> @vfsub_vv_v2f16(<2 x half> %va, <2 x half> %vb) strictfp { ; CHECK-LABEL: vfsub_vv_v2f16: ; CHECK: # %bb.0: # %entry @@ -28,7 +27,6 @@ define <2 x half> @vfsub_vf_v2f16(<2 x half> %va, half %b) strictfp { ret <2 x half> %vc } -declare <4 x half> @llvm.experimental.constrained.fsub.v4f16(<4 x half>, <4 x half>, metadata, metadata) define <4 x half> @vfsub_vv_v4f16(<4 x half> %va, <4 x half> %vb) strictfp { ; CHECK-LABEL: vfsub_vv_v4f16: ; CHECK: # %bb.0: # %entry @@ -52,7 +50,6 @@ define <4 x half> @vfsub_vf_v4f16(<4 x half> %va, half %b) strictfp { ret <4 x half> %vc } -declare <8 x half> @llvm.experimental.constrained.fsub.v8f16(<8 x half>, <8 x half>, metadata, metadata) define <8 x half> @vfsub_vv_v8f16(<8 x half> %va, <8 x half> %vb) strictfp { ; CHECK-LABEL: vfsub_vv_v8f16: ; CHECK: # %bb.0: # %entry @@ -88,7 +85,6 @@ define <8 x half> @vfsub_fv_v8f16(<8 x half> %va, half %b) strictfp { ret <8 x half> %vc } -declare <16 x half> @llvm.experimental.constrained.fsub.v16f16(<16 x half>, <16 x half>, metadata, 
metadata) define <16 x half> @vfsub_vv_v16f16(<16 x half> %va, <16 x half> %vb) strictfp { ; CHECK-LABEL: vfsub_vv_v16f16: ; CHECK: # %bb.0: # %entry @@ -112,7 +108,6 @@ define <16 x half> @vfsub_vf_v16f16(<16 x half> %va, half %b) strictfp { ret <16 x half> %vc } -declare <32 x half> @llvm.experimental.constrained.fsub.v32f16(<32 x half>, <32 x half>, metadata, metadata) define <32 x half> @vfsub_vv_v32f16(<32 x half> %va, <32 x half> %vb) strictfp { ; CHECK-LABEL: vfsub_vv_v32f16: ; CHECK: # %bb.0: # %entry @@ -138,7 +133,6 @@ define <32 x half> @vfsub_vf_v32f16(<32 x half> %va, half %b) strictfp { ret <32 x half> %vc } -declare <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float>, <2 x float>, metadata, metadata) define <2 x float> @vfsub_vv_v2f32(<2 x float> %va, <2 x float> %vb) strictfp { ; CHECK-LABEL: vfsub_vv_v2f32: ; CHECK: # %bb.0: # %entry @@ -162,7 +156,6 @@ define <2 x float> @vfsub_vf_v2f32(<2 x float> %va, float %b) strictfp { ret <2 x float> %vc } -declare <4 x float> @llvm.experimental.constrained.fsub.v4f32(<4 x float>, <4 x float>, metadata, metadata) define <4 x float> @vfsub_vv_v4f32(<4 x float> %va, <4 x float> %vb) strictfp { ; CHECK-LABEL: vfsub_vv_v4f32: ; CHECK: # %bb.0: # %entry @@ -186,7 +179,6 @@ define <4 x float> @vfsub_vf_v4f32(<4 x float> %va, float %b) strictfp { ret <4 x float> %vc } -declare <8 x float> @llvm.experimental.constrained.fsub.v8f32(<8 x float>, <8 x float>, metadata, metadata) define <8 x float> @vfsub_vv_v8f32(<8 x float> %va, <8 x float> %vb) strictfp { ; CHECK-LABEL: vfsub_vv_v8f32: ; CHECK: # %bb.0: # %entry @@ -222,7 +214,6 @@ define <8 x float> @vfsub_fv_v8f32(<8 x float> %va, float %b) strictfp { ret <8 x float> %vc } -declare <16 x float> @llvm.experimental.constrained.fsub.v16f32(<16 x float>, <16 x float>, metadata, metadata) define <16 x float> @vfsub_vv_v16f32(<16 x float> %va, <16 x float> %vb) strictfp { ; CHECK-LABEL: vfsub_vv_v16f32: ; CHECK: # %bb.0: # %entry @@ -246,7 +237,6 @@ define 
<16 x float> @vfsub_vf_v16f32(<16 x float> %va, float %b) strictfp { ret <16 x float> %vc } -declare <2 x double> @llvm.experimental.constrained.fsub.v2f64(<2 x double>, <2 x double>, metadata, metadata) define <2 x double> @vfsub_vv_v2f64(<2 x double> %va, <2 x double> %vb) strictfp { ; CHECK-LABEL: vfsub_vv_v2f64: ; CHECK: # %bb.0: # %entry @@ -270,7 +260,6 @@ define <2 x double> @vfsub_vf_v2f64(<2 x double> %va, double %b) strictfp { ret <2 x double> %vc } -declare <4 x double> @llvm.experimental.constrained.fsub.v4f64(<4 x double>, <4 x double>, metadata, metadata) define <4 x double> @vfsub_vv_v4f64(<4 x double> %va, <4 x double> %vb) strictfp { ; CHECK-LABEL: vfsub_vv_v4f64: ; CHECK: # %bb.0: # %entry @@ -294,7 +283,6 @@ define <4 x double> @vfsub_vf_v4f64(<4 x double> %va, double %b) strictfp { ret <4 x double> %vc } -declare <8 x double> @llvm.experimental.constrained.fsub.v8f64(<8 x double>, <8 x double>, metadata, metadata) define <8 x double> @vfsub_vv_v8f64(<8 x double> %va, <8 x double> %vb) strictfp { ; CHECK-LABEL: vfsub_vv_v8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-vp.ll index 0f3a6de4f4a90..6299d5d86acdd 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare <2 x half> @llvm.vp.fsub.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32) - define <2 x half> @vfsub_vv_v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsub_vv_v2f16: ; ZVFH: # %bb.0: @@ -104,8 +102,6 @@ define <2 x half> @vfsub_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext ret <2 x half> %v } -declare <3 x half> @llvm.vp.fsub.v3f16(<3 x half>, <3 x half>, <3 x i1>, i32) - define <3 x half> 
@vfsub_vv_v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsub_vv_v3f16: ; ZVFH: # %bb.0: @@ -127,8 +123,6 @@ define <3 x half> @vfsub_vv_v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i3 ret <3 x half> %v } -declare <4 x half> @llvm.vp.fsub.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32) - define <4 x half> @vfsub_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsub_vv_v4f16: ; ZVFH: # %bb.0: @@ -223,8 +217,6 @@ define <4 x half> @vfsub_vf_v4f16_unmasked(<4 x half> %va, half %b, i32 zeroext ret <4 x half> %v } -declare <8 x half> @llvm.vp.fsub.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32) - define <8 x half> @vfsub_vv_v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsub_vv_v8f16: ; ZVFH: # %bb.0: @@ -319,8 +311,6 @@ define <8 x half> @vfsub_vf_v8f16_unmasked(<8 x half> %va, half %b, i32 zeroext ret <8 x half> %v } -declare <16 x half> @llvm.vp.fsub.v16f16(<16 x half>, <16 x half>, <16 x i1>, i32) - define <16 x half> @vfsub_vv_v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsub_vv_v16f16: ; ZVFH: # %bb.0: @@ -415,8 +405,6 @@ define <16 x half> @vfsub_vf_v16f16_unmasked(<16 x half> %va, half %b, i32 zeroe ret <16 x half> %v } -declare <2 x float> @llvm.vp.fsub.v2f32(<2 x float>, <2 x float>, <2 x i1>, i32) - define <2 x float> @vfsub_vv_v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_v2f32: ; CHECK: # %bb.0: @@ -461,8 +449,6 @@ define <2 x float> @vfsub_vf_v2f32_unmasked(<2 x float> %va, float %b, i32 zeroe ret <2 x float> %v } -declare <4 x float> @llvm.vp.fsub.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32) - define <4 x float> @vfsub_vv_v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_v4f32: ; CHECK: # %bb.0: @@ -507,8 +493,6 @@ define <4 x float> @vfsub_vf_v4f32_unmasked(<4 x float> %va, float %b, i32 zeroe ret <4 x 
float> %v } -declare <8 x float> @llvm.vp.fsub.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32) - define <8 x float> @vfsub_vv_v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_v8f32: ; CHECK: # %bb.0: @@ -553,8 +537,6 @@ define <8 x float> @vfsub_vf_v8f32_unmasked(<8 x float> %va, float %b, i32 zeroe ret <8 x float> %v } -declare <16 x float> @llvm.vp.fsub.v16f32(<16 x float>, <16 x float>, <16 x i1>, i32) - define <16 x float> @vfsub_vv_v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_v16f32: ; CHECK: # %bb.0: @@ -599,8 +581,6 @@ define <16 x float> @vfsub_vf_v16f32_unmasked(<16 x float> %va, float %b, i32 ze ret <16 x float> %v } -declare <2 x double> @llvm.vp.fsub.v2f64(<2 x double>, <2 x double>, <2 x i1>, i32) - define <2 x double> @vfsub_vv_v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_v2f64: ; CHECK: # %bb.0: @@ -645,8 +625,6 @@ define <2 x double> @vfsub_vf_v2f64_unmasked(<2 x double> %va, double %b, i32 ze ret <2 x double> %v } -declare <4 x double> @llvm.vp.fsub.v4f64(<4 x double>, <4 x double>, <4 x i1>, i32) - define <4 x double> @vfsub_vv_v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_v4f64: ; CHECK: # %bb.0: @@ -691,8 +669,6 @@ define <4 x double> @vfsub_vf_v4f64_unmasked(<4 x double> %va, double %b, i32 ze ret <4 x double> %v } -declare <8 x double> @llvm.vp.fsub.v8f64(<8 x double>, <8 x double>, <8 x i1>, i32) - define <8 x double> @vfsub_vv_v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_v8f64: ; CHECK: # %bb.0: @@ -737,8 +713,6 @@ define <8 x double> @vfsub_vf_v8f64_unmasked(<8 x double> %va, double %b, i32 ze ret <8 x double> %v } -declare <16 x double> @llvm.vp.fsub.v16f64(<16 x double>, <16 x double>, <16 x i1>, i32) - define <16 x double> @vfsub_vv_v16f64(<16 x double> %va, <16 x double> %b, <16 x 
i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_v16f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmacc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmacc.ll index a9e9b757f372e..b5f844b7c7e6d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmacc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmacc.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare <1 x float> @llvm.fma.v1f32(<1 x float>, <1 x float>, <1 x float>) - define <1 x float> @vfwmacc_vv_v1f32(<1 x float> %va, <1 x half> %vb, <1 x half> %vc) { ; CHECK-LABEL: vfwmacc_vv_v1f32: ; CHECK: # %bb.0: @@ -149,8 +147,6 @@ define <1 x float> @vfwnmsac_fv_v1f32(<1 x float> %va, <1 x half> %vb, half %c) ret <1 x float> %vg } -declare <2 x float> @llvm.fma.v2f32(<2 x float>, <2 x float>, <2 x float>) - define <2 x float> @vfwmacc_vv_v2f32(<2 x float> %va, <2 x half> %vb, <2 x half> %vc) { ; CHECK-LABEL: vfwmacc_vv_v2f32: ; CHECK: # %bb.0: @@ -294,9 +290,6 @@ define <2 x float> @vfwnmsac_fv_v2f32(<2 x float> %va, <2 x half> %vb, half %c) ret <2 x float> %vg } - -declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>) - define <4 x float> @vfwmacc_vv_v4f32(<4 x float> %va, <4 x half> %vb, <4 x half> %vc) { ; CHECK-LABEL: vfwmacc_vv_v4f32: ; CHECK: # %bb.0: @@ -440,8 +433,6 @@ define <4 x float> @vfwnmsac_fv_v4f32(<4 x float> %va, <4 x half> %vb, half %c) ret <4 x float> %vg } -declare <8 x float> @llvm.fma.v8f32(<8 x float>, <8 x float>, <8 x float>) - define <8 x float> @vfwmacc_vv_v8f32(<8 x float> %va, <8 x half> %vb, <8 x half> %vc) { ; CHECK-LABEL: vfwmacc_vv_v8f32: ; CHECK: # %bb.0: @@ -585,8 +576,6 @@ define <8 x float> @vfwnmsac_fv_v8f32(<8 x float> %va, <8 x half> %vb, half %c) ret <8 x float> %vg } -declare <16 x float> @llvm.fma.v16f32(<16 x float>, <16 x float>, <16 x float>) - define <16 x float> @vfwmacc_vv_v16f32(<16 x float> %va, <16 x 
half> %vb, <16 x half> %vc) { ; CHECK-LABEL: vfwmacc_vv_v16f32: ; CHECK: # %bb.0: @@ -730,8 +719,6 @@ define <16 x float> @vfwnmsac_fv_v16f32(<16 x float> %va, <16 x half> %vb, half ret <16 x float> %vg } -declare <1 x double> @llvm.fma.v1f64(<1 x double>, <1 x double>, <1 x double>) - define <1 x double> @vfwmacc_vv_v1f64(<1 x double> %va, <1 x float> %vb, <1 x float> %vc) { ; CHECK-LABEL: vfwmacc_vv_v1f64: ; CHECK: # %bb.0: @@ -875,8 +862,6 @@ define <1 x double> @vfwnmsac_fv_v1f64(<1 x double> %va, <1 x float> %vb, float ret <1 x double> %vg } -declare <2 x double> @llvm.fma.v2f64(<2 x double>, <2 x double>, <2 x double>) - define <2 x double> @vfwmacc_vv_v2f64(<2 x double> %va, <2 x float> %vb, <2 x float> %vc) { ; CHECK-LABEL: vfwmacc_vv_v2f64: ; CHECK: # %bb.0: @@ -1020,9 +1005,6 @@ define <2 x double> @vfwnmsac_fv_v2f64(<2 x double> %va, <2 x float> %vb, float ret <2 x double> %vg } - -declare <4 x double> @llvm.fma.v4f64(<4 x double>, <4 x double>, <4 x double>) - define <4 x double> @vfwmacc_vv_v4f64(<4 x double> %va, <4 x float> %vb, <4 x float> %vc) { ; CHECK-LABEL: vfwmacc_vv_v4f64: ; CHECK: # %bb.0: @@ -1166,8 +1148,6 @@ define <4 x double> @vfwnmsac_fv_v4f64(<4 x double> %va, <4 x float> %vb, float ret <4 x double> %vg } -declare <8 x double> @llvm.fma.v8f64(<8 x double>, <8 x double>, <8 x double>) - define <8 x double> @vfwmacc_vv_v8f64(<8 x double> %va, <8 x float> %vb, <8 x float> %vc) { ; CHECK-LABEL: vfwmacc_vv_v8f64: ; CHECK: # %bb.0: @@ -1669,7 +1649,6 @@ define <2 x double> @vfwnmsac_fv_v2f64_v2f16(<2 x double> %va, <2 x half> %vb, h ret <2 x double> %vg } - define <4 x double> @vfwmacc_vv_v4f64_v4f16(<4 x double> %va, <4 x half> %vb, <4 x half> %vc) { ; CHECK-LABEL: vfwmacc_vv_v4f64_v4f16: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vitofp-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vitofp-constrained-sdnode.ll index ce5483e3ae8cc..8b436d080f065 100644 --- 
a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vitofp-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vitofp-constrained-sdnode.ll @@ -4,7 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 -declare <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i1(<1 x i1>, metadata, metadata) define <1 x half> @vsitofp_v1i1_v1f16(<1 x i1> %va) strictfp { ; CHECK-LABEL: vsitofp_v1i1_v1f16: ; CHECK: # %bb.0: @@ -17,7 +16,6 @@ define <1 x half> @vsitofp_v1i1_v1f16(<1 x i1> %va) strictfp { ret <1 x half> %evec } -declare <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i1(<1 x i1>, metadata, metadata) define <1 x half> @vuitofp_v1i1_v1f16(<1 x i1> %va) strictfp { ; CHECK-LABEL: vuitofp_v1i1_v1f16: ; CHECK: # %bb.0: @@ -30,7 +28,6 @@ define <1 x half> @vuitofp_v1i1_v1f16(<1 x i1> %va) strictfp { ret <1 x half> %evec } -declare <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i1(<1 x i1>, metadata, metadata) define <1 x float> @vsitofp_v1i1_v1f32(<1 x i1> %va) strictfp { ; CHECK-LABEL: vsitofp_v1i1_v1f32: ; CHECK: # %bb.0: @@ -43,7 +40,6 @@ define <1 x float> @vsitofp_v1i1_v1f32(<1 x i1> %va) strictfp { ret <1 x float> %evec } -declare <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i1(<1 x i1>, metadata, metadata) define <1 x float> @vuitofp_v1i1_v1f32(<1 x i1> %va) strictfp { ; CHECK-LABEL: vuitofp_v1i1_v1f32: ; CHECK: # %bb.0: @@ -56,7 +52,6 @@ define <1 x float> @vuitofp_v1i1_v1f32(<1 x i1> %va) strictfp { ret <1 x float> %evec } -declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i1(<1 x i1>, metadata, metadata) define <1 x double> @vsitofp_v1i1_v1f64(<1 x i1> %va) strictfp { ; CHECK-LABEL: vsitofp_v1i1_v1f64: ; CHECK: # %bb.0: @@ -69,7 +64,6 @@ define <1 x double> @vsitofp_v1i1_v1f64(<1 x i1> %va) strictfp { ret <1 x double> %evec } -declare <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i1(<1 
x i1>, metadata, metadata) define <1 x double> @vuitofp_v1i1_v1f64(<1 x i1> %va) strictfp { ; CHECK-LABEL: vuitofp_v1i1_v1f64: ; CHECK: # %bb.0: @@ -82,7 +76,6 @@ define <1 x double> @vuitofp_v1i1_v1f64(<1 x i1> %va) strictfp { ret <1 x double> %evec } -declare <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i1(<2 x i1>, metadata, metadata) define <2 x half> @vsitofp_v2i1_v2f16(<2 x i1> %va) strictfp { ; CHECK-LABEL: vsitofp_v2i1_v2f16: ; CHECK: # %bb.0: @@ -95,7 +88,6 @@ define <2 x half> @vsitofp_v2i1_v2f16(<2 x i1> %va) strictfp { ret <2 x half> %evec } -declare <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i1(<2 x i1>, metadata, metadata) define <2 x half> @vuitofp_v2i1_v2f16(<2 x i1> %va) strictfp { ; CHECK-LABEL: vuitofp_v2i1_v2f16: ; CHECK: # %bb.0: @@ -108,7 +100,6 @@ define <2 x half> @vuitofp_v2i1_v2f16(<2 x i1> %va) strictfp { ret <2 x half> %evec } -declare <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i1(<2 x i1>, metadata, metadata) define <2 x float> @vsitofp_v2i1_v2f32(<2 x i1> %va) strictfp { ; CHECK-LABEL: vsitofp_v2i1_v2f32: ; CHECK: # %bb.0: @@ -121,7 +112,6 @@ define <2 x float> @vsitofp_v2i1_v2f32(<2 x i1> %va) strictfp { ret <2 x float> %evec } -declare <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i1(<2 x i1>, metadata, metadata) define <2 x float> @vuitofp_v2i1_v2f32(<2 x i1> %va) strictfp { ; CHECK-LABEL: vuitofp_v2i1_v2f32: ; CHECK: # %bb.0: @@ -134,7 +124,6 @@ define <2 x float> @vuitofp_v2i1_v2f32(<2 x i1> %va) strictfp { ret <2 x float> %evec } -declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i1(<2 x i1>, metadata, metadata) define <2 x double> @vsitofp_v2i1_v2f64(<2 x i1> %va) strictfp { ; CHECK-LABEL: vsitofp_v2i1_v2f64: ; CHECK: # %bb.0: @@ -147,7 +136,6 @@ define <2 x double> @vsitofp_v2i1_v2f64(<2 x i1> %va) strictfp { ret <2 x double> %evec } -declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i1(<2 x i1>, metadata, metadata) define <2 x double> 
@vuitofp_v2i1_v2f64(<2 x i1> %va) strictfp { ; CHECK-LABEL: vuitofp_v2i1_v2f64: ; CHECK: # %bb.0: @@ -160,7 +148,6 @@ define <2 x double> @vuitofp_v2i1_v2f64(<2 x i1> %va) strictfp { ret <2 x double> %evec } -declare <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i1(<4 x i1>, metadata, metadata) define <4 x half> @vsitofp_v4i1_v4f16(<4 x i1> %va) strictfp { ; CHECK-LABEL: vsitofp_v4i1_v4f16: ; CHECK: # %bb.0: @@ -173,7 +160,6 @@ define <4 x half> @vsitofp_v4i1_v4f16(<4 x i1> %va) strictfp { ret <4 x half> %evec } -declare <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i1(<4 x i1>, metadata, metadata) define <4 x half> @vuitofp_v4i1_v4f16(<4 x i1> %va) strictfp { ; CHECK-LABEL: vuitofp_v4i1_v4f16: ; CHECK: # %bb.0: @@ -186,7 +172,6 @@ define <4 x half> @vuitofp_v4i1_v4f16(<4 x i1> %va) strictfp { ret <4 x half> %evec } -declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i1(<4 x i1>, metadata, metadata) define <4 x float> @vsitofp_v4i1_v4f32(<4 x i1> %va) strictfp { ; CHECK-LABEL: vsitofp_v4i1_v4f32: ; CHECK: # %bb.0: @@ -199,7 +184,6 @@ define <4 x float> @vsitofp_v4i1_v4f32(<4 x i1> %va) strictfp { ret <4 x float> %evec } -declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i1(<4 x i1>, metadata, metadata) define <4 x float> @vuitofp_v4i1_v4f32(<4 x i1> %va) strictfp { ; CHECK-LABEL: vuitofp_v4i1_v4f32: ; CHECK: # %bb.0: @@ -212,7 +196,6 @@ define <4 x float> @vuitofp_v4i1_v4f32(<4 x i1> %va) strictfp { ret <4 x float> %evec } -declare <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i1(<4 x i1>, metadata, metadata) define <4 x double> @vsitofp_v4i1_v4f64(<4 x i1> %va) strictfp { ; CHECK-LABEL: vsitofp_v4i1_v4f64: ; CHECK: # %bb.0: @@ -225,7 +208,6 @@ define <4 x double> @vsitofp_v4i1_v4f64(<4 x i1> %va) strictfp { ret <4 x double> %evec } -declare <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i1(<4 x i1>, metadata, metadata) define <4 x double> @vuitofp_v4i1_v4f64(<4 x i1> %va) strictfp { ; 
CHECK-LABEL: vuitofp_v4i1_v4f64: ; CHECK: # %bb.0: @@ -238,7 +220,6 @@ define <4 x double> @vuitofp_v4i1_v4f64(<4 x i1> %va) strictfp { ret <4 x double> %evec } -declare <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i1(<8 x i1>, metadata, metadata) define <8 x half> @vsitofp_v8i1_v8f16(<8 x i1> %va) strictfp { ; CHECK-LABEL: vsitofp_v8i1_v8f16: ; CHECK: # %bb.0: @@ -251,7 +232,6 @@ define <8 x half> @vsitofp_v8i1_v8f16(<8 x i1> %va) strictfp { ret <8 x half> %evec } -declare <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i1(<8 x i1>, metadata, metadata) define <8 x half> @vuitofp_v8i1_v8f16(<8 x i1> %va) strictfp { ; CHECK-LABEL: vuitofp_v8i1_v8f16: ; CHECK: # %bb.0: @@ -264,7 +244,6 @@ define <8 x half> @vuitofp_v8i1_v8f16(<8 x i1> %va) strictfp { ret <8 x half> %evec } -declare <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i1(<8 x i1>, metadata, metadata) define <8 x float> @vsitofp_v8i1_v8f32(<8 x i1> %va) strictfp { ; CHECK-LABEL: vsitofp_v8i1_v8f32: ; CHECK: # %bb.0: @@ -277,7 +256,6 @@ define <8 x float> @vsitofp_v8i1_v8f32(<8 x i1> %va) strictfp { ret <8 x float> %evec } -declare <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i1(<8 x i1>, metadata, metadata) define <8 x float> @vuitofp_v8i1_v8f32(<8 x i1> %va) strictfp { ; CHECK-LABEL: vuitofp_v8i1_v8f32: ; CHECK: # %bb.0: @@ -290,7 +268,6 @@ define <8 x float> @vuitofp_v8i1_v8f32(<8 x i1> %va) strictfp { ret <8 x float> %evec } -declare <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i1(<8 x i1>, metadata, metadata) define <8 x double> @vsitofp_v8i1_v8f64(<8 x i1> %va) strictfp { ; CHECK-LABEL: vsitofp_v8i1_v8f64: ; CHECK: # %bb.0: @@ -303,7 +280,6 @@ define <8 x double> @vsitofp_v8i1_v8f64(<8 x i1> %va) strictfp { ret <8 x double> %evec } -declare <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i1(<8 x i1>, metadata, metadata) define <8 x double> @vuitofp_v8i1_v8f64(<8 x i1> %va) strictfp { ; CHECK-LABEL: vuitofp_v8i1_v8f64: ; CHECK: # 
%bb.0: @@ -316,7 +292,6 @@ define <8 x double> @vuitofp_v8i1_v8f64(<8 x i1> %va) strictfp { ret <8 x double> %evec } -declare <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i1(<16 x i1>, metadata, metadata) define <16 x half> @vsitofp_v16i1_v16f16(<16 x i1> %va) strictfp { ; CHECK-LABEL: vsitofp_v16i1_v16f16: ; CHECK: # %bb.0: @@ -329,7 +304,6 @@ define <16 x half> @vsitofp_v16i1_v16f16(<16 x i1> %va) strictfp { ret <16 x half> %evec } -declare <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i1(<16 x i1>, metadata, metadata) define <16 x half> @vuitofp_v16i1_v16f16(<16 x i1> %va) strictfp { ; CHECK-LABEL: vuitofp_v16i1_v16f16: ; CHECK: # %bb.0: @@ -342,7 +316,6 @@ define <16 x half> @vuitofp_v16i1_v16f16(<16 x i1> %va) strictfp { ret <16 x half> %evec } -declare <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i1(<16 x i1>, metadata, metadata) define <16 x float> @vsitofp_v16i1_v16f32(<16 x i1> %va) strictfp { ; CHECK-LABEL: vsitofp_v16i1_v16f32: ; CHECK: # %bb.0: @@ -355,7 +328,6 @@ define <16 x float> @vsitofp_v16i1_v16f32(<16 x i1> %va) strictfp { ret <16 x float> %evec } -declare <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i1(<16 x i1>, metadata, metadata) define <16 x float> @vuitofp_v16i1_v16f32(<16 x i1> %va) strictfp { ; CHECK-LABEL: vuitofp_v16i1_v16f32: ; CHECK: # %bb.0: @@ -368,7 +340,6 @@ define <16 x float> @vuitofp_v16i1_v16f32(<16 x i1> %va) strictfp { ret <16 x float> %evec } -declare <32 x half> @llvm.experimental.constrained.sitofp.v32f16.v32i1(<32 x i1>, metadata, metadata) define <32 x half> @vsitofp_v32i1_v32f16(<32 x i1> %va) strictfp { ; CHECK-LABEL: vsitofp_v32i1_v32f16: ; CHECK: # %bb.0: @@ -382,7 +353,6 @@ define <32 x half> @vsitofp_v32i1_v32f16(<32 x i1> %va) strictfp { ret <32 x half> %evec } -declare <32 x half> @llvm.experimental.constrained.uitofp.v32f16.v32i1(<32 x i1>, metadata, metadata) define <32 x half> @vuitofp_v32i1_v32f16(<32 x i1> %va) strictfp { ; CHECK-LABEL: 
vuitofp_v32i1_v32f16: ; CHECK: # %bb.0: @@ -396,7 +366,6 @@ define <32 x half> @vuitofp_v32i1_v32f16(<32 x i1> %va) strictfp { ret <32 x half> %evec } -declare <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i8(<1 x i8>, metadata, metadata) define <1 x half> @vsitofp_v1i8_v1f16(<1 x i8> %va) strictfp { ; CHECK-LABEL: vsitofp_v1i8_v1f16: ; CHECK: # %bb.0: @@ -408,7 +377,6 @@ define <1 x half> @vsitofp_v1i8_v1f16(<1 x i8> %va) strictfp { ret <1 x half> %evec } -declare <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i7(<1 x i7>, metadata, metadata) define <1 x half> @vsitofp_v1i7_v1f16(<1 x i7> %va) strictfp { ; RV32-LABEL: vsitofp_v1i7_v1f16: ; RV32: # %bb.0: @@ -431,7 +399,6 @@ define <1 x half> @vsitofp_v1i7_v1f16(<1 x i7> %va) strictfp { ret <1 x half> %evec } -declare <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i7(<1 x i7>, metadata, metadata) define <1 x half> @vuitofp_v1i7_v1f16(<1 x i7> %va) strictfp { ; CHECK-LABEL: vuitofp_v1i7_v1f16: ; CHECK: # %bb.0: @@ -444,7 +411,6 @@ define <1 x half> @vuitofp_v1i7_v1f16(<1 x i7> %va) strictfp { ret <1 x half> %evec } -declare <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i8(<1 x i8>, metadata, metadata) define <1 x half> @vuitofp_v1i8_v1f16(<1 x i8> %va) strictfp { ; CHECK-LABEL: vuitofp_v1i8_v1f16: ; CHECK: # %bb.0: @@ -456,7 +422,6 @@ define <1 x half> @vuitofp_v1i8_v1f16(<1 x i8> %va) strictfp { ret <1 x half> %evec } -declare <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i8(<1 x i8>, metadata, metadata) define <1 x float> @vsitofp_v1i8_v1f32(<1 x i8> %va) strictfp { ; CHECK-LABEL: vsitofp_v1i8_v1f32: ; CHECK: # %bb.0: @@ -468,7 +433,6 @@ define <1 x float> @vsitofp_v1i8_v1f32(<1 x i8> %va) strictfp { ret <1 x float> %evec } -declare <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i8(<1 x i8>, metadata, metadata) define <1 x float> @vuitofp_v1i8_v1f32(<1 x i8> %va) strictfp { ; CHECK-LABEL: vuitofp_v1i8_v1f32: ; CHECK: # %bb.0: @@ -480,7 +444,6 @@ 
define <1 x float> @vuitofp_v1i8_v1f32(<1 x i8> %va) strictfp { ret <1 x float> %evec } -declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i8(<1 x i8>, metadata, metadata) define <1 x double> @vsitofp_v1i8_v1f64(<1 x i8> %va) strictfp { ; CHECK-LABEL: vsitofp_v1i8_v1f64: ; CHECK: # %bb.0: @@ -492,7 +455,6 @@ define <1 x double> @vsitofp_v1i8_v1f64(<1 x i8> %va) strictfp { ret <1 x double> %evec } -declare <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i8(<1 x i8>, metadata, metadata) define <1 x double> @vuitofp_v1i8_v1f64(<1 x i8> %va) strictfp { ; CHECK-LABEL: vuitofp_v1i8_v1f64: ; CHECK: # %bb.0: @@ -504,7 +466,6 @@ define <1 x double> @vuitofp_v1i8_v1f64(<1 x i8> %va) strictfp { ret <1 x double> %evec } -declare <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i8(<2 x i8>, metadata, metadata) define <2 x half> @vsitofp_v2i8_v2f16(<2 x i8> %va) strictfp { ; CHECK-LABEL: vsitofp_v2i8_v2f16: ; CHECK: # %bb.0: @@ -516,7 +477,6 @@ define <2 x half> @vsitofp_v2i8_v2f16(<2 x i8> %va) strictfp { ret <2 x half> %evec } -declare <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i8(<2 x i8>, metadata, metadata) define <2 x half> @vuitofp_v2i8_v2f16(<2 x i8> %va) strictfp { ; CHECK-LABEL: vuitofp_v2i8_v2f16: ; CHECK: # %bb.0: @@ -528,7 +488,6 @@ define <2 x half> @vuitofp_v2i8_v2f16(<2 x i8> %va) strictfp { ret <2 x half> %evec } -declare <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i8(<2 x i8>, metadata, metadata) define <2 x float> @vsitofp_v2i8_v2f32(<2 x i8> %va) strictfp { ; CHECK-LABEL: vsitofp_v2i8_v2f32: ; CHECK: # %bb.0: @@ -540,7 +499,6 @@ define <2 x float> @vsitofp_v2i8_v2f32(<2 x i8> %va) strictfp { ret <2 x float> %evec } -declare <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i8(<2 x i8>, metadata, metadata) define <2 x float> @vuitofp_v2i8_v2f32(<2 x i8> %va) strictfp { ; CHECK-LABEL: vuitofp_v2i8_v2f32: ; CHECK: # %bb.0: @@ -552,7 +510,6 @@ define <2 x float> @vuitofp_v2i8_v2f32(<2 x i8> 
%va) strictfp { ret <2 x float> %evec } -declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i8(<2 x i8>, metadata, metadata) define <2 x double> @vsitofp_v2i8_v2f64(<2 x i8> %va) strictfp { ; CHECK-LABEL: vsitofp_v2i8_v2f64: ; CHECK: # %bb.0: @@ -564,7 +521,6 @@ define <2 x double> @vsitofp_v2i8_v2f64(<2 x i8> %va) strictfp { ret <2 x double> %evec } -declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i8(<2 x i8>, metadata, metadata) define <2 x double> @vuitofp_v2i8_v2f64(<2 x i8> %va) strictfp { ; CHECK-LABEL: vuitofp_v2i8_v2f64: ; CHECK: # %bb.0: @@ -576,7 +532,6 @@ define <2 x double> @vuitofp_v2i8_v2f64(<2 x i8> %va) strictfp { ret <2 x double> %evec } -declare <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i8(<4 x i8>, metadata, metadata) define <4 x half> @vsitofp_v4i8_v4f16(<4 x i8> %va) strictfp { ; CHECK-LABEL: vsitofp_v4i8_v4f16: ; CHECK: # %bb.0: @@ -588,7 +543,6 @@ define <4 x half> @vsitofp_v4i8_v4f16(<4 x i8> %va) strictfp { ret <4 x half> %evec } -declare <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i8(<4 x i8>, metadata, metadata) define <4 x half> @vuitofp_v4i8_v4f16(<4 x i8> %va) strictfp { ; CHECK-LABEL: vuitofp_v4i8_v4f16: ; CHECK: # %bb.0: @@ -600,7 +554,6 @@ define <4 x half> @vuitofp_v4i8_v4f16(<4 x i8> %va) strictfp { ret <4 x half> %evec } -declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i8(<4 x i8>, metadata, metadata) define <4 x float> @vsitofp_v4i8_v4f32(<4 x i8> %va) strictfp { ; CHECK-LABEL: vsitofp_v4i8_v4f32: ; CHECK: # %bb.0: @@ -612,7 +565,6 @@ define <4 x float> @vsitofp_v4i8_v4f32(<4 x i8> %va) strictfp { ret <4 x float> %evec } -declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i8(<4 x i8>, metadata, metadata) define <4 x float> @vuitofp_v4i8_v4f32(<4 x i8> %va) strictfp { ; CHECK-LABEL: vuitofp_v4i8_v4f32: ; CHECK: # %bb.0: @@ -624,7 +576,6 @@ define <4 x float> @vuitofp_v4i8_v4f32(<4 x i8> %va) strictfp { ret <4 x float> %evec } -declare 
<4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i8(<4 x i8>, metadata, metadata) define <4 x double> @vsitofp_v4i8_v4f64(<4 x i8> %va) strictfp { ; CHECK-LABEL: vsitofp_v4i8_v4f64: ; CHECK: # %bb.0: @@ -636,7 +587,6 @@ define <4 x double> @vsitofp_v4i8_v4f64(<4 x i8> %va) strictfp { ret <4 x double> %evec } -declare <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i8(<4 x i8>, metadata, metadata) define <4 x double> @vuitofp_v4i8_v4f64(<4 x i8> %va) strictfp { ; CHECK-LABEL: vuitofp_v4i8_v4f64: ; CHECK: # %bb.0: @@ -648,7 +598,6 @@ define <4 x double> @vuitofp_v4i8_v4f64(<4 x i8> %va) strictfp { ret <4 x double> %evec } -declare <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i8(<8 x i8>, metadata, metadata) define <8 x half> @vsitofp_v8i8_v8f16(<8 x i8> %va) strictfp { ; CHECK-LABEL: vsitofp_v8i8_v8f16: ; CHECK: # %bb.0: @@ -660,7 +609,6 @@ define <8 x half> @vsitofp_v8i8_v8f16(<8 x i8> %va) strictfp { ret <8 x half> %evec } -declare <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i8(<8 x i8>, metadata, metadata) define <8 x half> @vuitofp_v8i8_v8f16(<8 x i8> %va) strictfp { ; CHECK-LABEL: vuitofp_v8i8_v8f16: ; CHECK: # %bb.0: @@ -672,7 +620,6 @@ define <8 x half> @vuitofp_v8i8_v8f16(<8 x i8> %va) strictfp { ret <8 x half> %evec } -declare <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i8(<8 x i8>, metadata, metadata) define <8 x float> @vsitofp_v8i8_v8f32(<8 x i8> %va) strictfp { ; CHECK-LABEL: vsitofp_v8i8_v8f32: ; CHECK: # %bb.0: @@ -684,7 +631,6 @@ define <8 x float> @vsitofp_v8i8_v8f32(<8 x i8> %va) strictfp { ret <8 x float> %evec } -declare <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i8(<8 x i8>, metadata, metadata) define <8 x float> @vuitofp_v8i8_v8f32(<8 x i8> %va) strictfp { ; CHECK-LABEL: vuitofp_v8i8_v8f32: ; CHECK: # %bb.0: @@ -696,7 +642,6 @@ define <8 x float> @vuitofp_v8i8_v8f32(<8 x i8> %va) strictfp { ret <8 x float> %evec } -declare <8 x double> 
@llvm.experimental.constrained.sitofp.v8f64.v8i8(<8 x i8>, metadata, metadata) define <8 x double> @vsitofp_v8i8_v8f64(<8 x i8> %va) strictfp { ; CHECK-LABEL: vsitofp_v8i8_v8f64: ; CHECK: # %bb.0: @@ -708,7 +653,6 @@ define <8 x double> @vsitofp_v8i8_v8f64(<8 x i8> %va) strictfp { ret <8 x double> %evec } -declare <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i8(<8 x i8>, metadata, metadata) define <8 x double> @vuitofp_v8i8_v8f64(<8 x i8> %va) strictfp { ; CHECK-LABEL: vuitofp_v8i8_v8f64: ; CHECK: # %bb.0: @@ -720,7 +664,6 @@ define <8 x double> @vuitofp_v8i8_v8f64(<8 x i8> %va) strictfp { ret <8 x double> %evec } -declare <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i8(<16 x i8>, metadata, metadata) define <16 x half> @vsitofp_v16i8_v16f16(<16 x i8> %va) strictfp { ; CHECK-LABEL: vsitofp_v16i8_v16f16: ; CHECK: # %bb.0: @@ -732,7 +675,6 @@ define <16 x half> @vsitofp_v16i8_v16f16(<16 x i8> %va) strictfp { ret <16 x half> %evec } -declare <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i8(<16 x i8>, metadata, metadata) define <16 x half> @vuitofp_v16i8_v16f16(<16 x i8> %va) strictfp { ; CHECK-LABEL: vuitofp_v16i8_v16f16: ; CHECK: # %bb.0: @@ -744,7 +686,6 @@ define <16 x half> @vuitofp_v16i8_v16f16(<16 x i8> %va) strictfp { ret <16 x half> %evec } -declare <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i8(<16 x i8>, metadata, metadata) define <16 x float> @vsitofp_v16i8_v16f32(<16 x i8> %va) strictfp { ; CHECK-LABEL: vsitofp_v16i8_v16f32: ; CHECK: # %bb.0: @@ -756,7 +697,6 @@ define <16 x float> @vsitofp_v16i8_v16f32(<16 x i8> %va) strictfp { ret <16 x float> %evec } -declare <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i8(<16 x i8>, metadata, metadata) define <16 x float> @vuitofp_v16i8_v16f32(<16 x i8> %va) strictfp { ; CHECK-LABEL: vuitofp_v16i8_v16f32: ; CHECK: # %bb.0: @@ -768,7 +708,6 @@ define <16 x float> @vuitofp_v16i8_v16f32(<16 x i8> %va) strictfp { ret <16 x float> %evec } -declare 
<32 x half> @llvm.experimental.constrained.sitofp.v32f16.v32i8(<32 x i8>, metadata, metadata) define <32 x half> @vsitofp_v32i8_v32f16(<32 x i8> %va) strictfp { ; CHECK-LABEL: vsitofp_v32i8_v32f16: ; CHECK: # %bb.0: @@ -782,7 +721,6 @@ define <32 x half> @vsitofp_v32i8_v32f16(<32 x i8> %va) strictfp { ret <32 x half> %evec } -declare <32 x half> @llvm.experimental.constrained.uitofp.v32f16.v32i8(<32 x i8>, metadata, metadata) define <32 x half> @vuitofp_v32i8_v32f16(<32 x i8> %va) strictfp { ; CHECK-LABEL: vuitofp_v32i8_v32f16: ; CHECK: # %bb.0: @@ -796,7 +734,6 @@ define <32 x half> @vuitofp_v32i8_v32f16(<32 x i8> %va) strictfp { ret <32 x half> %evec } -declare <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i16(<1 x i16>, metadata, metadata) define <1 x half> @vsitofp_v1i16_v1f16(<1 x i16> %va) strictfp { ; CHECK-LABEL: vsitofp_v1i16_v1f16: ; CHECK: # %bb.0: @@ -807,7 +744,6 @@ define <1 x half> @vsitofp_v1i16_v1f16(<1 x i16> %va) strictfp { ret <1 x half> %evec } -declare <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i16(<1 x i16>, metadata, metadata) define <1 x half> @vuitofp_v1i16_v1f16(<1 x i16> %va) strictfp { ; CHECK-LABEL: vuitofp_v1i16_v1f16: ; CHECK: # %bb.0: @@ -818,7 +754,6 @@ define <1 x half> @vuitofp_v1i16_v1f16(<1 x i16> %va) strictfp { ret <1 x half> %evec } -declare <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i16(<1 x i16>, metadata, metadata) define <1 x float> @vsitofp_v1i16_v1f32(<1 x i16> %va) strictfp { ; CHECK-LABEL: vsitofp_v1i16_v1f32: ; CHECK: # %bb.0: @@ -830,7 +765,6 @@ define <1 x float> @vsitofp_v1i16_v1f32(<1 x i16> %va) strictfp { ret <1 x float> %evec } -declare <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i16(<1 x i16>, metadata, metadata) define <1 x float> @vuitofp_v1i16_v1f32(<1 x i16> %va) strictfp { ; CHECK-LABEL: vuitofp_v1i16_v1f32: ; CHECK: # %bb.0: @@ -842,7 +776,6 @@ define <1 x float> @vuitofp_v1i16_v1f32(<1 x i16> %va) strictfp { ret <1 x float> %evec } -declare <1 x 
double> @llvm.experimental.constrained.sitofp.v1f64.v1i16(<1 x i16>, metadata, metadata) define <1 x double> @vsitofp_v1i16_v1f64(<1 x i16> %va) strictfp { ; CHECK-LABEL: vsitofp_v1i16_v1f64: ; CHECK: # %bb.0: @@ -854,7 +787,6 @@ define <1 x double> @vsitofp_v1i16_v1f64(<1 x i16> %va) strictfp { ret <1 x double> %evec } -declare <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i16(<1 x i16>, metadata, metadata) define <1 x double> @vuitofp_v1i16_v1f64(<1 x i16> %va) strictfp { ; CHECK-LABEL: vuitofp_v1i16_v1f64: ; CHECK: # %bb.0: @@ -866,7 +798,6 @@ define <1 x double> @vuitofp_v1i16_v1f64(<1 x i16> %va) strictfp { ret <1 x double> %evec } -declare <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i16(<2 x i16>, metadata, metadata) define <2 x half> @vsitofp_v2i16_v2f16(<2 x i16> %va) strictfp { ; CHECK-LABEL: vsitofp_v2i16_v2f16: ; CHECK: # %bb.0: @@ -877,7 +808,6 @@ define <2 x half> @vsitofp_v2i16_v2f16(<2 x i16> %va) strictfp { ret <2 x half> %evec } -declare <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i16(<2 x i16>, metadata, metadata) define <2 x half> @vuitofp_v2i16_v2f16(<2 x i16> %va) strictfp { ; CHECK-LABEL: vuitofp_v2i16_v2f16: ; CHECK: # %bb.0: @@ -888,7 +818,6 @@ define <2 x half> @vuitofp_v2i16_v2f16(<2 x i16> %va) strictfp { ret <2 x half> %evec } -declare <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i16(<2 x i16>, metadata, metadata) define <2 x float> @vsitofp_v2i16_v2f32(<2 x i16> %va) strictfp { ; CHECK-LABEL: vsitofp_v2i16_v2f32: ; CHECK: # %bb.0: @@ -900,7 +829,6 @@ define <2 x float> @vsitofp_v2i16_v2f32(<2 x i16> %va) strictfp { ret <2 x float> %evec } -declare <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i16(<2 x i16>, metadata, metadata) define <2 x float> @vuitofp_v2i16_v2f32(<2 x i16> %va) strictfp { ; CHECK-LABEL: vuitofp_v2i16_v2f32: ; CHECK: # %bb.0: @@ -912,7 +840,6 @@ define <2 x float> @vuitofp_v2i16_v2f32(<2 x i16> %va) strictfp { ret <2 x float> %evec } -declare <2 x double> 
@llvm.experimental.constrained.sitofp.v2f64.v2i16(<2 x i16>, metadata, metadata) define <2 x double> @vsitofp_v2i16_v2f64(<2 x i16> %va) strictfp { ; CHECK-LABEL: vsitofp_v2i16_v2f64: ; CHECK: # %bb.0: @@ -924,7 +851,6 @@ define <2 x double> @vsitofp_v2i16_v2f64(<2 x i16> %va) strictfp { ret <2 x double> %evec } -declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i16(<2 x i16>, metadata, metadata) define <2 x double> @vuitofp_v2i16_v2f64(<2 x i16> %va) strictfp { ; CHECK-LABEL: vuitofp_v2i16_v2f64: ; CHECK: # %bb.0: @@ -936,7 +862,6 @@ define <2 x double> @vuitofp_v2i16_v2f64(<2 x i16> %va) strictfp { ret <2 x double> %evec } -declare <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i16(<4 x i16>, metadata, metadata) define <4 x half> @vsitofp_v4i16_v4f16(<4 x i16> %va) strictfp { ; CHECK-LABEL: vsitofp_v4i16_v4f16: ; CHECK: # %bb.0: @@ -947,7 +872,6 @@ define <4 x half> @vsitofp_v4i16_v4f16(<4 x i16> %va) strictfp { ret <4 x half> %evec } -declare <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i16(<4 x i16>, metadata, metadata) define <4 x half> @vuitofp_v4i16_v4f16(<4 x i16> %va) strictfp { ; CHECK-LABEL: vuitofp_v4i16_v4f16: ; CHECK: # %bb.0: @@ -958,7 +882,6 @@ define <4 x half> @vuitofp_v4i16_v4f16(<4 x i16> %va) strictfp { ret <4 x half> %evec } -declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i16(<4 x i16>, metadata, metadata) define <4 x float> @vsitofp_v4i16_v4f32(<4 x i16> %va) strictfp { ; CHECK-LABEL: vsitofp_v4i16_v4f32: ; CHECK: # %bb.0: @@ -970,7 +893,6 @@ define <4 x float> @vsitofp_v4i16_v4f32(<4 x i16> %va) strictfp { ret <4 x float> %evec } -declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i16(<4 x i16>, metadata, metadata) define <4 x float> @vuitofp_v4i16_v4f32(<4 x i16> %va) strictfp { ; CHECK-LABEL: vuitofp_v4i16_v4f32: ; CHECK: # %bb.0: @@ -982,7 +904,6 @@ define <4 x float> @vuitofp_v4i16_v4f32(<4 x i16> %va) strictfp { ret <4 x float> %evec } -declare <4 x double> 
@llvm.experimental.constrained.sitofp.v4f64.v4i16(<4 x i16>, metadata, metadata) define <4 x double> @vsitofp_v4i16_v4f64(<4 x i16> %va) strictfp { ; CHECK-LABEL: vsitofp_v4i16_v4f64: ; CHECK: # %bb.0: @@ -994,7 +915,6 @@ define <4 x double> @vsitofp_v4i16_v4f64(<4 x i16> %va) strictfp { ret <4 x double> %evec } -declare <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i16(<4 x i16>, metadata, metadata) define <4 x double> @vuitofp_v4i16_v4f64(<4 x i16> %va) strictfp { ; CHECK-LABEL: vuitofp_v4i16_v4f64: ; CHECK: # %bb.0: @@ -1006,7 +926,6 @@ define <4 x double> @vuitofp_v4i16_v4f64(<4 x i16> %va) strictfp { ret <4 x double> %evec } -declare <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i16(<8 x i16>, metadata, metadata) define <8 x half> @vsitofp_v8i16_v8f16(<8 x i16> %va) strictfp { ; CHECK-LABEL: vsitofp_v8i16_v8f16: ; CHECK: # %bb.0: @@ -1017,7 +936,6 @@ define <8 x half> @vsitofp_v8i16_v8f16(<8 x i16> %va) strictfp { ret <8 x half> %evec } -declare <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i16(<8 x i16>, metadata, metadata) define <8 x half> @vuitofp_v8i16_v8f16(<8 x i16> %va) strictfp { ; CHECK-LABEL: vuitofp_v8i16_v8f16: ; CHECK: # %bb.0: @@ -1028,7 +946,6 @@ define <8 x half> @vuitofp_v8i16_v8f16(<8 x i16> %va) strictfp { ret <8 x half> %evec } -declare <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i16(<8 x i16>, metadata, metadata) define <8 x float> @vsitofp_v8i16_v8f32(<8 x i16> %va) strictfp { ; CHECK-LABEL: vsitofp_v8i16_v8f32: ; CHECK: # %bb.0: @@ -1040,7 +957,6 @@ define <8 x float> @vsitofp_v8i16_v8f32(<8 x i16> %va) strictfp { ret <8 x float> %evec } -declare <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i16(<8 x i16>, metadata, metadata) define <8 x float> @vuitofp_v8i16_v8f32(<8 x i16> %va) strictfp { ; CHECK-LABEL: vuitofp_v8i16_v8f32: ; CHECK: # %bb.0: @@ -1052,7 +968,6 @@ define <8 x float> @vuitofp_v8i16_v8f32(<8 x i16> %va) strictfp { ret <8 x float> %evec } -declare <8 x double> 
@llvm.experimental.constrained.sitofp.v8f64.v8i16(<8 x i16>, metadata, metadata) define <8 x double> @vsitofp_v8i16_v8f64(<8 x i16> %va) strictfp { ; CHECK-LABEL: vsitofp_v8i16_v8f64: ; CHECK: # %bb.0: @@ -1064,7 +979,6 @@ define <8 x double> @vsitofp_v8i16_v8f64(<8 x i16> %va) strictfp { ret <8 x double> %evec } -declare <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i16(<8 x i16>, metadata, metadata) define <8 x double> @vuitofp_v8i16_v8f64(<8 x i16> %va) strictfp { ; CHECK-LABEL: vuitofp_v8i16_v8f64: ; CHECK: # %bb.0: @@ -1076,7 +990,6 @@ define <8 x double> @vuitofp_v8i16_v8f64(<8 x i16> %va) strictfp { ret <8 x double> %evec } -declare <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i16(<16 x i16>, metadata, metadata) define <16 x half> @vsitofp_v16i16_v16f16(<16 x i16> %va) strictfp { ; CHECK-LABEL: vsitofp_v16i16_v16f16: ; CHECK: # %bb.0: @@ -1087,7 +1000,6 @@ define <16 x half> @vsitofp_v16i16_v16f16(<16 x i16> %va) strictfp { ret <16 x half> %evec } -declare <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i16(<16 x i16>, metadata, metadata) define <16 x half> @vuitofp_v16i16_v16f16(<16 x i16> %va) strictfp { ; CHECK-LABEL: vuitofp_v16i16_v16f16: ; CHECK: # %bb.0: @@ -1098,7 +1010,6 @@ define <16 x half> @vuitofp_v16i16_v16f16(<16 x i16> %va) strictfp { ret <16 x half> %evec } -declare <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i16(<16 x i16>, metadata, metadata) define <16 x float> @vsitofp_v16i16_v16f32(<16 x i16> %va) strictfp { ; CHECK-LABEL: vsitofp_v16i16_v16f32: ; CHECK: # %bb.0: @@ -1110,7 +1021,6 @@ define <16 x float> @vsitofp_v16i16_v16f32(<16 x i16> %va) strictfp { ret <16 x float> %evec } -declare <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i16(<16 x i16>, metadata, metadata) define <16 x float> @vuitofp_v16i16_v16f32(<16 x i16> %va) strictfp { ; CHECK-LABEL: vuitofp_v16i16_v16f32: ; CHECK: # %bb.0: @@ -1122,7 +1032,6 @@ define <16 x float> @vuitofp_v16i16_v16f32(<16 x i16> 
%va) strictfp { ret <16 x float> %evec } -declare <32 x half> @llvm.experimental.constrained.sitofp.v32f16.v32i16(<32 x i16>, metadata, metadata) define <32 x half> @vsitofp_v32i16_v32f16(<32 x i16> %va) strictfp { ; CHECK-LABEL: vsitofp_v32i16_v32f16: ; CHECK: # %bb.0: @@ -1134,7 +1043,6 @@ define <32 x half> @vsitofp_v32i16_v32f16(<32 x i16> %va) strictfp { ret <32 x half> %evec } -declare <32 x half> @llvm.experimental.constrained.uitofp.v32f16.v32i16(<32 x i16>, metadata, metadata) define <32 x half> @vuitofp_v32i16_v32f16(<32 x i16> %va) strictfp { ; CHECK-LABEL: vuitofp_v32i16_v32f16: ; CHECK: # %bb.0: @@ -1146,7 +1054,6 @@ define <32 x half> @vuitofp_v32i16_v32f16(<32 x i16> %va) strictfp { ret <32 x half> %evec } -declare <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i32(<1 x i32>, metadata, metadata) define <1 x half> @vsitofp_v1i32_v1f16(<1 x i32> %va) strictfp { ; CHECK-LABEL: vsitofp_v1i32_v1f16: ; CHECK: # %bb.0: @@ -1158,7 +1065,6 @@ define <1 x half> @vsitofp_v1i32_v1f16(<1 x i32> %va) strictfp { ret <1 x half> %evec } -declare <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i32(<1 x i32>, metadata, metadata) define <1 x half> @vuitofp_v1i32_v1f16(<1 x i32> %va) strictfp { ; CHECK-LABEL: vuitofp_v1i32_v1f16: ; CHECK: # %bb.0: @@ -1170,7 +1076,6 @@ define <1 x half> @vuitofp_v1i32_v1f16(<1 x i32> %va) strictfp { ret <1 x half> %evec } -declare <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i32(<1 x i32>, metadata, metadata) define <1 x float> @vsitofp_v1i32_v1f32(<1 x i32> %va) strictfp { ; CHECK-LABEL: vsitofp_v1i32_v1f32: ; CHECK: # %bb.0: @@ -1181,7 +1086,6 @@ define <1 x float> @vsitofp_v1i32_v1f32(<1 x i32> %va) strictfp { ret <1 x float> %evec } -declare <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i32(<1 x i32>, metadata, metadata) define <1 x float> @vuitofp_v1i32_v1f32(<1 x i32> %va) strictfp { ; CHECK-LABEL: vuitofp_v1i32_v1f32: ; CHECK: # %bb.0: @@ -1192,7 +1096,6 @@ define <1 x float> 
@vuitofp_v1i32_v1f32(<1 x i32> %va) strictfp { ret <1 x float> %evec } -declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i32(<1 x i32>, metadata, metadata) define <1 x double> @vsitofp_v1i32_v1f64(<1 x i32> %va) strictfp { ; CHECK-LABEL: vsitofp_v1i32_v1f64: ; CHECK: # %bb.0: @@ -1204,7 +1107,6 @@ define <1 x double> @vsitofp_v1i32_v1f64(<1 x i32> %va) strictfp { ret <1 x double> %evec } -declare <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i32(<1 x i32>, metadata, metadata) define <1 x double> @vuitofp_v1i32_v1f64(<1 x i32> %va) strictfp { ; CHECK-LABEL: vuitofp_v1i32_v1f64: ; CHECK: # %bb.0: @@ -1216,7 +1118,6 @@ define <1 x double> @vuitofp_v1i32_v1f64(<1 x i32> %va) strictfp { ret <1 x double> %evec } -declare <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i32(<2 x i32>, metadata, metadata) define <2 x half> @vsitofp_v2i32_v2f16(<2 x i32> %va) strictfp { ; CHECK-LABEL: vsitofp_v2i32_v2f16: ; CHECK: # %bb.0: @@ -1228,7 +1129,6 @@ define <2 x half> @vsitofp_v2i32_v2f16(<2 x i32> %va) strictfp { ret <2 x half> %evec } -declare <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i32(<2 x i32>, metadata, metadata) define <2 x half> @vuitofp_v2i32_v2f16(<2 x i32> %va) strictfp { ; CHECK-LABEL: vuitofp_v2i32_v2f16: ; CHECK: # %bb.0: @@ -1240,7 +1140,6 @@ define <2 x half> @vuitofp_v2i32_v2f16(<2 x i32> %va) strictfp { ret <2 x half> %evec } -declare <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i32(<2 x i32>, metadata, metadata) define <2 x float> @vsitofp_v2i32_v2f32(<2 x i32> %va) strictfp { ; CHECK-LABEL: vsitofp_v2i32_v2f32: ; CHECK: # %bb.0: @@ -1251,7 +1150,6 @@ define <2 x float> @vsitofp_v2i32_v2f32(<2 x i32> %va) strictfp { ret <2 x float> %evec } -declare <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i32(<2 x i32>, metadata, metadata) define <2 x float> @vuitofp_v2i32_v2f32(<2 x i32> %va) strictfp { ; CHECK-LABEL: vuitofp_v2i32_v2f32: ; CHECK: # %bb.0: @@ -1262,7 +1160,6 @@ define <2 x 
float> @vuitofp_v2i32_v2f32(<2 x i32> %va) strictfp { ret <2 x float> %evec } -declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i32(<2 x i32>, metadata, metadata) define <2 x double> @vsitofp_v2i32_v2f64(<2 x i32> %va) strictfp { ; CHECK-LABEL: vsitofp_v2i32_v2f64: ; CHECK: # %bb.0: @@ -1274,7 +1171,6 @@ define <2 x double> @vsitofp_v2i32_v2f64(<2 x i32> %va) strictfp { ret <2 x double> %evec } -declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i32(<2 x i32>, metadata, metadata) define <2 x double> @vuitofp_v2i32_v2f64(<2 x i32> %va) strictfp { ; CHECK-LABEL: vuitofp_v2i32_v2f64: ; CHECK: # %bb.0: @@ -1286,7 +1182,6 @@ define <2 x double> @vuitofp_v2i32_v2f64(<2 x i32> %va) strictfp { ret <2 x double> %evec } -declare <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i32(<4 x i32>, metadata, metadata) define <4 x half> @vsitofp_v4i32_v4f16(<4 x i32> %va) strictfp { ; CHECK-LABEL: vsitofp_v4i32_v4f16: ; CHECK: # %bb.0: @@ -1298,7 +1193,6 @@ define <4 x half> @vsitofp_v4i32_v4f16(<4 x i32> %va) strictfp { ret <4 x half> %evec } -declare <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i32(<4 x i32>, metadata, metadata) define <4 x half> @vuitofp_v4i32_v4f16(<4 x i32> %va) strictfp { ; CHECK-LABEL: vuitofp_v4i32_v4f16: ; CHECK: # %bb.0: @@ -1310,7 +1204,6 @@ define <4 x half> @vuitofp_v4i32_v4f16(<4 x i32> %va) strictfp { ret <4 x half> %evec } -declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i32(<4 x i32>, metadata, metadata) define <4 x float> @vsitofp_v4i32_v4f32(<4 x i32> %va) strictfp { ; CHECK-LABEL: vsitofp_v4i32_v4f32: ; CHECK: # %bb.0: @@ -1321,7 +1214,6 @@ define <4 x float> @vsitofp_v4i32_v4f32(<4 x i32> %va) strictfp { ret <4 x float> %evec } -declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i32(<4 x i32>, metadata, metadata) define <4 x float> @vuitofp_v4i32_v4f32(<4 x i32> %va) strictfp { ; CHECK-LABEL: vuitofp_v4i32_v4f32: ; CHECK: # %bb.0: @@ -1332,7 +1224,6 @@ define 
<4 x float> @vuitofp_v4i32_v4f32(<4 x i32> %va) strictfp { ret <4 x float> %evec } -declare <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i32(<4 x i32>, metadata, metadata) define <4 x double> @vsitofp_v4i32_v4f64(<4 x i32> %va) strictfp { ; CHECK-LABEL: vsitofp_v4i32_v4f64: ; CHECK: # %bb.0: @@ -1344,7 +1235,6 @@ define <4 x double> @vsitofp_v4i32_v4f64(<4 x i32> %va) strictfp { ret <4 x double> %evec } -declare <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i32(<4 x i32>, metadata, metadata) define <4 x double> @vuitofp_v4i32_v4f64(<4 x i32> %va) strictfp { ; CHECK-LABEL: vuitofp_v4i32_v4f64: ; CHECK: # %bb.0: @@ -1356,7 +1246,6 @@ define <4 x double> @vuitofp_v4i32_v4f64(<4 x i32> %va) strictfp { ret <4 x double> %evec } -declare <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i32(<8 x i32>, metadata, metadata) define <8 x half> @vsitofp_v8i32_v8f16(<8 x i32> %va) strictfp { ; CHECK-LABEL: vsitofp_v8i32_v8f16: ; CHECK: # %bb.0: @@ -1368,7 +1257,6 @@ define <8 x half> @vsitofp_v8i32_v8f16(<8 x i32> %va) strictfp { ret <8 x half> %evec } -declare <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i32(<8 x i32>, metadata, metadata) define <8 x half> @vuitofp_v8i32_v8f16(<8 x i32> %va) strictfp { ; CHECK-LABEL: vuitofp_v8i32_v8f16: ; CHECK: # %bb.0: @@ -1380,7 +1268,6 @@ define <8 x half> @vuitofp_v8i32_v8f16(<8 x i32> %va) strictfp { ret <8 x half> %evec } -declare <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i32(<8 x i32>, metadata, metadata) define <8 x float> @vsitofp_v8i32_v8f32(<8 x i32> %va) strictfp { ; CHECK-LABEL: vsitofp_v8i32_v8f32: ; CHECK: # %bb.0: @@ -1391,7 +1278,6 @@ define <8 x float> @vsitofp_v8i32_v8f32(<8 x i32> %va) strictfp { ret <8 x float> %evec } -declare <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i32(<8 x i32>, metadata, metadata) define <8 x float> @vuitofp_v8i32_v8f32(<8 x i32> %va) strictfp { ; CHECK-LABEL: vuitofp_v8i32_v8f32: ; CHECK: # %bb.0: @@ -1402,7 +1288,6 @@ 
define <8 x float> @vuitofp_v8i32_v8f32(<8 x i32> %va) strictfp { ret <8 x float> %evec } -declare <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i32(<8 x i32>, metadata, metadata) define <8 x double> @vsitofp_v8i32_v8f64(<8 x i32> %va) strictfp { ; CHECK-LABEL: vsitofp_v8i32_v8f64: ; CHECK: # %bb.0: @@ -1414,7 +1299,6 @@ define <8 x double> @vsitofp_v8i32_v8f64(<8 x i32> %va) strictfp { ret <8 x double> %evec } -declare <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i32(<8 x i32>, metadata, metadata) define <8 x double> @vuitofp_v8i32_v8f64(<8 x i32> %va) strictfp { ; CHECK-LABEL: vuitofp_v8i32_v8f64: ; CHECK: # %bb.0: @@ -1426,7 +1310,6 @@ define <8 x double> @vuitofp_v8i32_v8f64(<8 x i32> %va) strictfp { ret <8 x double> %evec } -declare <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i32(<16 x i32>, metadata, metadata) define <16 x half> @vsitofp_v16i32_v16f16(<16 x i32> %va) strictfp { ; CHECK-LABEL: vsitofp_v16i32_v16f16: ; CHECK: # %bb.0: @@ -1438,7 +1321,6 @@ define <16 x half> @vsitofp_v16i32_v16f16(<16 x i32> %va) strictfp { ret <16 x half> %evec } -declare <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i32(<16 x i32>, metadata, metadata) define <16 x half> @vuitofp_v16i32_v16f16(<16 x i32> %va) strictfp { ; CHECK-LABEL: vuitofp_v16i32_v16f16: ; CHECK: # %bb.0: @@ -1450,7 +1332,6 @@ define <16 x half> @vuitofp_v16i32_v16f16(<16 x i32> %va) strictfp { ret <16 x half> %evec } -declare <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i32(<16 x i32>, metadata, metadata) define <16 x float> @vsitofp_v16i32_v16f32(<16 x i32> %va) strictfp { ; CHECK-LABEL: vsitofp_v16i32_v16f32: ; CHECK: # %bb.0: @@ -1461,7 +1342,6 @@ define <16 x float> @vsitofp_v16i32_v16f32(<16 x i32> %va) strictfp { ret <16 x float> %evec } -declare <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i32(<16 x i32>, metadata, metadata) define <16 x float> @vuitofp_v16i32_v16f32(<16 x i32> %va) strictfp { ; CHECK-LABEL: 
vuitofp_v16i32_v16f32: ; CHECK: # %bb.0: @@ -1472,7 +1352,6 @@ define <16 x float> @vuitofp_v16i32_v16f32(<16 x i32> %va) strictfp { ret <16 x float> %evec } -declare <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i64(<1 x i64>, metadata, metadata) define <1 x half> @vsitofp_v1i64_v1f16(<1 x i64> %va) strictfp { ; CHECK-LABEL: vsitofp_v1i64_v1f16: ; CHECK: # %bb.0: @@ -1485,7 +1364,6 @@ define <1 x half> @vsitofp_v1i64_v1f16(<1 x i64> %va) strictfp { ret <1 x half> %evec } -declare <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i64(<1 x i64>, metadata, metadata) define <1 x half> @vuitofp_v1i64_v1f16(<1 x i64> %va) strictfp { ; CHECK-LABEL: vuitofp_v1i64_v1f16: ; CHECK: # %bb.0: @@ -1498,7 +1376,6 @@ define <1 x half> @vuitofp_v1i64_v1f16(<1 x i64> %va) strictfp { ret <1 x half> %evec } -declare <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i64(<1 x i64>, metadata, metadata) define <1 x float> @vsitofp_v1i64_v1f32(<1 x i64> %va) strictfp { ; CHECK-LABEL: vsitofp_v1i64_v1f32: ; CHECK: # %bb.0: @@ -1510,7 +1387,6 @@ define <1 x float> @vsitofp_v1i64_v1f32(<1 x i64> %va) strictfp { ret <1 x float> %evec } -declare <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i64(<1 x i64>, metadata, metadata) define <1 x float> @vuitofp_v1i64_v1f32(<1 x i64> %va) strictfp { ; CHECK-LABEL: vuitofp_v1i64_v1f32: ; CHECK: # %bb.0: @@ -1522,7 +1398,6 @@ define <1 x float> @vuitofp_v1i64_v1f32(<1 x i64> %va) strictfp { ret <1 x float> %evec } -declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i64(<1 x i64>, metadata, metadata) define <1 x double> @vsitofp_v1i64_v1f64(<1 x i64> %va) strictfp { ; CHECK-LABEL: vsitofp_v1i64_v1f64: ; CHECK: # %bb.0: @@ -1533,7 +1408,6 @@ define <1 x double> @vsitofp_v1i64_v1f64(<1 x i64> %va) strictfp { ret <1 x double> %evec } -declare <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i64(<1 x i64>, metadata, metadata) define <1 x double> @vuitofp_v1i64_v1f64(<1 x i64> %va) strictfp { ; 
CHECK-LABEL: vuitofp_v1i64_v1f64: ; CHECK: # %bb.0: @@ -1544,8 +1418,6 @@ define <1 x double> @vuitofp_v1i64_v1f64(<1 x i64> %va) strictfp { ret <1 x double> %evec } - -declare <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i64(<2 x i64>, metadata, metadata) define <2 x half> @vsitofp_v2i64_v2f16(<2 x i64> %va) strictfp { ; CHECK-LABEL: vsitofp_v2i64_v2f16: ; CHECK: # %bb.0: @@ -1558,7 +1430,6 @@ define <2 x half> @vsitofp_v2i64_v2f16(<2 x i64> %va) strictfp { ret <2 x half> %evec } -declare <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i64(<2 x i64>, metadata, metadata) define <2 x half> @vuitofp_v2i64_v2f16(<2 x i64> %va) strictfp { ; CHECK-LABEL: vuitofp_v2i64_v2f16: ; CHECK: # %bb.0: @@ -1571,7 +1442,6 @@ define <2 x half> @vuitofp_v2i64_v2f16(<2 x i64> %va) strictfp { ret <2 x half> %evec } -declare <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i64(<2 x i64>, metadata, metadata) define <2 x float> @vsitofp_v2i64_v2f32(<2 x i64> %va) strictfp { ; CHECK-LABEL: vsitofp_v2i64_v2f32: ; CHECK: # %bb.0: @@ -1583,7 +1453,6 @@ define <2 x float> @vsitofp_v2i64_v2f32(<2 x i64> %va) strictfp { ret <2 x float> %evec } -declare <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i64(<2 x i64>, metadata, metadata) define <2 x float> @vuitofp_v2i64_v2f32(<2 x i64> %va) strictfp { ; CHECK-LABEL: vuitofp_v2i64_v2f32: ; CHECK: # %bb.0: @@ -1595,7 +1464,6 @@ define <2 x float> @vuitofp_v2i64_v2f32(<2 x i64> %va) strictfp { ret <2 x float> %evec } -declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i64(<2 x i64>, metadata, metadata) define <2 x double> @vsitofp_v2i64_v2f64(<2 x i64> %va) strictfp { ; CHECK-LABEL: vsitofp_v2i64_v2f64: ; CHECK: # %bb.0: @@ -1606,7 +1474,6 @@ define <2 x double> @vsitofp_v2i64_v2f64(<2 x i64> %va) strictfp { ret <2 x double> %evec } -declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i64(<2 x i64>, metadata, metadata) define <2 x double> @vuitofp_v2i64_v2f64(<2 x i64> %va) 
strictfp { ; CHECK-LABEL: vuitofp_v2i64_v2f64: ; CHECK: # %bb.0: @@ -1617,7 +1484,6 @@ define <2 x double> @vuitofp_v2i64_v2f64(<2 x i64> %va) strictfp { ret <2 x double> %evec } -declare <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i64(<4 x i64>, metadata, metadata) define <4 x half> @vsitofp_v4i64_v4f16(<4 x i64> %va) strictfp { ; CHECK-LABEL: vsitofp_v4i64_v4f16: ; CHECK: # %bb.0: @@ -1630,7 +1496,6 @@ define <4 x half> @vsitofp_v4i64_v4f16(<4 x i64> %va) strictfp { ret <4 x half> %evec } -declare <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i64(<4 x i64>, metadata, metadata) define <4 x half> @vuitofp_v4i64_v4f16(<4 x i64> %va) strictfp { ; CHECK-LABEL: vuitofp_v4i64_v4f16: ; CHECK: # %bb.0: @@ -1643,7 +1508,6 @@ define <4 x half> @vuitofp_v4i64_v4f16(<4 x i64> %va) strictfp { ret <4 x half> %evec } -declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i64(<4 x i64>, metadata, metadata) define <4 x float> @vsitofp_v4i64_v4f32(<4 x i64> %va) strictfp { ; CHECK-LABEL: vsitofp_v4i64_v4f32: ; CHECK: # %bb.0: @@ -1655,7 +1519,6 @@ define <4 x float> @vsitofp_v4i64_v4f32(<4 x i64> %va) strictfp { ret <4 x float> %evec } -declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i64(<4 x i64>, metadata, metadata) define <4 x float> @vuitofp_v4i64_v4f32(<4 x i64> %va) strictfp { ; CHECK-LABEL: vuitofp_v4i64_v4f32: ; CHECK: # %bb.0: @@ -1667,7 +1530,6 @@ define <4 x float> @vuitofp_v4i64_v4f32(<4 x i64> %va) strictfp { ret <4 x float> %evec } -declare <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i64(<4 x i64>, metadata, metadata) define <4 x double> @vsitofp_v4i64_v4f64(<4 x i64> %va) strictfp { ; CHECK-LABEL: vsitofp_v4i64_v4f64: ; CHECK: # %bb.0: @@ -1678,7 +1540,6 @@ define <4 x double> @vsitofp_v4i64_v4f64(<4 x i64> %va) strictfp { ret <4 x double> %evec } -declare <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i64(<4 x i64>, metadata, metadata) define <4 x double> @vuitofp_v4i64_v4f64(<4 x i64> 
%va) strictfp { ; CHECK-LABEL: vuitofp_v4i64_v4f64: ; CHECK: # %bb.0: @@ -1689,7 +1550,6 @@ define <4 x double> @vuitofp_v4i64_v4f64(<4 x i64> %va) strictfp { ret <4 x double> %evec } -declare <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i64(<8 x i64>, metadata, metadata) define <8 x half> @vsitofp_v8i64_v8f16(<8 x i64> %va) strictfp { ; CHECK-LABEL: vsitofp_v8i64_v8f16: ; CHECK: # %bb.0: @@ -1702,7 +1562,6 @@ define <8 x half> @vsitofp_v8i64_v8f16(<8 x i64> %va) strictfp { ret <8 x half> %evec } -declare <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i64(<8 x i64>, metadata, metadata) define <8 x half> @vuitofp_v8i64_v8f16(<8 x i64> %va) strictfp { ; CHECK-LABEL: vuitofp_v8i64_v8f16: ; CHECK: # %bb.0: @@ -1715,7 +1574,6 @@ define <8 x half> @vuitofp_v8i64_v8f16(<8 x i64> %va) strictfp { ret <8 x half> %evec } -declare <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i64(<8 x i64>, metadata, metadata) define <8 x float> @vsitofp_v8i64_v8f32(<8 x i64> %va) strictfp { ; CHECK-LABEL: vsitofp_v8i64_v8f32: ; CHECK: # %bb.0: @@ -1727,7 +1585,6 @@ define <8 x float> @vsitofp_v8i64_v8f32(<8 x i64> %va) strictfp { ret <8 x float> %evec } -declare <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i64(<8 x i64>, metadata, metadata) define <8 x float> @vuitofp_v8i64_v8f32(<8 x i64> %va) strictfp { ; CHECK-LABEL: vuitofp_v8i64_v8f32: ; CHECK: # %bb.0: @@ -1739,7 +1596,6 @@ define <8 x float> @vuitofp_v8i64_v8f32(<8 x i64> %va) strictfp { ret <8 x float> %evec } -declare <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i64(<8 x i64>, metadata, metadata) define <8 x double> @vsitofp_v8i64_v8f64(<8 x i64> %va) strictfp { ; CHECK-LABEL: vsitofp_v8i64_v8f64: ; CHECK: # %bb.0: @@ -1750,7 +1606,6 @@ define <8 x double> @vsitofp_v8i64_v8f64(<8 x i64> %va) strictfp { ret <8 x double> %evec } -declare <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i64(<8 x i64>, metadata, metadata) define <8 x double> @vuitofp_v8i64_v8f64(<8 x 
i64> %va) strictfp { ; CHECK-LABEL: vuitofp_v8i64_v8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmacc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmacc-vp.ll index f5a31d7eaadbe..3595eec2000e1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmacc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmacc-vp.ll @@ -4,11 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 -declare <2 x i8> @llvm.vp.mul.nxv2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) -declare <2 x i8> @llvm.vp.add.nxv2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) -declare <2 x i8> @llvm.vp.merge.nxv2i8(<2 x i1>, <2 x i8>, <2 x i8>, i32) -declare <2 x i8> @llvm.vp.select.nxv2i8(<2 x i1>, <2 x i8>, <2 x i8>, i32) - define <2 x i8> @vmacc_vv_nxv2i8(<2 x i8> %a, <2 x i8> %b, <2 x i8> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv2i8: ; CHECK: # %bb.0: @@ -93,11 +88,6 @@ define <2 x i8> @vmacc_vx_nxv2i8_ta(<2 x i8> %a, i8 %b, <2 x i8> %c, <2 x i1> % ret <2 x i8> %u } -declare <4 x i8> @llvm.vp.mul.nxv4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) -declare <4 x i8> @llvm.vp.add.nxv4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) -declare <4 x i8> @llvm.vp.merge.nxv4i8(<4 x i1>, <4 x i8>, <4 x i8>, i32) -declare <4 x i8> @llvm.vp.select.nxv4i8(<4 x i1>, <4 x i8>, <4 x i8>, i32) - define <4 x i8> @vmacc_vv_nxv4i8(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv4i8: ; CHECK: # %bb.0: @@ -182,11 +172,6 @@ define <4 x i8> @vmacc_vx_nxv4i8_ta(<4 x i8> %a, i8 %b, <4 x i8> %c, <4 x i1> % ret <4 x i8> %u } -declare <8 x i8> @llvm.vp.mul.nxv8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) -declare <8 x i8> @llvm.vp.add.nxv8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) -declare <8 x i8> @llvm.vp.merge.nxv8i8(<8 x i1>, <8 x i8>, <8 x i8>, i32) -declare <8 x i8> @llvm.vp.select.nxv8i8(<8 x i1>, <8 x i8>, <8 x i8>, i32) - define <8 x i8> @vmacc_vv_nxv8i8(<8 x i8> 
%a, <8 x i8> %b, <8 x i8> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv8i8: ; CHECK: # %bb.0: @@ -271,11 +256,6 @@ define <8 x i8> @vmacc_vx_nxv8i8_ta(<8 x i8> %a, i8 %b, <8 x i8> %c, <8 x i1> % ret <8 x i8> %u } -declare <16 x i8> @llvm.vp.mul.nxv16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) -declare <16 x i8> @llvm.vp.add.nxv16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) -declare <16 x i8> @llvm.vp.merge.nxv16i8(<16 x i1>, <16 x i8>, <16 x i8>, i32) -declare <16 x i8> @llvm.vp.select.nxv16i8(<16 x i1>, <16 x i8>, <16 x i8>, i32) - define <16 x i8> @vmacc_vv_nxv16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv16i8: ; CHECK: # %bb.0: @@ -360,11 +340,6 @@ define <16 x i8> @vmacc_vx_nxv16i8_ta(<16 x i8> %a, i8 %b, <16 x i8> %c, <16 x ret <16 x i8> %u } -declare <32 x i8> @llvm.vp.mul.nxv32i8(<32 x i8>, <32 x i8>, <32 x i1>, i32) -declare <32 x i8> @llvm.vp.add.nxv32i8(<32 x i8>, <32 x i8>, <32 x i1>, i32) -declare <32 x i8> @llvm.vp.merge.nxv32i8(<32 x i1>, <32 x i8>, <32 x i8>, i32) -declare <32 x i8> @llvm.vp.select.nxv32i8(<32 x i1>, <32 x i8>, <32 x i8>, i32) - define <32 x i8> @vmacc_vv_nxv32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8> %c, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv32i8: ; CHECK: # %bb.0: @@ -449,11 +424,6 @@ define <32 x i8> @vmacc_vx_nxv32i8_ta(<32 x i8> %a, i8 %b, <32 x i8> %c, <32 x ret <32 x i8> %u } -declare <64 x i8> @llvm.vp.mul.nxv64i8(<64 x i8>, <64 x i8>, <64 x i1>, i32) -declare <64 x i8> @llvm.vp.add.nxv64i8(<64 x i8>, <64 x i8>, <64 x i1>, i32) -declare <64 x i8> @llvm.vp.merge.nxv64i8(<64 x i1>, <64 x i8>, <64 x i8>, i32) -declare <64 x i8> @llvm.vp.select.nxv64i8(<64 x i1>, <64 x i8>, <64 x i8>, i32) - define <64 x i8> @vmacc_vv_nxv64i8(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c, <64 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv64i8: ; CHECK: # %bb.0: @@ -538,11 +508,6 @@ define <64 x i8> @vmacc_vx_nxv64i8_ta(<64 x i8> %a, i8 %b, <64 x 
i8> %c, <64 x ret <64 x i8> %u } -declare <2 x i16> @llvm.vp.mul.nxv2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) -declare <2 x i16> @llvm.vp.add.nxv2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) -declare <2 x i16> @llvm.vp.merge.nxv2i16(<2 x i1>, <2 x i16>, <2 x i16>, i32) -declare <2 x i16> @llvm.vp.select.nxv2i16(<2 x i1>, <2 x i16>, <2 x i16>, i32) - define <2 x i16> @vmacc_vv_nxv2i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv2i16: ; CHECK: # %bb.0: @@ -627,11 +592,6 @@ define <2 x i16> @vmacc_vx_nxv2i16_ta(<2 x i16> %a, i16 %b, <2 x i16> %c, <2 x ret <2 x i16> %u } -declare <4 x i16> @llvm.vp.mul.nxv4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) -declare <4 x i16> @llvm.vp.add.nxv4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) -declare <4 x i16> @llvm.vp.merge.nxv4i16(<4 x i1>, <4 x i16>, <4 x i16>, i32) -declare <4 x i16> @llvm.vp.select.nxv4i16(<4 x i1>, <4 x i16>, <4 x i16>, i32) - define <4 x i16> @vmacc_vv_nxv4i16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv4i16: ; CHECK: # %bb.0: @@ -716,11 +676,6 @@ define <4 x i16> @vmacc_vx_nxv4i16_ta(<4 x i16> %a, i16 %b, <4 x i16> %c, <4 x ret <4 x i16> %u } -declare <8 x i16> @llvm.vp.mul.nxv8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) -declare <8 x i16> @llvm.vp.add.nxv8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) -declare <8 x i16> @llvm.vp.merge.nxv8i16(<8 x i1>, <8 x i16>, <8 x i16>, i32) -declare <8 x i16> @llvm.vp.select.nxv8i16(<8 x i1>, <8 x i16>, <8 x i16>, i32) - define <8 x i16> @vmacc_vv_nxv8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv8i16: ; CHECK: # %bb.0: @@ -805,11 +760,6 @@ define <8 x i16> @vmacc_vx_nxv8i16_ta(<8 x i16> %a, i16 %b, <8 x i16> %c, <8 x ret <8 x i16> %u } -declare <16 x i16> @llvm.vp.mul.nxv16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) -declare <16 x i16> @llvm.vp.add.nxv16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) -declare <16 x 
i16> @llvm.vp.merge.nxv16i16(<16 x i1>, <16 x i16>, <16 x i16>, i32) -declare <16 x i16> @llvm.vp.select.nxv16i16(<16 x i1>, <16 x i16>, <16 x i16>, i32) - define <16 x i16> @vmacc_vv_nxv16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv16i16: ; CHECK: # %bb.0: @@ -894,11 +844,6 @@ define <16 x i16> @vmacc_vx_nxv16i16_ta(<16 x i16> %a, i16 %b, <16 x i16> %c, < ret <16 x i16> %u } -declare <32 x i16> @llvm.vp.mul.nxv32i16(<32 x i16>, <32 x i16>, <32 x i1>, i32) -declare <32 x i16> @llvm.vp.add.nxv32i16(<32 x i16>, <32 x i16>, <32 x i1>, i32) -declare <32 x i16> @llvm.vp.merge.nxv32i16(<32 x i1>, <32 x i16>, <32 x i16>, i32) -declare <32 x i16> @llvm.vp.select.nxv32i16(<32 x i1>, <32 x i16>, <32 x i16>, i32) - define <32 x i16> @vmacc_vv_nxv32i16(<32 x i16> %a, <32 x i16> %b, <32 x i16> %c, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv32i16: ; CHECK: # %bb.0: @@ -983,11 +928,6 @@ define <32 x i16> @vmacc_vx_nxv32i16_ta(<32 x i16> %a, i16 %b, <32 x i16> %c, < ret <32 x i16> %u } -declare <2 x i32> @llvm.vp.mul.nxv2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) -declare <2 x i32> @llvm.vp.add.nxv2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) -declare <2 x i32> @llvm.vp.merge.nxv2i32(<2 x i1>, <2 x i32>, <2 x i32>, i32) -declare <2 x i32> @llvm.vp.select.nxv2i32(<2 x i1>, <2 x i32>, <2 x i32>, i32) - define <2 x i32> @vmacc_vv_nxv2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv2i32: ; CHECK: # %bb.0: @@ -1072,11 +1012,6 @@ define <2 x i32> @vmacc_vx_nxv2i32_ta(<2 x i32> %a, i32 %b, <2 x i32> %c, <2 x ret <2 x i32> %u } -declare <4 x i32> @llvm.vp.mul.nxv4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) -declare <4 x i32> @llvm.vp.add.nxv4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) -declare <4 x i32> @llvm.vp.merge.nxv4i32(<4 x i1>, <4 x i32>, <4 x i32>, i32) -declare <4 x i32> @llvm.vp.select.nxv4i32(<4 x i1>, <4 x i32>, <4 x i32>, i32) - define <4 x i32> 
@vmacc_vv_nxv4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv4i32: ; CHECK: # %bb.0: @@ -1161,11 +1096,6 @@ define <4 x i32> @vmacc_vx_nxv4i32_ta(<4 x i32> %a, i32 %b, <4 x i32> %c, <4 x ret <4 x i32> %u } -declare <8 x i32> @llvm.vp.mul.nxv8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) -declare <8 x i32> @llvm.vp.add.nxv8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) -declare <8 x i32> @llvm.vp.merge.nxv8i32(<8 x i1>, <8 x i32>, <8 x i32>, i32) -declare <8 x i32> @llvm.vp.select.nxv8i32(<8 x i1>, <8 x i32>, <8 x i32>, i32) - define <8 x i32> @vmacc_vv_nxv8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv8i32: ; CHECK: # %bb.0: @@ -1250,11 +1180,6 @@ define <8 x i32> @vmacc_vx_nxv8i32_ta(<8 x i32> %a, i32 %b, <8 x i32> %c, <8 x ret <8 x i32> %u } -declare <16 x i32> @llvm.vp.mul.nxv16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32) -declare <16 x i32> @llvm.vp.add.nxv16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32) -declare <16 x i32> @llvm.vp.merge.nxv16i32(<16 x i1>, <16 x i32>, <16 x i32>, i32) -declare <16 x i32> @llvm.vp.select.nxv16i32(<16 x i1>, <16 x i32>, <16 x i32>, i32) - define <16 x i32> @vmacc_vv_nxv16i32(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv16i32: ; CHECK: # %bb.0: @@ -1339,11 +1264,6 @@ define <16 x i32> @vmacc_vx_nxv16i32_ta(<16 x i32> %a, i32 %b, <16 x i32> %c, < ret <16 x i32> %u } -declare <2 x i64> @llvm.vp.mul.nxv2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) -declare <2 x i64> @llvm.vp.add.nxv2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) -declare <2 x i64> @llvm.vp.merge.nxv2i64(<2 x i1>, <2 x i64>, <2 x i64>, i32) -declare <2 x i64> @llvm.vp.select.nxv2i64(<2 x i1>, <2 x i64>, <2 x i64>, i32) - define <2 x i64> @vmacc_vv_nxv2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1476,11 +1396,6 @@ define <2 x 
i64> @vmacc_vx_nxv2i64_ta(<2 x i64> %a, i64 %b, <2 x i64> %c, <2 x ret <2 x i64> %u } -declare <4 x i64> @llvm.vp.mul.nxv4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) -declare <4 x i64> @llvm.vp.add.nxv4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) -declare <4 x i64> @llvm.vp.merge.nxv4i64(<4 x i1>, <4 x i64>, <4 x i64>, i32) -declare <4 x i64> @llvm.vp.select.nxv4i64(<4 x i1>, <4 x i64>, <4 x i64>, i32) - define <4 x i64> @vmacc_vv_nxv4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1613,11 +1528,6 @@ define <4 x i64> @vmacc_vx_nxv4i64_ta(<4 x i64> %a, i64 %b, <4 x i64> %c, <4 x ret <4 x i64> %u } -declare <8 x i64> @llvm.vp.mul.nxv8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) -declare <8 x i64> @llvm.vp.add.nxv8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) -declare <8 x i64> @llvm.vp.merge.nxv8i64(<8 x i1>, <8 x i64>, <8 x i64>, i32) -declare <8 x i64> @llvm.vp.select.nxv8i64(<8 x i1>, <8 x i64>, <8 x i64>, i32) - define <8 x i64> @vmacc_vv_nxv8i64(<8 x i64> %a, <8 x i64> %b, <8 x i64> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll index ec5845752c29c..f5978de080082 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <8 x i7> @llvm.vp.smax.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32) - define <8 x i7> @vmax_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_v8i7: ; CHECK: # %bb.0: @@ -20,8 +18,6 @@ define <8 x i7> @vmax_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroex ret <8 x i7> %v } -declare <2 x i8> @llvm.vp.smax.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) - define <2 x i8> 
@vmax_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_v2i8: ; CHECK: # %bb.0: @@ -66,8 +62,6 @@ define <2 x i8> @vmax_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.smax.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) - define <4 x i8> @vmax_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_v4i8: ; CHECK: # %bb.0: @@ -124,8 +118,6 @@ define <4 x i8> @vmax_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) { ret <4 x i8> %v } -declare <5 x i8> @llvm.vp.smax.v5i8(<5 x i8>, <5 x i8>, <5 x i1>, i32) - define <5 x i8> @vmax_vv_v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_v5i8: ; CHECK: # %bb.0: @@ -170,8 +162,6 @@ define <5 x i8> @vmax_vx_v5i8_unmasked(<5 x i8> %va, i8 %b, i32 zeroext %evl) { ret <5 x i8> %v } -declare <8 x i8> @llvm.vp.smax.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) - define <8 x i8> @vmax_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_v8i8: ; CHECK: # %bb.0: @@ -216,8 +206,6 @@ define <8 x i8> @vmax_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.smax.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) - define <16 x i8> @vmax_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_v16i8: ; CHECK: # %bb.0: @@ -262,8 +250,6 @@ define <16 x i8> @vmax_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl) ret <16 x i8> %v } -declare <256 x i8> @llvm.vp.smax.v258i8(<256 x i8>, <256 x i8>, <256 x i1>, i32) - define <256 x i8> @vmax_vx_v258i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vx_v258i8: ; CHECK: # %bb.0: @@ -351,8 +337,6 @@ define <256 x i8> @vmax_vx_v258i8_evl128(<256 x i8> %va, i8 %b, <256 x i1> %m) { ret <256 x i8> %v } -declare <2 x i16> @llvm.vp.smax.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) - define <2 
x i16> @vmax_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_v2i16: ; CHECK: # %bb.0: @@ -397,8 +381,6 @@ define <2 x i16> @vmax_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.smax.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) - define <4 x i16> @vmax_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_v4i16: ; CHECK: # %bb.0: @@ -443,8 +425,6 @@ define <4 x i16> @vmax_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.smax.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) - define <8 x i16> @vmax_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_v8i16: ; CHECK: # %bb.0: @@ -489,8 +469,6 @@ define <8 x i16> @vmax_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.smax.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) - define <16 x i16> @vmax_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_v16i16: ; CHECK: # %bb.0: @@ -535,8 +513,6 @@ define <16 x i16> @vmax_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext % ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.smax.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) - define <2 x i32> @vmax_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_v2i32: ; CHECK: # %bb.0: @@ -581,8 +557,6 @@ define <2 x i32> @vmax_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.smax.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vmax_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_v4i32: ; CHECK: # %bb.0: @@ -627,8 +601,6 @@ define <4 x i32> @vmax_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl ret <4 x i32> %v } -declare <8 x i32> 
@llvm.vp.smax.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) - define <8 x i32> @vmax_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_v8i32: ; CHECK: # %bb.0: @@ -673,8 +645,6 @@ define <8 x i32> @vmax_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.smax.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32) - define <16 x i32> @vmax_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_v16i32: ; CHECK: # %bb.0: @@ -719,8 +689,6 @@ define <16 x i32> @vmax_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext % ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.smax.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) - define <2 x i64> @vmax_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_v2i64: ; CHECK: # %bb.0: @@ -795,8 +763,6 @@ define <2 x i64> @vmax_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.smax.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) - define <4 x i64> @vmax_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_v4i64: ; CHECK: # %bb.0: @@ -871,8 +837,6 @@ define <4 x i64> @vmax_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.smax.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) - define <8 x i64> @vmax_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_v8i64: ; CHECK: # %bb.0: @@ -947,8 +911,6 @@ define <8 x i64> @vmax_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl ret <8 x i64> %v } -declare <16 x i64> @llvm.vp.smax.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32) - define <16 x i64> @vmax_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_v16i64: ; CHECK: # %bb.0: @@ -1025,8 +987,6 @@ define <16 x i64> @vmax_vx_v16i64_unmasked(<16 x i64> 
%va, i64 %b, i32 zeroext % ; Test that split-legalization works as expected. -declare <32 x i64> @llvm.vp.smax.v32i64(<32 x i64>, <32 x i64>, <32 x i1>, i32) - define <32 x i64> @vmax_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vx_v32i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll index 2ffd3318d8759..7450a70df66ba 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <8 x i7> @llvm.vp.umax.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32) - define <8 x i7> @vmaxu_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_v8i7: ; CHECK: # %bb.0: @@ -19,8 +17,6 @@ define <8 x i7> @vmaxu_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroe ret <8 x i7> %v } -declare <2 x i8> @llvm.vp.umax.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) - define <2 x i8> @vmaxu_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_v2i8: ; CHECK: # %bb.0: @@ -65,8 +61,6 @@ define <2 x i8> @vmaxu_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.umax.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) - define <4 x i8> @vmaxu_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_v4i8: ; CHECK: # %bb.0: @@ -123,8 +117,6 @@ define <4 x i8> @vmaxu_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) { ret <4 x i8> %v } -declare <5 x i8> @llvm.vp.umax.v5i8(<5 x i8>, <5 x i8>, <5 x i1>, i32) - define <5 x i8> @vmaxu_vv_v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_v5i8: ; CHECK: # %bb.0: @@ -169,8 +161,6 @@ define <5 x i8> @vmaxu_vx_v5i8_unmasked(<5 x i8> %va, i8 %b, 
i32 zeroext %evl) { ret <5 x i8> %v } -declare <8 x i8> @llvm.vp.umax.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) - define <8 x i8> @vmaxu_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_v8i8: ; CHECK: # %bb.0: @@ -215,8 +205,6 @@ define <8 x i8> @vmaxu_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.umax.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) - define <16 x i8> @vmaxu_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_v16i8: ; CHECK: # %bb.0: @@ -261,8 +249,6 @@ define <16 x i8> @vmaxu_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl ret <16 x i8> %v } -declare <256 x i8> @llvm.vp.umax.v258i8(<256 x i8>, <256 x i8>, <256 x i1>, i32) - define <256 x i8> @vmaxu_vx_v258i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vx_v258i8: ; CHECK: # %bb.0: @@ -350,8 +336,6 @@ define <256 x i8> @vmaxu_vx_v258i8_evl128(<256 x i8> %va, i8 %b, <256 x i1> %m) ret <256 x i8> %v } -declare <2 x i16> @llvm.vp.umax.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) - define <2 x i16> @vmaxu_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_v2i16: ; CHECK: # %bb.0: @@ -396,8 +380,6 @@ define <2 x i16> @vmaxu_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %ev ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.umax.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) - define <4 x i16> @vmaxu_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_v4i16: ; CHECK: # %bb.0: @@ -442,8 +424,6 @@ define <4 x i16> @vmaxu_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %ev ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.umax.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) - define <8 x i16> @vmaxu_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_v8i16: ; CHECK: # %bb.0: @@ -488,8 +468,6 @@ define <8 x 
i16> @vmaxu_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %ev ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.umax.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) - define <16 x i16> @vmaxu_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_v16i16: ; CHECK: # %bb.0: @@ -534,8 +512,6 @@ define <16 x i16> @vmaxu_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.umax.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) - define <2 x i32> @vmaxu_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_v2i32: ; CHECK: # %bb.0: @@ -580,8 +556,6 @@ define <2 x i32> @vmaxu_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %ev ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.umax.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vmaxu_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_v4i32: ; CHECK: # %bb.0: @@ -626,8 +600,6 @@ define <4 x i32> @vmaxu_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %ev ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.umax.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) - define <8 x i32> @vmaxu_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_v8i32: ; CHECK: # %bb.0: @@ -672,8 +644,6 @@ define <8 x i32> @vmaxu_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %ev ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.umax.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32) - define <16 x i32> @vmaxu_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_v16i32: ; CHECK: # %bb.0: @@ -718,8 +688,6 @@ define <16 x i32> @vmaxu_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.umax.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) - define <2 x i64> @vmaxu_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) { ; 
CHECK-LABEL: vmaxu_vv_v2i64: ; CHECK: # %bb.0: @@ -794,8 +762,6 @@ define <2 x i64> @vmaxu_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %ev ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.umax.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) - define <4 x i64> @vmaxu_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_v4i64: ; CHECK: # %bb.0: @@ -870,8 +836,6 @@ define <4 x i64> @vmaxu_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %ev ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.umax.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) - define <8 x i64> @vmaxu_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_v8i64: ; CHECK: # %bb.0: @@ -946,8 +910,6 @@ define <8 x i64> @vmaxu_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %ev ret <8 x i64> %v } -declare <16 x i64> @llvm.vp.umax.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32) - define <16 x i64> @vmaxu_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_v16i64: ; CHECK: # %bb.0: @@ -1024,8 +986,6 @@ define <16 x i64> @vmaxu_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext ; Test that split-legalization works as expected. 
-declare <32 x i64> @llvm.vp.umax.v32i64(<32 x i64>, <32 x i64>, <32 x i1>, i32) - define <32 x i64> @vmaxu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vx_v32i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll index 53649c77098f2..31d19304c2909 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <8 x i7> @llvm.vp.smin.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32) - define <8 x i7> @vmin_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_v8i7: ; CHECK: # %bb.0: @@ -20,8 +18,6 @@ define <8 x i7> @vmin_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroex ret <8 x i7> %v } -declare <2 x i8> @llvm.vp.smin.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) - define <2 x i8> @vmin_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_v2i8: ; CHECK: # %bb.0: @@ -66,8 +62,6 @@ define <2 x i8> @vmin_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.smin.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) - define <4 x i8> @vmin_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_v4i8: ; CHECK: # %bb.0: @@ -124,8 +118,6 @@ define <4 x i8> @vmin_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) { ret <4 x i8> %v } -declare <5 x i8> @llvm.vp.smin.v5i8(<5 x i8>, <5 x i8>, <5 x i1>, i32) - define <5 x i8> @vmin_vv_v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_v5i8: ; CHECK: # %bb.0: @@ -170,8 +162,6 @@ define <5 x i8> @vmin_vx_v5i8_unmasked(<5 x i8> %va, i8 %b, i32 zeroext %evl) { ret <5 x i8> %v } -declare <8 x i8> @llvm.vp.smin.v8i8(<8 x i8>, <8 x 
i8>, <8 x i1>, i32) - define <8 x i8> @vmin_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_v8i8: ; CHECK: # %bb.0: @@ -216,8 +206,6 @@ define <8 x i8> @vmin_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.smin.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) - define <16 x i8> @vmin_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_v16i8: ; CHECK: # %bb.0: @@ -262,8 +250,6 @@ define <16 x i8> @vmin_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl) ret <16 x i8> %v } -declare <256 x i8> @llvm.vp.smin.v258i8(<256 x i8>, <256 x i8>, <256 x i1>, i32) - define <256 x i8> @vmin_vx_v258i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vx_v258i8: ; CHECK: # %bb.0: @@ -351,8 +337,6 @@ define <256 x i8> @vmin_vx_v258i8_evl128(<256 x i8> %va, i8 %b, <256 x i1> %m) { ret <256 x i8> %v } -declare <2 x i16> @llvm.vp.smin.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) - define <2 x i16> @vmin_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_v2i16: ; CHECK: # %bb.0: @@ -397,8 +381,6 @@ define <2 x i16> @vmin_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.smin.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) - define <4 x i16> @vmin_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_v4i16: ; CHECK: # %bb.0: @@ -443,8 +425,6 @@ define <4 x i16> @vmin_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.smin.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) - define <8 x i16> @vmin_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_v8i16: ; CHECK: # %bb.0: @@ -489,8 +469,6 @@ define <8 x i16> @vmin_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl ret <8 x i16> %v } -declare <16 x 
i16> @llvm.vp.smin.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) - define <16 x i16> @vmin_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_v16i16: ; CHECK: # %bb.0: @@ -535,8 +513,6 @@ define <16 x i16> @vmin_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext % ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.smin.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) - define <2 x i32> @vmin_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_v2i32: ; CHECK: # %bb.0: @@ -581,8 +557,6 @@ define <2 x i32> @vmin_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.smin.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vmin_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_v4i32: ; CHECK: # %bb.0: @@ -627,8 +601,6 @@ define <4 x i32> @vmin_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.smin.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) - define <8 x i32> @vmin_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_v8i32: ; CHECK: # %bb.0: @@ -673,8 +645,6 @@ define <8 x i32> @vmin_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.smin.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32) - define <16 x i32> @vmin_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_v16i32: ; CHECK: # %bb.0: @@ -719,8 +689,6 @@ define <16 x i32> @vmin_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext % ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.smin.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) - define <2 x i64> @vmin_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_v2i64: ; CHECK: # %bb.0: @@ -795,8 +763,6 @@ define <2 x i64> @vmin_vx_v2i64_unmasked(<2 x i64> 
%va, i64 %b, i32 zeroext %evl ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.smin.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) - define <4 x i64> @vmin_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_v4i64: ; CHECK: # %bb.0: @@ -871,8 +837,6 @@ define <4 x i64> @vmin_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.smin.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) - define <8 x i64> @vmin_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_v8i64: ; CHECK: # %bb.0: @@ -947,8 +911,6 @@ define <8 x i64> @vmin_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl ret <8 x i64> %v } -declare <16 x i64> @llvm.vp.smin.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32) - define <16 x i64> @vmin_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_v16i64: ; CHECK: # %bb.0: @@ -1025,8 +987,6 @@ define <16 x i64> @vmin_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext % ; Test that split-legalization works as expected. 
-declare <32 x i64> @llvm.vp.smin.v32i64(<32 x i64>, <32 x i64>, <32 x i1>, i32) - define <32 x i64> @vmin_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vx_v32i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll index 76b5be39f2d93..dda69ec8a7d2e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <8 x i7> @llvm.vp.umin.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32) - define <8 x i7> @vminu_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_v8i7: ; CHECK: # %bb.0: @@ -19,8 +17,6 @@ define <8 x i7> @vminu_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroe ret <8 x i7> %v } -declare <2 x i8> @llvm.vp.umin.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) - define <2 x i8> @vminu_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_v2i8: ; CHECK: # %bb.0: @@ -65,8 +61,6 @@ define <2 x i8> @vminu_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.umin.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) - define <4 x i8> @vminu_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_v4i8: ; CHECK: # %bb.0: @@ -123,8 +117,6 @@ define <4 x i8> @vminu_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) { ret <4 x i8> %v } -declare <5 x i8> @llvm.vp.umin.v5i8(<5 x i8>, <5 x i8>, <5 x i1>, i32) - define <5 x i8> @vminu_vv_v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_v5i8: ; CHECK: # %bb.0: @@ -169,8 +161,6 @@ define <5 x i8> @vminu_vx_v5i8_unmasked(<5 x i8> %va, i8 %b, i32 zeroext %evl) { ret <5 x i8> %v } -declare <8 x i8> @llvm.vp.umin.v8i8(<8 x 
i8>, <8 x i8>, <8 x i1>, i32) - define <8 x i8> @vminu_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_v8i8: ; CHECK: # %bb.0: @@ -215,8 +205,6 @@ define <8 x i8> @vminu_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.umin.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) - define <16 x i8> @vminu_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_v16i8: ; CHECK: # %bb.0: @@ -261,8 +249,6 @@ define <16 x i8> @vminu_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl ret <16 x i8> %v } -declare <256 x i8> @llvm.vp.umin.v258i8(<256 x i8>, <256 x i8>, <256 x i1>, i32) - define <256 x i8> @vminu_vx_v258i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vx_v258i8: ; CHECK: # %bb.0: @@ -350,8 +336,6 @@ define <256 x i8> @vminu_vx_v258i8_evl128(<256 x i8> %va, i8 %b, <256 x i1> %m) ret <256 x i8> %v } -declare <2 x i16> @llvm.vp.umin.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) - define <2 x i16> @vminu_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_v2i16: ; CHECK: # %bb.0: @@ -396,8 +380,6 @@ define <2 x i16> @vminu_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %ev ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.umin.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) - define <4 x i16> @vminu_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_v4i16: ; CHECK: # %bb.0: @@ -442,8 +424,6 @@ define <4 x i16> @vminu_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %ev ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.umin.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) - define <8 x i16> @vminu_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_v8i16: ; CHECK: # %bb.0: @@ -488,8 +468,6 @@ define <8 x i16> @vminu_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %ev ret <8 x 
i16> %v } -declare <16 x i16> @llvm.vp.umin.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) - define <16 x i16> @vminu_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_v16i16: ; CHECK: # %bb.0: @@ -534,8 +512,6 @@ define <16 x i16> @vminu_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.umin.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) - define <2 x i32> @vminu_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_v2i32: ; CHECK: # %bb.0: @@ -580,8 +556,6 @@ define <2 x i32> @vminu_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %ev ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.umin.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vminu_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_v4i32: ; CHECK: # %bb.0: @@ -626,8 +600,6 @@ define <4 x i32> @vminu_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %ev ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.umin.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) - define <8 x i32> @vminu_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_v8i32: ; CHECK: # %bb.0: @@ -672,8 +644,6 @@ define <8 x i32> @vminu_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %ev ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.umin.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32) - define <16 x i32> @vminu_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_v16i32: ; CHECK: # %bb.0: @@ -718,8 +688,6 @@ define <16 x i32> @vminu_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.umin.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) - define <2 x i64> @vminu_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_v2i64: ; CHECK: # %bb.0: @@ -794,8 +762,6 @@ define <2 x 
i64> @vminu_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %ev ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.umin.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) - define <4 x i64> @vminu_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_v4i64: ; CHECK: # %bb.0: @@ -870,8 +836,6 @@ define <4 x i64> @vminu_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %ev ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.umin.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) - define <8 x i64> @vminu_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_v8i64: ; CHECK: # %bb.0: @@ -946,8 +910,6 @@ define <8 x i64> @vminu_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %ev ret <8 x i64> %v } -declare <16 x i64> @llvm.vp.umin.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32) - define <16 x i64> @vminu_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_v16i64: ; CHECK: # %bb.0: @@ -1024,8 +986,6 @@ define <16 x i64> @vminu_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext ; Test that split-legalization works as expected. 
-declare <32 x i64> @llvm.vp.umin.v32i64(<32 x i64>, <32 x i64>, <32 x i1>, i32) - define <32 x i64> @vminu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vx_v32i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp-mask.ll index 3824ed76625df..40119a20ad07c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp-mask.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK -declare <2 x i1> @llvm.vp.mul.v2i1(<2 x i1>, <2 x i1>, <2 x i1>, i32) - define <2 x i1> @vmul_vv_v2i1(<2 x i1> %va, <2 x i1> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v2i1: ; CHECK: # %bb.0: @@ -16,8 +14,6 @@ define <2 x i1> @vmul_vv_v2i1(<2 x i1> %va, <2 x i1> %b, <2 x i1> %m, i32 zeroex ret <2 x i1> %v } -declare <4 x i1> @llvm.vp.mul.v4i1(<4 x i1>, <4 x i1>, <4 x i1>, i32) - define <4 x i1> @vmul_vv_v4i1(<4 x i1> %va, <4 x i1> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v4i1: ; CHECK: # %bb.0: @@ -28,8 +24,6 @@ define <4 x i1> @vmul_vv_v4i1(<4 x i1> %va, <4 x i1> %b, <4 x i1> %m, i32 zeroex ret <4 x i1> %v } -declare <8 x i1> @llvm.vp.mul.v8i1(<8 x i1>, <8 x i1>, <8 x i1>, i32) - define <8 x i1> @vmul_vv_v8i1(<8 x i1> %va, <8 x i1> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v8i1: ; CHECK: # %bb.0: @@ -40,8 +34,6 @@ define <8 x i1> @vmul_vv_v8i1(<8 x i1> %va, <8 x i1> %b, <8 x i1> %m, i32 zeroex ret <8 x i1> %v } -declare <16 x i1> @llvm.vp.mul.v16i1(<16 x i1>, <16 x i1>, <16 x i1>, i32) - define <16 x i1> @vmul_vv_v16i1(<16 x i1> %va, <16 x i1> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v16i1: ; CHECK: # %bb.0: @@ -52,8 +44,6 @@ define <16 x i1> @vmul_vv_v16i1(<16 x i1> %va, <16 x i1> %b, <16 x i1> %m, i32 z ret <16 x i1> %v } -declare <32 x i1> 
@llvm.vp.mul.v32i1(<32 x i1>, <32 x i1>, <32 x i1>, i32) - define <32 x i1> @vmul_vv_v32i1(<32 x i1> %va, <32 x i1> %b, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v32i1: ; CHECK: # %bb.0: @@ -64,8 +54,6 @@ define <32 x i1> @vmul_vv_v32i1(<32 x i1> %va, <32 x i1> %b, <32 x i1> %m, i32 z ret <32 x i1> %v } -declare <64 x i1> @llvm.vp.mul.v64i1(<64 x i1>, <64 x i1>, <64 x i1>, i32) - define <64 x i1> @vmul_vv_v64i1(<64 x i1> %va, <64 x i1> %b, <64 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v64i1: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp.ll index 26000033bd1db..9920a34520664 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <8 x i7> @llvm.vp.mul.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32) - define <8 x i7> @vmul_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v8i7: ; CHECK: # %bb.0: @@ -16,8 +14,6 @@ define <8 x i7> @vmul_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroex ret <8 x i7> %v } -declare <2 x i8> @llvm.vp.mul.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) - define <2 x i8> @vmul_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v2i8: ; CHECK: # %bb.0: @@ -62,8 +58,6 @@ define <2 x i8> @vmul_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.mul.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) - define <4 x i8> @vmul_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v4i8: ; CHECK: # %bb.0: @@ -108,8 +102,6 @@ define <4 x i8> @vmul_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) { ret <4 x i8> %v } -declare <8 x i8> @llvm.vp.mul.v8i8(<8 x i8>, <8 x i8>, <8 x 
i1>, i32) - define <8 x i8> @vmul_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v8i8: ; CHECK: # %bb.0: @@ -154,8 +146,6 @@ define <8 x i8> @vmul_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.mul.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) - define <16 x i8> @vmul_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v16i8: ; CHECK: # %bb.0: @@ -200,8 +190,6 @@ define <16 x i8> @vmul_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl) ret <16 x i8> %v } -declare <2 x i16> @llvm.vp.mul.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) - define <2 x i16> @vmul_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v2i16: ; CHECK: # %bb.0: @@ -246,8 +234,6 @@ define <2 x i16> @vmul_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.mul.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) - define <4 x i16> @vmul_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v4i16: ; CHECK: # %bb.0: @@ -292,8 +278,6 @@ define <4 x i16> @vmul_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.mul.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) - define <8 x i16> @vmul_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v8i16: ; CHECK: # %bb.0: @@ -350,8 +334,6 @@ define <8 x i16> @vmul_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl ret <8 x i16> %v } -declare <12 x i16> @llvm.vp.mul.v12i16(<12 x i16>, <12 x i16>, <12 x i1>, i32) - define <12 x i16> @vmul_vv_v12i16(<12 x i16> %va, <12 x i16> %b, <12 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v12i16: ; CHECK: # %bb.0: @@ -396,8 +378,6 @@ define <12 x i16> @vmul_vx_v12i16_unmasked(<12 x i16> %va, i16 %b, i32 zeroext % ret <12 x i16> %v } -declare <16 x i16> 
@llvm.vp.mul.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) - define <16 x i16> @vmul_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v16i16: ; CHECK: # %bb.0: @@ -442,8 +422,6 @@ define <16 x i16> @vmul_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext % ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.mul.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) - define <2 x i32> @vmul_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v2i32: ; CHECK: # %bb.0: @@ -488,8 +466,6 @@ define <2 x i32> @vmul_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.mul.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vmul_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v4i32: ; CHECK: # %bb.0: @@ -534,8 +510,6 @@ define <4 x i32> @vmul_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.mul.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) - define <8 x i32> @vmul_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v8i32: ; CHECK: # %bb.0: @@ -580,8 +554,6 @@ define <8 x i32> @vmul_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.mul.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32) - define <16 x i32> @vmul_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v16i32: ; CHECK: # %bb.0: @@ -626,8 +598,6 @@ define <16 x i32> @vmul_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext % ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.mul.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) - define <2 x i64> @vmul_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v2i64: ; CHECK: # %bb.0: @@ -702,8 +672,6 @@ define <2 x i64> @vmul_vx_v2i64_unmasked(<2 x i64> %va, i64 
%b, i32 zeroext %evl ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.mul.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) - define <4 x i64> @vmul_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v4i64: ; CHECK: # %bb.0: @@ -778,8 +746,6 @@ define <4 x i64> @vmul_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.mul.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) - define <8 x i64> @vmul_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v8i64: ; CHECK: # %bb.0: @@ -854,8 +820,6 @@ define <8 x i64> @vmul_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl ret <8 x i64> %v } -declare <16 x i64> @llvm.vp.mul.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32) - define <16 x i64> @vmul_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v16i64: ; CHECK: # %bb.0: @@ -930,7 +894,6 @@ define <16 x i64> @vmul_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext % ret <16 x i64> %v } - define <8 x i64> @vmul_vv_undef_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vmul_vv_undef_v8i64: ; RV32: # %bb.0: @@ -1105,8 +1068,6 @@ define <8 x i64> @vmul_vx_negpow2_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl ret <8 x i64> %v } -declare <8 x i64> @llvm.vp.shl.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) - define <8 x i64> @vmul_vshl_vx_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vshl_vx_v8i64: ; CHECK: # %bb.0: @@ -1172,8 +1133,6 @@ define <8 x i64> @vmul_vshl_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %vb, i32 ret <8 x i64> %v } -declare <8 x i64> @llvm.vp.add.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) - define <8 x i64> @vmul_vadd_vx_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vadd_vx_v8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vnmsac-vp.ll 
b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vnmsac-vp.ll index b8798fe6c63dc..cc492f3c6dcb6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vnmsac-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vnmsac-vp.ll @@ -4,11 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 -declare <2 x i8> @llvm.vp.mul.nxv2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) -declare <2 x i8> @llvm.vp.sub.nxv2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) -declare <2 x i8> @llvm.vp.merge.nxv2i8(<2 x i1>, <2 x i8>, <2 x i8>, i32) -declare <2 x i8> @llvm.vp.select.nxv2i8(<2 x i1>, <2 x i8>, <2 x i8>, i32) - define <2 x i8> @vnmsac_vv_nxv2i8(<2 x i8> %a, <2 x i8> %b, <2 x i8> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv2i8: ; CHECK: # %bb.0: @@ -93,11 +88,6 @@ define <2 x i8> @vnmsac_vx_nxv2i8_ta(<2 x i8> %a, i8 %b, <2 x i8> %c, <2 x i1> ret <2 x i8> %u } -declare <4 x i8> @llvm.vp.mul.nxv4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) -declare <4 x i8> @llvm.vp.sub.nxv4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) -declare <4 x i8> @llvm.vp.merge.nxv4i8(<4 x i1>, <4 x i8>, <4 x i8>, i32) -declare <4 x i8> @llvm.vp.select.nxv4i8(<4 x i1>, <4 x i8>, <4 x i8>, i32) - define <4 x i8> @vnmsac_vv_nxv4i8(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv4i8: ; CHECK: # %bb.0: @@ -182,11 +172,6 @@ define <4 x i8> @vnmsac_vx_nxv4i8_ta(<4 x i8> %a, i8 %b, <4 x i8> %c, <4 x i1> ret <4 x i8> %u } -declare <8 x i8> @llvm.vp.mul.nxv8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) -declare <8 x i8> @llvm.vp.sub.nxv8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) -declare <8 x i8> @llvm.vp.merge.nxv8i8(<8 x i1>, <8 x i8>, <8 x i8>, i32) -declare <8 x i8> @llvm.vp.select.nxv8i8(<8 x i1>, <8 x i8>, <8 x i8>, i32) - define <8 x i8> @vnmsac_vv_nxv8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv8i8: ; CHECK: # %bb.0: @@ -271,11 +256,6 @@ 
define <8 x i8> @vnmsac_vx_nxv8i8_ta(<8 x i8> %a, i8 %b, <8 x i8> %c, <8 x i1> ret <8 x i8> %u } -declare <16 x i8> @llvm.vp.mul.nxv16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) -declare <16 x i8> @llvm.vp.sub.nxv16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) -declare <16 x i8> @llvm.vp.merge.nxv16i8(<16 x i1>, <16 x i8>, <16 x i8>, i32) -declare <16 x i8> @llvm.vp.select.nxv16i8(<16 x i1>, <16 x i8>, <16 x i8>, i32) - define <16 x i8> @vnmsac_vv_nxv16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv16i8: ; CHECK: # %bb.0: @@ -360,11 +340,6 @@ define <16 x i8> @vnmsac_vx_nxv16i8_ta(<16 x i8> %a, i8 %b, <16 x i8> %c, <16 x ret <16 x i8> %u } -declare <32 x i8> @llvm.vp.mul.nxv32i8(<32 x i8>, <32 x i8>, <32 x i1>, i32) -declare <32 x i8> @llvm.vp.sub.nxv32i8(<32 x i8>, <32 x i8>, <32 x i1>, i32) -declare <32 x i8> @llvm.vp.merge.nxv32i8(<32 x i1>, <32 x i8>, <32 x i8>, i32) -declare <32 x i8> @llvm.vp.select.nxv32i8(<32 x i1>, <32 x i8>, <32 x i8>, i32) - define <32 x i8> @vnmsac_vv_nxv32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8> %c, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv32i8: ; CHECK: # %bb.0: @@ -449,11 +424,6 @@ define <32 x i8> @vnmsac_vx_nxv32i8_ta(<32 x i8> %a, i8 %b, <32 x i8> %c, <32 x ret <32 x i8> %u } -declare <64 x i8> @llvm.vp.mul.nxv64i8(<64 x i8>, <64 x i8>, <64 x i1>, i32) -declare <64 x i8> @llvm.vp.sub.nxv64i8(<64 x i8>, <64 x i8>, <64 x i1>, i32) -declare <64 x i8> @llvm.vp.merge.nxv64i8(<64 x i1>, <64 x i8>, <64 x i8>, i32) -declare <64 x i8> @llvm.vp.select.nxv64i8(<64 x i1>, <64 x i8>, <64 x i8>, i32) - define <64 x i8> @vnmsac_vv_nxv64i8(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c, <64 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv64i8: ; CHECK: # %bb.0: @@ -538,11 +508,6 @@ define <64 x i8> @vnmsac_vx_nxv64i8_ta(<64 x i8> %a, i8 %b, <64 x i8> %c, <64 x ret <64 x i8> %u } -declare <2 x i16> @llvm.vp.mul.nxv2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) -declare <2 x 
i16> @llvm.vp.sub.nxv2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) -declare <2 x i16> @llvm.vp.merge.nxv2i16(<2 x i1>, <2 x i16>, <2 x i16>, i32) -declare <2 x i16> @llvm.vp.select.nxv2i16(<2 x i1>, <2 x i16>, <2 x i16>, i32) - define <2 x i16> @vnmsac_vv_nxv2i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv2i16: ; CHECK: # %bb.0: @@ -627,11 +592,6 @@ define <2 x i16> @vnmsac_vx_nxv2i16_ta(<2 x i16> %a, i16 %b, <2 x i16> %c, <2 x ret <2 x i16> %u } -declare <4 x i16> @llvm.vp.mul.nxv4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) -declare <4 x i16> @llvm.vp.sub.nxv4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) -declare <4 x i16> @llvm.vp.merge.nxv4i16(<4 x i1>, <4 x i16>, <4 x i16>, i32) -declare <4 x i16> @llvm.vp.select.nxv4i16(<4 x i1>, <4 x i16>, <4 x i16>, i32) - define <4 x i16> @vnmsac_vv_nxv4i16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv4i16: ; CHECK: # %bb.0: @@ -716,11 +676,6 @@ define <4 x i16> @vnmsac_vx_nxv4i16_ta(<4 x i16> %a, i16 %b, <4 x i16> %c, <4 x ret <4 x i16> %u } -declare <8 x i16> @llvm.vp.mul.nxv8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) -declare <8 x i16> @llvm.vp.sub.nxv8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) -declare <8 x i16> @llvm.vp.merge.nxv8i16(<8 x i1>, <8 x i16>, <8 x i16>, i32) -declare <8 x i16> @llvm.vp.select.nxv8i16(<8 x i1>, <8 x i16>, <8 x i16>, i32) - define <8 x i16> @vnmsac_vv_nxv8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv8i16: ; CHECK: # %bb.0: @@ -805,11 +760,6 @@ define <8 x i16> @vnmsac_vx_nxv8i16_ta(<8 x i16> %a, i16 %b, <8 x i16> %c, <8 x ret <8 x i16> %u } -declare <16 x i16> @llvm.vp.mul.nxv16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) -declare <16 x i16> @llvm.vp.sub.nxv16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) -declare <16 x i16> @llvm.vp.merge.nxv16i16(<16 x i1>, <16 x i16>, <16 x i16>, i32) -declare <16 x i16> @llvm.vp.select.nxv16i16(<16 
x i1>, <16 x i16>, <16 x i16>, i32) - define <16 x i16> @vnmsac_vv_nxv16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv16i16: ; CHECK: # %bb.0: @@ -894,11 +844,6 @@ define <16 x i16> @vnmsac_vx_nxv16i16_ta(<16 x i16> %a, i16 %b, <16 x i16> %c, ret <16 x i16> %u } -declare <32 x i16> @llvm.vp.mul.nxv32i16(<32 x i16>, <32 x i16>, <32 x i1>, i32) -declare <32 x i16> @llvm.vp.sub.nxv32i16(<32 x i16>, <32 x i16>, <32 x i1>, i32) -declare <32 x i16> @llvm.vp.merge.nxv32i16(<32 x i1>, <32 x i16>, <32 x i16>, i32) -declare <32 x i16> @llvm.vp.select.nxv32i16(<32 x i1>, <32 x i16>, <32 x i16>, i32) - define <32 x i16> @vnmsac_vv_nxv32i16(<32 x i16> %a, <32 x i16> %b, <32 x i16> %c, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv32i16: ; CHECK: # %bb.0: @@ -983,11 +928,6 @@ define <32 x i16> @vnmsac_vx_nxv32i16_ta(<32 x i16> %a, i16 %b, <32 x i16> %c, ret <32 x i16> %u } -declare <2 x i32> @llvm.vp.mul.nxv2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) -declare <2 x i32> @llvm.vp.sub.nxv2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) -declare <2 x i32> @llvm.vp.merge.nxv2i32(<2 x i1>, <2 x i32>, <2 x i32>, i32) -declare <2 x i32> @llvm.vp.select.nxv2i32(<2 x i1>, <2 x i32>, <2 x i32>, i32) - define <2 x i32> @vnmsac_vv_nxv2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv2i32: ; CHECK: # %bb.0: @@ -1072,11 +1012,6 @@ define <2 x i32> @vnmsac_vx_nxv2i32_ta(<2 x i32> %a, i32 %b, <2 x i32> %c, <2 x ret <2 x i32> %u } -declare <4 x i32> @llvm.vp.mul.nxv4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) -declare <4 x i32> @llvm.vp.sub.nxv4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) -declare <4 x i32> @llvm.vp.merge.nxv4i32(<4 x i1>, <4 x i32>, <4 x i32>, i32) -declare <4 x i32> @llvm.vp.select.nxv4i32(<4 x i1>, <4 x i32>, <4 x i32>, i32) - define <4 x i32> @vnmsac_vv_nxv4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vnmsac_vv_nxv4i32: ; CHECK: # %bb.0: @@ -1161,11 +1096,6 @@ define <4 x i32> @vnmsac_vx_nxv4i32_ta(<4 x i32> %a, i32 %b, <4 x i32> %c, <4 x ret <4 x i32> %u } -declare <8 x i32> @llvm.vp.mul.nxv8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) -declare <8 x i32> @llvm.vp.sub.nxv8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) -declare <8 x i32> @llvm.vp.merge.nxv8i32(<8 x i1>, <8 x i32>, <8 x i32>, i32) -declare <8 x i32> @llvm.vp.select.nxv8i32(<8 x i1>, <8 x i32>, <8 x i32>, i32) - define <8 x i32> @vnmsac_vv_nxv8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv8i32: ; CHECK: # %bb.0: @@ -1250,11 +1180,6 @@ define <8 x i32> @vnmsac_vx_nxv8i32_ta(<8 x i32> %a, i32 %b, <8 x i32> %c, <8 x ret <8 x i32> %u } -declare <16 x i32> @llvm.vp.mul.nxv16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32) -declare <16 x i32> @llvm.vp.sub.nxv16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32) -declare <16 x i32> @llvm.vp.merge.nxv16i32(<16 x i1>, <16 x i32>, <16 x i32>, i32) -declare <16 x i32> @llvm.vp.select.nxv16i32(<16 x i1>, <16 x i32>, <16 x i32>, i32) - define <16 x i32> @vnmsac_vv_nxv16i32(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv16i32: ; CHECK: # %bb.0: @@ -1339,11 +1264,6 @@ define <16 x i32> @vnmsac_vx_nxv16i32_ta(<16 x i32> %a, i32 %b, <16 x i32> %c, ret <16 x i32> %u } -declare <2 x i64> @llvm.vp.mul.nxv2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) -declare <2 x i64> @llvm.vp.sub.nxv2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) -declare <2 x i64> @llvm.vp.merge.nxv2i64(<2 x i1>, <2 x i64>, <2 x i64>, i32) -declare <2 x i64> @llvm.vp.select.nxv2i64(<2 x i1>, <2 x i64>, <2 x i64>, i32) - define <2 x i64> @vnmsac_vv_nxv2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1476,11 +1396,6 @@ define <2 x i64> @vnmsac_vx_nxv2i64_ta(<2 x i64> %a, i64 %b, <2 x i64> %c, <2 x ret <2 x i64> %u } -declare <4 x 
i64> @llvm.vp.mul.nxv4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) -declare <4 x i64> @llvm.vp.sub.nxv4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) -declare <4 x i64> @llvm.vp.merge.nxv4i64(<4 x i1>, <4 x i64>, <4 x i64>, i32) -declare <4 x i64> @llvm.vp.select.nxv4i64(<4 x i1>, <4 x i64>, <4 x i64>, i32) - define <4 x i64> @vnmsac_vv_nxv4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1613,11 +1528,6 @@ define <4 x i64> @vnmsac_vx_nxv4i64_ta(<4 x i64> %a, i64 %b, <4 x i64> %c, <4 x ret <4 x i64> %u } -declare <8 x i64> @llvm.vp.mul.nxv8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) -declare <8 x i64> @llvm.vp.sub.nxv8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) -declare <8 x i64> @llvm.vp.merge.nxv8i64(<8 x i1>, <8 x i64>, <8 x i64>, i32) -declare <8 x i64> @llvm.vp.select.nxv8i64(<8 x i1>, <8 x i64>, <8 x i64>, i32) - define <8 x i64> @vnmsac_vv_nxv8i64(<8 x i64> %a, <8 x i64> %b, <8 x i64> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vor-vp.ll index 2ebd008f8dbe7..7127d2318d39b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vor-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vor-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <8 x i7> @llvm.vp.or.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32) - define <8 x i7> @vor_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v8i7: ; CHECK: # %bb.0: @@ -16,8 +14,6 @@ define <8 x i7> @vor_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext ret <8 x i7> %v } -declare <2 x i8> @llvm.vp.or.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) - define <2 x i8> @vor_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v2i8: ; CHECK: # 
%bb.0: @@ -82,8 +78,6 @@ define <2 x i8> @vor_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.or.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) - define <4 x i8> @vor_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v4i8: ; CHECK: # %bb.0: @@ -160,8 +154,6 @@ define <4 x i8> @vor_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ret <4 x i8> %v } -declare <7 x i8> @llvm.vp.or.v5i8(<7 x i8>, <7 x i8>, <7 x i1>, i32) - define <7 x i8> @vor_vv_v5i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v5i8: ; CHECK: # %bb.0: @@ -226,8 +218,6 @@ define <7 x i8> @vor_vi_v5i8_unmasked(<7 x i8> %va, i32 zeroext %evl) { ret <7 x i8> %v } -declare <8 x i8> @llvm.vp.or.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) - define <8 x i8> @vor_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v8i8: ; CHECK: # %bb.0: @@ -292,8 +282,6 @@ define <8 x i8> @vor_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.or.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) - define <16 x i8> @vor_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v16i8: ; CHECK: # %bb.0: @@ -358,8 +346,6 @@ define <16 x i8> @vor_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) { ret <16 x i8> %v } -declare <2 x i16> @llvm.vp.or.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) - define <2 x i16> @vor_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v2i16: ; CHECK: # %bb.0: @@ -424,8 +410,6 @@ define <2 x i16> @vor_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) { ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.or.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) - define <4 x i16> @vor_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v4i16: ; CHECK: # %bb.0: @@ -490,8 +474,6 @@ define <4 x i16> 
@vor_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.or.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) - define <8 x i16> @vor_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v8i16: ; CHECK: # %bb.0: @@ -556,8 +538,6 @@ define <8 x i16> @vor_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) { ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.or.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) - define <16 x i16> @vor_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v16i16: ; CHECK: # %bb.0: @@ -622,8 +602,6 @@ define <16 x i16> @vor_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) { ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.or.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) - define <2 x i32> @vor_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v2i32: ; CHECK: # %bb.0: @@ -688,8 +666,6 @@ define <2 x i32> @vor_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) { ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.or.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vor_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v4i32: ; CHECK: # %bb.0: @@ -754,8 +730,6 @@ define <4 x i32> @vor_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.or.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) - define <8 x i32> @vor_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v8i32: ; CHECK: # %bb.0: @@ -820,8 +794,6 @@ define <8 x i32> @vor_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) { ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.or.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32) - define <16 x i32> @vor_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v16i32: ; CHECK: # %bb.0: @@ -886,8 +858,6 @@ define <16 
x i32> @vor_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) { ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.or.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) - define <2 x i64> @vor_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v2i64: ; CHECK: # %bb.0: @@ -982,8 +952,6 @@ define <2 x i64> @vor_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) { ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.or.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) - define <4 x i64> @vor_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v4i64: ; CHECK: # %bb.0: @@ -1078,8 +1046,6 @@ define <4 x i64> @vor_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.or.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) - define <8 x i64> @vor_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v8i64: ; CHECK: # %bb.0: @@ -1174,8 +1140,6 @@ define <8 x i64> @vor_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) { ret <8 x i64> %v } -declare <16 x i64> @llvm.vp.or.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32) - define <16 x i64> @vor_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v16i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vp-reverse-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vp-reverse-int.ll index 27f16f0285e12..60fc1771de09b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vp-reverse-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vp-reverse-int.ll @@ -120,7 +120,3 @@ define <16 x i8> @test_vp_reverse_v16i8(<16 x i8> %src, i32 zeroext %evl) { ret <16 x i8> %dst } -declare <2 x i64> @llvm.experimental.vp.reverse.v2i64(<2 x i64>,<2 x i1>,i32) -declare <4 x i32> @llvm.experimental.vp.reverse.v4i32(<4 x i32>,<4 x i1>,i32) -declare <8 x i16> @llvm.experimental.vp.reverse.v8i16(<8 x i16>,<8 x i1>,i32) -declare <16 x i8> 
@llvm.experimental.vp.reverse.v16i8(<16 x i8>,<16 x i1>,i32) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll index 352666de57881..3f5751aaa2cad 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+zvfbfmin,+v \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV64 -declare <2 x i8> @llvm.vp.gather.v2i8.v2p0(<2 x ptr>, <2 x i1>, i32) - define <2 x i8> @vpgather_v2i8(<2 x ptr> %ptrs, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v2i8: ; RV32: # %bb.0: @@ -154,8 +152,6 @@ define <2 x i64> @vpgather_v2i8_zextload_v2i64(<2 x ptr> %ptrs, <2 x i1> %m, i32 ret <2 x i64> %ev } -declare <3 x i8> @llvm.vp.gather.v3i8.v3p0(<3 x ptr>, <3 x i1>, i32) - define <3 x i8> @vpgather_v3i8(<3 x ptr> %ptrs, <3 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v3i8: ; RV32: # %bb.0: @@ -192,8 +188,6 @@ define <3 x i8> @vpgather_truemask_v3i8(<3 x ptr> %ptrs, i32 zeroext %evl) { ret <3 x i8> %v } -declare <4 x i8> @llvm.vp.gather.v4i8.v4p0(<4 x ptr>, <4 x i1>, i32) - define <4 x i8> @vpgather_v4i8(<4 x ptr> %ptrs, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v4i8: ; RV32: # %bb.0: @@ -230,8 +224,6 @@ define <4 x i8> @vpgather_truemask_v4i8(<4 x ptr> %ptrs, i32 zeroext %evl) { ret <4 x i8> %v } -declare <8 x i8> @llvm.vp.gather.v8i8.v8p0(<8 x ptr>, <8 x i1>, i32) - define <8 x i8> @vpgather_v8i8(<8 x ptr> %ptrs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v8i8: ; RV32: # %bb.0: @@ -271,8 +263,6 @@ define <8 x i8> @vpgather_baseidx_v8i8(ptr %base, <8 x i8> %idxs, <8 x i1> %m, i ret <8 x i8> %v } -declare <32 x i8> @llvm.vp.gather.v32i8.v32p0(<32 x ptr>, <32 x i1>, i32) - define <32 x i8> @vpgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_v32i8: ; RV32: # %bb.0: @@ 
-317,8 +307,6 @@ define <32 x i8> @vpgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> % ret <32 x i8> %v } -declare <2 x i16> @llvm.vp.gather.v2i16.v2p0(<2 x ptr>, <2 x i1>, i32) - define <2 x i16> @vpgather_v2i16(<2 x ptr> %ptrs, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v2i16: ; RV32: # %bb.0: @@ -421,8 +409,6 @@ define <2 x i64> @vpgather_v2i16_zextload_v2i64(<2 x ptr> %ptrs, <2 x i1> %m, i3 ret <2 x i64> %ev } -declare <4 x i16> @llvm.vp.gather.v4i16.v4p0(<4 x ptr>, <4 x i1>, i32) - define <4 x i16> @vpgather_v4i16(<4 x ptr> %ptrs, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v4i16: ; RV32: # %bb.0: @@ -459,8 +445,6 @@ define <4 x i16> @vpgather_truemask_v4i16(<4 x ptr> %ptrs, i32 zeroext %evl) { ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.gather.v8i16.v8p0(<8 x ptr>, <8 x i1>, i32) - define <8 x i16> @vpgather_v8i16(<8 x ptr> %ptrs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v8i16: ; RV32: # %bb.0: @@ -570,8 +554,6 @@ define <8 x i16> @vpgather_baseidx_v8i16(ptr %base, <8 x i16> %idxs, <8 x i1> %m ret <8 x i16> %v } -declare <2 x i32> @llvm.vp.gather.v2i32.v2p0(<2 x ptr>, <2 x i1>, i32) - define <2 x i32> @vpgather_v2i32(<2 x ptr> %ptrs, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v2i32: ; RV32: # %bb.0: @@ -631,8 +613,6 @@ define <2 x i64> @vpgather_v2i32_zextload_v2i64(<2 x ptr> %ptrs, <2 x i1> %m, i3 ret <2 x i64> %ev } -declare <4 x i32> @llvm.vp.gather.v4i32.v4p0(<4 x ptr>, <4 x i1>, i32) - define <4 x i32> @vpgather_v4i32(<4 x ptr> %ptrs, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v4i32: ; RV32: # %bb.0: @@ -667,8 +647,6 @@ define <4 x i32> @vpgather_truemask_v4i32(<4 x ptr> %ptrs, i32 zeroext %evl) { ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.gather.v8i32.v8p0(<8 x ptr>, <8 x i1>, i32) - define <8 x i32> @vpgather_v8i32(<8 x ptr> %ptrs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v8i32: ; RV32: # %bb.0: @@ -850,8 +828,6 @@ define <8 x i32> 
@vpgather_baseidx_v8i32(ptr %base, <8 x i32> %idxs, <8 x i1> %m ret <8 x i32> %v } -declare <2 x i64> @llvm.vp.gather.v2i64.v2p0(<2 x ptr>, <2 x i1>, i32) - define <2 x i64> @vpgather_v2i64(<2 x ptr> %ptrs, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v2i64: ; RV32: # %bb.0: @@ -869,8 +845,6 @@ define <2 x i64> @vpgather_v2i64(<2 x ptr> %ptrs, <2 x i1> %m, i32 zeroext %evl) ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.gather.v4i64.v4p0(<4 x ptr>, <4 x i1>, i32) - define <4 x i64> @vpgather_v4i64(<4 x ptr> %ptrs, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v4i64: ; RV32: # %bb.0: @@ -905,8 +879,6 @@ define <4 x i64> @vpgather_truemask_v4i64(<4 x ptr> %ptrs, i32 zeroext %evl) { ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.gather.v8i64.v8p0(<8 x ptr>, <8 x i1>, i32) - define <8 x i64> @vpgather_v8i64(<8 x ptr> %ptrs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v8i64: ; RV32: # %bb.0: @@ -1156,8 +1128,6 @@ define <8 x i64> @vpgather_baseidx_v8i64(ptr %base, <8 x i64> %idxs, <8 x i1> %m ret <8 x i64> %v } -declare <2 x bfloat> @llvm.vp.gather.v2bf16.v2p0(<2 x ptr>, <2 x i1>, i32) - define <2 x bfloat> @vpgather_v2bf16(<2 x ptr> %ptrs, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v2bf16: ; RV32: # %bb.0: @@ -1176,8 +1146,6 @@ define <2 x bfloat> @vpgather_v2bf16(<2 x ptr> %ptrs, <2 x i1> %m, i32 zeroext % ret <2 x bfloat> %v } -declare <4 x bfloat> @llvm.vp.gather.v4bf16.v4p0(<4 x ptr>, <4 x i1>, i32) - define <4 x bfloat> @vpgather_v4bf16(<4 x ptr> %ptrs, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v4bf16: ; RV32: # %bb.0: @@ -1214,8 +1182,6 @@ define <4 x bfloat> @vpgather_truemask_v4bf16(<4 x ptr> %ptrs, i32 zeroext %evl) ret <4 x bfloat> %v } -declare <8 x bfloat> @llvm.vp.gather.v8bf16.v8p0(<8 x ptr>, <8 x i1>, i32) - define <8 x bfloat> @vpgather_v8bf16(<8 x ptr> %ptrs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v8bf16: ; RV32: # %bb.0: @@ -1325,8 +1291,6 @@ define <8 x bfloat> 
@vpgather_baseidx_v8bf16(ptr %base, <8 x i16> %idxs, <8 x i1 ret <8 x bfloat> %v } -declare <2 x half> @llvm.vp.gather.v2f16.v2p0(<2 x ptr>, <2 x i1>, i32) - define <2 x half> @vpgather_v2f16(<2 x ptr> %ptrs, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v2f16: ; RV32: # %bb.0: @@ -1345,8 +1309,6 @@ define <2 x half> @vpgather_v2f16(<2 x ptr> %ptrs, <2 x i1> %m, i32 zeroext %evl ret <2 x half> %v } -declare <4 x half> @llvm.vp.gather.v4f16.v4p0(<4 x ptr>, <4 x i1>, i32) - define <4 x half> @vpgather_v4f16(<4 x ptr> %ptrs, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v4f16: ; RV32: # %bb.0: @@ -1383,8 +1345,6 @@ define <4 x half> @vpgather_truemask_v4f16(<4 x ptr> %ptrs, i32 zeroext %evl) { ret <4 x half> %v } -declare <8 x half> @llvm.vp.gather.v8f16.v8p0(<8 x ptr>, <8 x i1>, i32) - define <8 x half> @vpgather_v8f16(<8 x ptr> %ptrs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v8f16: ; RV32: # %bb.0: @@ -1494,8 +1454,6 @@ define <8 x half> @vpgather_baseidx_v8f16(ptr %base, <8 x i16> %idxs, <8 x i1> % ret <8 x half> %v } -declare <2 x float> @llvm.vp.gather.v2f32.v2p0(<2 x ptr>, <2 x i1>, i32) - define <2 x float> @vpgather_v2f32(<2 x ptr> %ptrs, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v2f32: ; RV32: # %bb.0: @@ -1513,8 +1471,6 @@ define <2 x float> @vpgather_v2f32(<2 x ptr> %ptrs, <2 x i1> %m, i32 zeroext %ev ret <2 x float> %v } -declare <4 x float> @llvm.vp.gather.v4f32.v4p0(<4 x ptr>, <4 x i1>, i32) - define <4 x float> @vpgather_v4f32(<4 x ptr> %ptrs, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v4f32: ; RV32: # %bb.0: @@ -1549,8 +1505,6 @@ define <4 x float> @vpgather_truemask_v4f32(<4 x ptr> %ptrs, i32 zeroext %evl) { ret <4 x float> %v } -declare <8 x float> @llvm.vp.gather.v8f32.v8p0(<8 x ptr>, <8 x i1>, i32) - define <8 x float> @vpgather_v8f32(<8 x ptr> %ptrs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v8f32: ; RV32: # %bb.0: @@ -1732,8 +1686,6 @@ define <8 x float> 
@vpgather_baseidx_v8f32(ptr %base, <8 x i32> %idxs, <8 x i1> ret <8 x float> %v } -declare <2 x double> @llvm.vp.gather.v2f64.v2p0(<2 x ptr>, <2 x i1>, i32) - define <2 x double> @vpgather_v2f64(<2 x ptr> %ptrs, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v2f64: ; RV32: # %bb.0: @@ -1751,8 +1703,6 @@ define <2 x double> @vpgather_v2f64(<2 x ptr> %ptrs, <2 x i1> %m, i32 zeroext %e ret <2 x double> %v } -declare <4 x double> @llvm.vp.gather.v4f64.v4p0(<4 x ptr>, <4 x i1>, i32) - define <4 x double> @vpgather_v4f64(<4 x ptr> %ptrs, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v4f64: ; RV32: # %bb.0: @@ -1787,8 +1737,6 @@ define <4 x double> @vpgather_truemask_v4f64(<4 x ptr> %ptrs, i32 zeroext %evl) ret <4 x double> %v } -declare <8 x double> @llvm.vp.gather.v8f64.v8p0(<8 x ptr>, <8 x i1>, i32) - define <8 x double> @vpgather_v8f64(<8 x ptr> %ptrs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v8f64: ; RV32: # %bb.0: @@ -2038,8 +1986,6 @@ define <8 x double> @vpgather_baseidx_v8f64(ptr %base, <8 x i64> %idxs, <8 x i1> ret <8 x double> %v } -declare <32 x double> @llvm.vp.gather.v32f64.v32p0(<32 x ptr>, <32 x i1>, i32) - define <32 x double> @vpgather_v32f64(<32 x ptr> %ptrs, <32 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_v32f64: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll index 8e2e8f3fb0dec..d058669c103f3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare <2 x i8> @llvm.vp.load.v2i8.p0(ptr, <2 x i1>, i32) - define <2 x i8> @vpload_v2i8(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v2i8: ; CHECK: # %bb.0: @@ -16,8 +14,6 @@ define <2 x i8> @vpload_v2i8(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { ret <2 x i8> %load } -declare <3 
x i8> @llvm.vp.load.v3i8.p0(ptr, <3 x i1>, i32) - define <3 x i8> @vpload_v3i8(ptr %ptr, <3 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v3i8: ; CHECK: # %bb.0: @@ -28,8 +24,6 @@ define <3 x i8> @vpload_v3i8(ptr %ptr, <3 x i1> %m, i32 zeroext %evl) { ret <3 x i8> %load } -declare <4 x i8> @llvm.vp.load.v4i8.p0(ptr, <4 x i1>, i32) - define <4 x i8> @vpload_v4i8(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v4i8: ; CHECK: # %bb.0: @@ -50,8 +44,6 @@ define <4 x i8> @vpload_v4i8_allones_mask(ptr %ptr, i32 zeroext %evl) { ret <4 x i8> %load } -declare <8 x i8> @llvm.vp.load.v8i8.p0(ptr, <8 x i1>, i32) - define <8 x i8> @vpload_v8i8(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v8i8: ; CHECK: # %bb.0: @@ -62,8 +54,6 @@ define <8 x i8> @vpload_v8i8(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { ret <8 x i8> %load } -declare <2 x i16> @llvm.vp.load.v2i16.p0(ptr, <2 x i1>, i32) - define <2 x i16> @vpload_v2i16(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v2i16: ; CHECK: # %bb.0: @@ -74,8 +64,6 @@ define <2 x i16> @vpload_v2i16(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { ret <2 x i16> %load } -declare <4 x i16> @llvm.vp.load.v4i16.p0(ptr, <4 x i1>, i32) - define <4 x i16> @vpload_v4i16(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v4i16: ; CHECK: # %bb.0: @@ -86,8 +74,6 @@ define <4 x i16> @vpload_v4i16(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { ret <4 x i16> %load } -declare <8 x i16> @llvm.vp.load.v8i16.p0(ptr, <8 x i1>, i32) - define <8 x i16> @vpload_v8i16(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v8i16: ; CHECK: # %bb.0: @@ -108,8 +94,6 @@ define <8 x i16> @vpload_v8i16_allones_mask(ptr %ptr, i32 zeroext %evl) { ret <8 x i16> %load } -declare <2 x i32> @llvm.vp.load.v2i32.p0(ptr, <2 x i1>, i32) - define <2 x i32> @vpload_v2i32(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v2i32: ; CHECK: # %bb.0: @@ -120,8 +104,6 @@ define <2 x i32> @vpload_v2i32(ptr 
%ptr, <2 x i1> %m, i32 zeroext %evl) { ret <2 x i32> %load } -declare <4 x i32> @llvm.vp.load.v4i32.p0(ptr, <4 x i1>, i32) - define <4 x i32> @vpload_v4i32(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v4i32: ; CHECK: # %bb.0: @@ -132,8 +114,6 @@ define <4 x i32> @vpload_v4i32(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { ret <4 x i32> %load } -declare <6 x i32> @llvm.vp.load.v6i32.p0(ptr, <6 x i1>, i32) - define <6 x i32> @vpload_v6i32(ptr %ptr, <6 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v6i32: ; CHECK: # %bb.0: @@ -154,8 +134,6 @@ define <6 x i32> @vpload_v6i32_allones_mask(ptr %ptr, i32 zeroext %evl) { ret <6 x i32> %load } -declare <8 x i32> @llvm.vp.load.v8i32.p0(ptr, <8 x i1>, i32) - define <8 x i32> @vpload_v8i32(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v8i32: ; CHECK: # %bb.0: @@ -176,8 +154,6 @@ define <8 x i32> @vpload_v8i32_allones_mask(ptr %ptr, i32 zeroext %evl) { ret <8 x i32> %load } -declare <2 x i64> @llvm.vp.load.v2i64.p0(ptr, <2 x i1>, i32) - define <2 x i64> @vpload_v2i64(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v2i64: ; CHECK: # %bb.0: @@ -188,8 +164,6 @@ define <2 x i64> @vpload_v2i64(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { ret <2 x i64> %load } -declare <4 x i64> @llvm.vp.load.v4i64.p0(ptr, <4 x i1>, i32) - define <4 x i64> @vpload_v4i64(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v4i64: ; CHECK: # %bb.0: @@ -210,8 +184,6 @@ define <4 x i64> @vpload_v4i64_allones_mask(ptr %ptr, i32 zeroext %evl) { ret <4 x i64> %load } -declare <8 x i64> @llvm.vp.load.v8i64.p0(ptr, <8 x i1>, i32) - define <8 x i64> @vpload_v8i64(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v8i64: ; CHECK: # %bb.0: @@ -222,8 +194,6 @@ define <8 x i64> @vpload_v8i64(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { ret <8 x i64> %load } -declare <2 x half> @llvm.vp.load.v2f16.p0(ptr, <2 x i1>, i32) - define <2 x half> @vpload_v2f16(ptr %ptr, <2 x i1> %m, i32 zeroext 
%evl) { ; CHECK-LABEL: vpload_v2f16: ; CHECK: # %bb.0: @@ -244,8 +214,6 @@ define <2 x half> @vpload_v2f16_allones_mask(ptr %ptr, i32 zeroext %evl) { ret <2 x half> %load } -declare <4 x half> @llvm.vp.load.v4f16.p0(ptr, <4 x i1>, i32) - define <4 x half> @vpload_v4f16(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v4f16: ; CHECK: # %bb.0: @@ -256,8 +224,6 @@ define <4 x half> @vpload_v4f16(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { ret <4 x half> %load } -declare <8 x half> @llvm.vp.load.v8f16.p0(ptr, <8 x i1>, i32) - define <8 x half> @vpload_v8f16(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v8f16: ; CHECK: # %bb.0: @@ -268,8 +234,6 @@ define <8 x half> @vpload_v8f16(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { ret <8 x half> %load } -declare <2 x float> @llvm.vp.load.v2f32.p0(ptr, <2 x i1>, i32) - define <2 x float> @vpload_v2f32(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v2f32: ; CHECK: # %bb.0: @@ -280,8 +244,6 @@ define <2 x float> @vpload_v2f32(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { ret <2 x float> %load } -declare <4 x float> @llvm.vp.load.v4f32.p0(ptr, <4 x i1>, i32) - define <4 x float> @vpload_v4f32(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v4f32: ; CHECK: # %bb.0: @@ -292,8 +254,6 @@ define <4 x float> @vpload_v4f32(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { ret <4 x float> %load } -declare <8 x float> @llvm.vp.load.v8f32.p0(ptr, <8 x i1>, i32) - define <8 x float> @vpload_v8f32(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v8f32: ; CHECK: # %bb.0: @@ -314,8 +274,6 @@ define <8 x float> @vpload_v8f32_allones_mask(ptr %ptr, i32 zeroext %evl) { ret <8 x float> %load } -declare <2 x double> @llvm.vp.load.v2f64.p0(ptr, <2 x i1>, i32) - define <2 x double> @vpload_v2f64(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v2f64: ; CHECK: # %bb.0: @@ -326,8 +284,6 @@ define <2 x double> @vpload_v2f64(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { 
ret <2 x double> %load } -declare <4 x double> @llvm.vp.load.v4f64.p0(ptr, <4 x i1>, i32) - define <4 x double> @vpload_v4f64(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v4f64: ; CHECK: # %bb.0: @@ -348,8 +304,6 @@ define <4 x double> @vpload_v4f64_allones_mask(ptr %ptr, i32 zeroext %evl) { ret <4 x double> %load } -declare <8 x double> @llvm.vp.load.v8f64.p0(ptr, <8 x i1>, i32) - define <8 x double> @vpload_v8f64(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v8f64: ; CHECK: # %bb.0: @@ -360,8 +314,6 @@ define <8 x double> @vpload_v8f64(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { ret <8 x double> %load } -declare <32 x double> @llvm.vp.load.v32f64.p0(ptr, <32 x i1>, i32) - define <32 x double> @vpload_v32f64(ptr %ptr, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v32f64: ; CHECK: # %bb.0: @@ -387,8 +339,6 @@ define <32 x double> @vpload_v32f64(ptr %ptr, <32 x i1> %m, i32 zeroext %evl) { ret <32 x double> %load } -declare <33 x double> @llvm.vp.load.v33f64.p0(ptr, <33 x i1>, i32) - ; Widen to v64f64 then split into 4 x v16f64, of which 1 is empty. 
define <33 x double> @vpload_v33f64(ptr %ptr, <33 x i1> %m, i32 zeroext %evl) { diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge-bf16.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge-bf16.ll index 4186a6b304a22..844e0213989bc 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge-bf16.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge-bf16.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+v,+m,+zvfh,+zfbfmin,+zvfbfmin -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare <2 x bfloat> @llvm.vp.merge.v2bf16(<2 x i1>, <2 x bfloat>, <2 x bfloat>, i32) - define <2 x bfloat> @vpmerge_vv_v2bf16(<2 x bfloat> %va, <2 x bfloat> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v2bf16: ; CHECK: # %bb.0: @@ -36,8 +34,6 @@ define <2 x bfloat> @vpmerge_vf_v2bf16(bfloat %a, <2 x bfloat> %vb, <2 x i1> %m, ret <2 x bfloat> %v } -declare <4 x bfloat> @llvm.vp.merge.v4bf16(<4 x i1>, <4 x bfloat>, <4 x bfloat>, i32) - define <4 x bfloat> @vpmerge_vv_v4bf16(<4 x bfloat> %va, <4 x bfloat> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v4bf16: ; CHECK: # %bb.0: @@ -64,8 +60,6 @@ define <4 x bfloat> @vpmerge_vf_v4bf16(bfloat %a, <4 x bfloat> %vb, <4 x i1> %m, ret <4 x bfloat> %v } -declare <8 x bfloat> @llvm.vp.merge.v8bf16(<8 x i1>, <8 x bfloat>, <8 x bfloat>, i32) - define <8 x bfloat> @vpmerge_vv_v8bf16(<8 x bfloat> %va, <8 x bfloat> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v8bf16: ; CHECK: # %bb.0: @@ -92,8 +86,6 @@ define <8 x bfloat> @vpmerge_vf_v8bf16(bfloat %a, <8 x bfloat> %vb, <8 x i1> %m, ret <8 x bfloat> %v } -declare <16 x bfloat> @llvm.vp.merge.v16bf16(<16 x i1>, <16 x bfloat>, <16 x bfloat>, i32) - define <16 x bfloat> @vpmerge_vv_v16bf16(<16 x bfloat> %va, <16 x bfloat> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v16bf16: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll 
b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll index 4299707c9a48c..7968c5190eb01 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN -declare <4 x i1> @llvm.vp.merge.v4i1(<4 x i1>, <4 x i1>, <4 x i1>, i32) - define <4 x i1> @vpmerge_vv_v4i1(<4 x i1> %va, <4 x i1> %vb, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpmerge_vv_v4i1: ; RV32: # %bb.0: @@ -234,8 +232,6 @@ define <64 x i1> @vpmerge_vv_v64i1(<64 x i1> %va, <64 x i1> %vb, <64 x i1> %m, i ret <64 x i1> %v } -declare <2 x i8> @llvm.vp.merge.v2i8(<2 x i1>, <2 x i8>, <2 x i8>, i32) - define <2 x i8> @vpmerge_vv_v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v2i8: ; CHECK: # %bb.0: @@ -269,8 +265,6 @@ define <2 x i8> @vpmerge_vi_v2i8(<2 x i8> %vb, <2 x i1> %m, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.merge.v4i8(<4 x i1>, <4 x i8>, <4 x i8>, i32) - define <4 x i8> @vpmerge_vv_v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v4i8: ; CHECK: # %bb.0: @@ -304,8 +298,6 @@ define <4 x i8> @vpmerge_vi_v4i8(<4 x i8> %vb, <4 x i1> %m, i32 zeroext %evl) { ret <4 x i8> %v } -declare <6 x i8> @llvm.vp.merge.v6i8(<6 x i1>, <6 x i8>, <6 x i8>, i32) - define <6 x i8> @vpmerge_vv_v6i8(<6 x i8> %va, <6 x i8> %vb, <6 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v6i8: ; CHECK: # %bb.0: @@ -339,8 +331,6 @@ define <6 x i8> @vpmerge_vi_v6i8(<6 x i8> %vb, <6 x i1> %m, i32 zeroext %evl) { ret <6 x i8> %v } -declare <8 x i7> @llvm.vp.merge.v8i7(<8 x i1>, <8 x i7>, <8 x i7>, i32) - define <8 x i7> @vpmerge_vv_v8i7(<8 x i7> %va, <8 x i7> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v8i7: ; CHECK: # %bb.0: @@ -374,8 +364,6 @@ define <8 x 
i7> @vpmerge_vi_v8i7(<8 x i7> %vb, <8 x i1> %m, i32 zeroext %evl) { ret <8 x i7> %v } -declare <8 x i8> @llvm.vp.merge.v8i8(<8 x i1>, <8 x i8>, <8 x i8>, i32) - define <8 x i8> @vpmerge_vv_v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v8i8: ; CHECK: # %bb.0: @@ -409,8 +397,6 @@ define <8 x i8> @vpmerge_vi_v8i8(<8 x i8> %vb, <8 x i1> %m, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.merge.v16i8(<16 x i1>, <16 x i8>, <16 x i8>, i32) - define <16 x i8> @vpmerge_vv_v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v16i8: ; CHECK: # %bb.0: @@ -444,8 +430,6 @@ define <16 x i8> @vpmerge_vi_v16i8(<16 x i8> %vb, <16 x i1> %m, i32 zeroext %evl ret <16 x i8> %v } -declare <2 x i16> @llvm.vp.merge.v2i16(<2 x i1>, <2 x i16>, <2 x i16>, i32) - define <2 x i16> @vpmerge_vv_v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v2i16: ; CHECK: # %bb.0: @@ -479,8 +463,6 @@ define <2 x i16> @vpmerge_vi_v2i16(<2 x i16> %vb, <2 x i1> %m, i32 zeroext %evl) ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.merge.v4i16(<4 x i1>, <4 x i16>, <4 x i16>, i32) - define <4 x i16> @vpmerge_vv_v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v4i16: ; CHECK: # %bb.0: @@ -514,8 +496,6 @@ define <4 x i16> @vpmerge_vi_v4i16(<4 x i16> %vb, <4 x i1> %m, i32 zeroext %evl) ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.merge.v8i16(<8 x i1>, <8 x i16>, <8 x i16>, i32) - define <8 x i16> @vpmerge_vv_v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v8i16: ; CHECK: # %bb.0: @@ -549,8 +529,6 @@ define <8 x i16> @vpmerge_vi_v8i16(<8 x i16> %vb, <8 x i1> %m, i32 zeroext %evl) ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.merge.v16i16(<16 x i1>, <16 x i16>, <16 x i16>, i32) - define <16 x i16> @vpmerge_vv_v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 zeroext 
%evl) { ; CHECK-LABEL: vpmerge_vv_v16i16: ; CHECK: # %bb.0: @@ -584,8 +562,6 @@ define <16 x i16> @vpmerge_vi_v16i16(<16 x i16> %vb, <16 x i1> %m, i32 zeroext % ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.merge.v2i32(<2 x i1>, <2 x i32>, <2 x i32>, i32) - define <2 x i32> @vpmerge_vv_v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v2i32: ; CHECK: # %bb.0: @@ -619,8 +595,6 @@ define <2 x i32> @vpmerge_vi_v2i32(<2 x i32> %vb, <2 x i1> %m, i32 zeroext %evl) ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.merge.v4i32(<4 x i1>, <4 x i32>, <4 x i32>, i32) - define <4 x i32> @vpmerge_vv_v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v4i32: ; CHECK: # %bb.0: @@ -654,8 +628,6 @@ define <4 x i32> @vpmerge_vi_v4i32(<4 x i32> %vb, <4 x i1> %m, i32 zeroext %evl) ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.merge.v8i32(<8 x i1>, <8 x i32>, <8 x i32>, i32) - define <8 x i32> @vpmerge_vv_v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v8i32: ; CHECK: # %bb.0: @@ -689,8 +661,6 @@ define <8 x i32> @vpmerge_vi_v8i32(<8 x i32> %vb, <8 x i1> %m, i32 zeroext %evl) ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.merge.v16i32(<16 x i1>, <16 x i32>, <16 x i32>, i32) - define <16 x i32> @vpmerge_vv_v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v16i32: ; CHECK: # %bb.0: @@ -724,8 +694,6 @@ define <16 x i32> @vpmerge_vi_v16i32(<16 x i32> %vb, <16 x i1> %m, i32 zeroext % ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.merge.v2i64(<2 x i1>, <2 x i64>, <2 x i64>, i32) - define <2 x i64> @vpmerge_vv_v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v2i64: ; CHECK: # %bb.0: @@ -795,8 +763,6 @@ define <2 x i64> @vpmerge_vi_v2i64(<2 x i64> %vb, <2 x i1> %m, i32 zeroext %evl) ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.merge.v4i64(<4 x i1>, <4 x i64>, <4 x 
i64>, i32) - define <4 x i64> @vpmerge_vv_v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v4i64: ; CHECK: # %bb.0: @@ -866,8 +832,6 @@ define <4 x i64> @vpmerge_vi_v4i64(<4 x i64> %vb, <4 x i1> %m, i32 zeroext %evl) ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.merge.v8i64(<8 x i1>, <8 x i64>, <8 x i64>, i32) - define <8 x i64> @vpmerge_vv_v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v8i64: ; CHECK: # %bb.0: @@ -937,8 +901,6 @@ define <8 x i64> @vpmerge_vi_v8i64(<8 x i64> %vb, <8 x i1> %m, i32 zeroext %evl) ret <8 x i64> %v } -declare <16 x i64> @llvm.vp.merge.v16i64(<16 x i1>, <16 x i64>, <16 x i64>, i32) - define <16 x i64> @vpmerge_vv_v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v16i64: ; CHECK: # %bb.0: @@ -1008,8 +970,6 @@ define <16 x i64> @vpmerge_vi_v16i64(<16 x i64> %vb, <16 x i1> %m, i32 zeroext % ret <16 x i64> %v } -declare <2 x half> @llvm.vp.merge.v2f16(<2 x i1>, <2 x half>, <2 x half>, i32) - define <2 x half> @vpmerge_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v2f16: ; CHECK: # %bb.0: @@ -1042,8 +1002,6 @@ define <2 x half> @vpmerge_vf_v2f16(half %a, <2 x half> %vb, <2 x i1> %m, i32 ze ret <2 x half> %v } -declare <4 x half> @llvm.vp.merge.v4f16(<4 x i1>, <4 x half>, <4 x half>, i32) - define <4 x half> @vpmerge_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v4f16: ; CHECK: # %bb.0: @@ -1076,8 +1034,6 @@ define <4 x half> @vpmerge_vf_v4f16(half %a, <4 x half> %vb, <4 x i1> %m, i32 ze ret <4 x half> %v } -declare <8 x half> @llvm.vp.merge.v8f16(<8 x i1>, <8 x half>, <8 x half>, i32) - define <8 x half> @vpmerge_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v8f16: ; CHECK: # %bb.0: @@ -1110,8 +1066,6 @@ define <8 x half> 
@vpmerge_vf_v8f16(half %a, <8 x half> %vb, <8 x i1> %m, i32 ze ret <8 x half> %v } -declare <16 x half> @llvm.vp.merge.v16f16(<16 x i1>, <16 x half>, <16 x half>, i32) - define <16 x half> @vpmerge_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v16f16: ; CHECK: # %bb.0: @@ -1144,8 +1098,6 @@ define <16 x half> @vpmerge_vf_v16f16(half %a, <16 x half> %vb, <16 x i1> %m, i3 ret <16 x half> %v } -declare <2 x float> @llvm.vp.merge.v2f32(<2 x i1>, <2 x float>, <2 x float>, i32) - define <2 x float> @vpmerge_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v2f32: ; CHECK: # %bb.0: @@ -1169,8 +1121,6 @@ define <2 x float> @vpmerge_vf_v2f32(float %a, <2 x float> %vb, <2 x i1> %m, i32 ret <2 x float> %v } -declare <4 x float> @llvm.vp.merge.v4f32(<4 x i1>, <4 x float>, <4 x float>, i32) - define <4 x float> @vpmerge_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v4f32: ; CHECK: # %bb.0: @@ -1194,8 +1144,6 @@ define <4 x float> @vpmerge_vf_v4f32(float %a, <4 x float> %vb, <4 x i1> %m, i32 ret <4 x float> %v } -declare <8 x float> @llvm.vp.merge.v8f32(<8 x i1>, <8 x float>, <8 x float>, i32) - define <8 x float> @vpmerge_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v8f32: ; CHECK: # %bb.0: @@ -1219,8 +1167,6 @@ define <8 x float> @vpmerge_vf_v8f32(float %a, <8 x float> %vb, <8 x i1> %m, i32 ret <8 x float> %v } -declare <16 x float> @llvm.vp.merge.v16f32(<16 x i1>, <16 x float>, <16 x float>, i32) - define <16 x float> @vpmerge_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v16f32: ; CHECK: # %bb.0: @@ -1244,8 +1190,6 @@ define <16 x float> @vpmerge_vf_v16f32(float %a, <16 x float> %vb, <16 x i1> %m, ret <16 x float> %v } -declare <2 x double> @llvm.vp.merge.v2f64(<2 x i1>, <2 x double>, <2 x 
double>, i32) - define <2 x double> @vpmerge_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v2f64: ; CHECK: # %bb.0: @@ -1269,8 +1213,6 @@ define <2 x double> @vpmerge_vf_v2f64(double %a, <2 x double> %vb, <2 x i1> %m, ret <2 x double> %v } -declare <4 x double> @llvm.vp.merge.v4f64(<4 x i1>, <4 x double>, <4 x double>, i32) - define <4 x double> @vpmerge_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v4f64: ; CHECK: # %bb.0: @@ -1294,8 +1236,6 @@ define <4 x double> @vpmerge_vf_v4f64(double %a, <4 x double> %vb, <4 x i1> %m, ret <4 x double> %v } -declare <8 x double> @llvm.vp.merge.v8f64(<8 x i1>, <8 x double>, <8 x double>, i32) - define <8 x double> @vpmerge_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v8f64: ; CHECK: # %bb.0: @@ -1319,8 +1259,6 @@ define <8 x double> @vpmerge_vf_v8f64(double %a, <8 x double> %vb, <8 x i1> %m, ret <8 x double> %v } -declare <16 x double> @llvm.vp.merge.v16f64(<16 x i1>, <16 x double>, <16 x double>, i32) - define <16 x double> @vpmerge_vv_v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v16f64: ; CHECK: # %bb.0: @@ -1344,8 +1282,6 @@ define <16 x double> @vpmerge_vf_v16f64(double %a, <16 x double> %vb, <16 x i1> ret <16 x double> %v } -declare <32 x double> @llvm.vp.merge.v32f64(<32 x i1>, <32 x double>, <32 x double>, i32) - define <32 x double> @vpmerge_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v32f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll index c361ccce14e4a..b4d20d93f2a1c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll @@ -8,8 +8,6 @@ ; RUN: llc 
-mtriple=riscv64 -mattr=+d,+zvfhmin,+zvfbfmin,+v \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV64 -declare void @llvm.vp.scatter.v2i8.v2p0(<2 x i8>, <2 x ptr>, <2 x i1>, i32) - define void @vpscatter_v2i8(<2 x i8> %val, <2 x ptr> %ptrs, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v2i8: ; RV32: # %bb.0: @@ -101,8 +99,6 @@ define void @vpscatter_v2i64_truncstore_v2i8(<2 x i64> %val, <2 x ptr> %ptrs, <2 ret void } -declare void @llvm.vp.scatter.v4i8.v4p0(<4 x i8>, <4 x ptr>, <4 x i1>, i32) - define void @vpscatter_v4i8(<4 x i8> %val, <4 x ptr> %ptrs, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v4i8: ; RV32: # %bb.0: @@ -135,8 +131,6 @@ define void @vpscatter_truemask_v4i8(<4 x i8> %val, <4 x ptr> %ptrs, i32 zeroext ret void } -declare void @llvm.vp.scatter.v8i8.v8p0(<8 x i8>, <8 x ptr>, <8 x i1>, i32) - define void @vpscatter_v8i8(<8 x i8> %val, <8 x ptr> %ptrs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v8i8: ; RV32: # %bb.0: @@ -174,8 +168,6 @@ define void @vpscatter_baseidx_v8i8(<8 x i8> %val, ptr %base, <8 x i8> %idxs, <8 ret void } -declare void @llvm.vp.scatter.v2i16.v2p0(<2 x i16>, <2 x ptr>, <2 x i1>, i32) - define void @vpscatter_v2i16(<2 x i16> %val, <2 x ptr> %ptrs, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v2i16: ; RV32: # %bb.0: @@ -238,8 +230,6 @@ define void @vpscatter_v2i64_truncstore_v2i16(<2 x i64> %val, <2 x ptr> %ptrs, < ret void } -declare void @llvm.vp.scatter.v3i16.v3p0(<3 x i16>, <3 x ptr>, <3 x i1>, i32) - define void @vpscatter_v3i16(<3 x i16> %val, <3 x ptr> %ptrs, <3 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v3i16: ; RV32: # %bb.0: @@ -272,8 +262,6 @@ define void @vpscatter_truemask_v3i16(<3 x i16> %val, <3 x ptr> %ptrs, i32 zeroe ret void } -declare void @llvm.vp.scatter.v4i16.v4p0(<4 x i16>, <4 x ptr>, <4 x i1>, i32) - define void @vpscatter_v4i16(<4 x i16> %val, <4 x ptr> %ptrs, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v4i16: ; 
RV32: # %bb.0: @@ -306,8 +294,6 @@ define void @vpscatter_truemask_v4i16(<4 x i16> %val, <4 x ptr> %ptrs, i32 zeroe ret void } -declare void @llvm.vp.scatter.v8i16.v8p0(<8 x i16>, <8 x ptr>, <8 x i1>, i32) - define void @vpscatter_v8i16(<8 x i16> %val, <8 x ptr> %ptrs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v8i16: ; RV32: # %bb.0: @@ -415,8 +401,6 @@ define void @vpscatter_baseidx_v8i16(<8 x i16> %val, ptr %base, <8 x i16> %idxs, ret void } -declare void @llvm.vp.scatter.v2i32.v2p0(<2 x i32>, <2 x ptr>, <2 x i1>, i32) - define void @vpscatter_v2i32(<2 x i32> %val, <2 x ptr> %ptrs, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v2i32: ; RV32: # %bb.0: @@ -454,8 +438,6 @@ define void @vpscatter_v2i64_truncstore_v2i32(<2 x i64> %val, <2 x ptr> %ptrs, < ret void } -declare void @llvm.vp.scatter.v4i32.v4p0(<4 x i32>, <4 x ptr>, <4 x i1>, i32) - define void @vpscatter_v4i32(<4 x i32> %val, <4 x ptr> %ptrs, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v4i32: ; RV32: # %bb.0: @@ -488,8 +470,6 @@ define void @vpscatter_truemask_v4i32(<4 x i32> %val, <4 x ptr> %ptrs, i32 zeroe ret void } -declare void @llvm.vp.scatter.v8i32.v8p0(<8 x i32>, <8 x ptr>, <8 x i1>, i32) - define void @vpscatter_v8i32(<8 x i32> %val, <8 x ptr> %ptrs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v8i32: ; RV32: # %bb.0: @@ -670,8 +650,6 @@ define void @vpscatter_baseidx_v8i32(<8 x i32> %val, ptr %base, <8 x i32> %idxs, ret void } -declare void @llvm.vp.scatter.v2i64.v2p0(<2 x i64>, <2 x ptr>, <2 x i1>, i32) - define void @vpscatter_v2i64(<2 x i64> %val, <2 x ptr> %ptrs, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v2i64: ; RV32: # %bb.0: @@ -688,8 +666,6 @@ define void @vpscatter_v2i64(<2 x i64> %val, <2 x ptr> %ptrs, <2 x i1> %m, i32 z ret void } -declare void @llvm.vp.scatter.v4i64.v4p0(<4 x i64>, <4 x ptr>, <4 x i1>, i32) - define void @vpscatter_v4i64(<4 x i64> %val, <4 x ptr> %ptrs, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: 
vpscatter_v4i64: ; RV32: # %bb.0: @@ -722,8 +698,6 @@ define void @vpscatter_truemask_v4i64(<4 x i64> %val, <4 x ptr> %ptrs, i32 zeroe ret void } -declare void @llvm.vp.scatter.v8i64.v8p0(<8 x i64>, <8 x ptr>, <8 x i1>, i32) - define void @vpscatter_v8i64(<8 x i64> %val, <8 x ptr> %ptrs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v8i64: ; RV32: # %bb.0: @@ -972,8 +946,6 @@ define void @vpscatter_baseidx_v8i64(<8 x i64> %val, ptr %base, <8 x i64> %idxs, ret void } -declare void @llvm.vp.scatter.v2bf16.v2p0(<2 x bfloat>, <2 x ptr>, <2 x i1>, i32) - define void @vpscatter_v2bf16(<2 x bfloat> %val, <2 x ptr> %ptrs, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v2bf16: ; RV32: # %bb.0: @@ -990,8 +962,6 @@ define void @vpscatter_v2bf16(<2 x bfloat> %val, <2 x ptr> %ptrs, <2 x i1> %m, i ret void } -declare void @llvm.vp.scatter.v4bf16.v4p0(<4 x bfloat>, <4 x ptr>, <4 x i1>, i32) - define void @vpscatter_v4bf16(<4 x bfloat> %val, <4 x ptr> %ptrs, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v4bf16: ; RV32: # %bb.0: @@ -1024,8 +994,6 @@ define void @vpscatter_truemask_v4bf16(<4 x bfloat> %val, <4 x ptr> %ptrs, i32 z ret void } -declare void @llvm.vp.scatter.v8bf16.v8p0(<8 x bfloat>, <8 x ptr>, <8 x i1>, i32) - define void @vpscatter_v8bf16(<8 x bfloat> %val, <8 x ptr> %ptrs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v8bf16: ; RV32: # %bb.0: @@ -1042,8 +1010,6 @@ define void @vpscatter_v8bf16(<8 x bfloat> %val, <8 x ptr> %ptrs, <8 x i1> %m, i ret void } -declare void @llvm.vp.scatter.v2f16.v2p0(<2 x half>, <2 x ptr>, <2 x i1>, i32) - define void @vpscatter_v2f16(<2 x half> %val, <2 x ptr> %ptrs, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v2f16: ; RV32: # %bb.0: @@ -1060,8 +1026,6 @@ define void @vpscatter_v2f16(<2 x half> %val, <2 x ptr> %ptrs, <2 x i1> %m, i32 ret void } -declare void @llvm.vp.scatter.v4f16.v4p0(<4 x half>, <4 x ptr>, <4 x i1>, i32) - define void @vpscatter_v4f16(<4 x half> %val, <4 x ptr> 
%ptrs, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v4f16: ; RV32: # %bb.0: @@ -1094,8 +1058,6 @@ define void @vpscatter_truemask_v4f16(<4 x half> %val, <4 x ptr> %ptrs, i32 zero ret void } -declare void @llvm.vp.scatter.v8f16.v8p0(<8 x half>, <8 x ptr>, <8 x i1>, i32) - define void @vpscatter_v8f16(<8 x half> %val, <8 x ptr> %ptrs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v8f16: ; RV32: # %bb.0: @@ -1203,8 +1165,6 @@ define void @vpscatter_baseidx_v8f16(<8 x half> %val, ptr %base, <8 x i16> %idxs ret void } -declare void @llvm.vp.scatter.v2f32.v2p0(<2 x float>, <2 x ptr>, <2 x i1>, i32) - define void @vpscatter_v2f32(<2 x float> %val, <2 x ptr> %ptrs, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v2f32: ; RV32: # %bb.0: @@ -1221,8 +1181,6 @@ define void @vpscatter_v2f32(<2 x float> %val, <2 x ptr> %ptrs, <2 x i1> %m, i32 ret void } -declare void @llvm.vp.scatter.v4f32.v4p0(<4 x float>, <4 x ptr>, <4 x i1>, i32) - define void @vpscatter_v4f32(<4 x float> %val, <4 x ptr> %ptrs, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v4f32: ; RV32: # %bb.0: @@ -1255,8 +1213,6 @@ define void @vpscatter_truemask_v4f32(<4 x float> %val, <4 x ptr> %ptrs, i32 zer ret void } -declare void @llvm.vp.scatter.v8f32.v8p0(<8 x float>, <8 x ptr>, <8 x i1>, i32) - define void @vpscatter_v8f32(<8 x float> %val, <8 x ptr> %ptrs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v8f32: ; RV32: # %bb.0: @@ -1437,8 +1393,6 @@ define void @vpscatter_baseidx_v8f32(<8 x float> %val, ptr %base, <8 x i32> %idx ret void } -declare void @llvm.vp.scatter.v2f64.v2p0(<2 x double>, <2 x ptr>, <2 x i1>, i32) - define void @vpscatter_v2f64(<2 x double> %val, <2 x ptr> %ptrs, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v2f64: ; RV32: # %bb.0: @@ -1455,8 +1409,6 @@ define void @vpscatter_v2f64(<2 x double> %val, <2 x ptr> %ptrs, <2 x i1> %m, i3 ret void } -declare void @llvm.vp.scatter.v4f64.v4p0(<4 x double>, <4 x ptr>, <4 x i1>, i32) - 
define void @vpscatter_v4f64(<4 x double> %val, <4 x ptr> %ptrs, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v4f64: ; RV32: # %bb.0: @@ -1489,8 +1441,6 @@ define void @vpscatter_truemask_v4f64(<4 x double> %val, <4 x ptr> %ptrs, i32 ze ret void } -declare void @llvm.vp.scatter.v8f64.v8p0(<8 x double>, <8 x ptr>, <8 x i1>, i32) - define void @vpscatter_v8f64(<8 x double> %val, <8 x ptr> %ptrs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v8f64: ; RV32: # %bb.0: @@ -1739,8 +1689,6 @@ define void @vpscatter_baseidx_v8f64(<8 x double> %val, ptr %base, <8 x i64> %id ret void } -declare void @llvm.vp.scatter.v32f64.v32p0(<32 x double>, <32 x ptr>, <32 x i1>, i32) - define void @vpscatter_v32f64(<32 x double> %val, <32 x ptr> %ptrs, <32 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v32f64: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpstore.ll index d30e8b46e6df2..855a87d21b7dc 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpstore.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpstore.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare void @llvm.vp.store.v2i8.p0(<2 x i8>, ptr, <2 x i1>, i32) - define void @vpstore_v2i8(<2 x i8> %val, ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v2i8: ; CHECK: # %bb.0: @@ -16,8 +14,6 @@ define void @vpstore_v2i8(<2 x i8> %val, ptr %ptr, <2 x i1> %m, i32 zeroext %evl ret void } -declare void @llvm.vp.store.v4i8.p0(<4 x i8>, ptr, <4 x i1>, i32) - define void @vpstore_v4i8(<4 x i8> %val, ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v4i8: ; CHECK: # %bb.0: @@ -28,8 +24,6 @@ define void @vpstore_v4i8(<4 x i8> %val, ptr %ptr, <4 x i1> %m, i32 zeroext %evl ret void } -declare void @llvm.vp.store.v8i7.v8i7.p0(<8 x i7>, ptr, <8 x i1>, i32) - define void @vpstore_v8i7(<8 x i7> %val, ptr %ptr, <8 x i1> %m, 
i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v8i7: ; CHECK: # %bb.0: @@ -40,8 +34,6 @@ define void @vpstore_v8i7(<8 x i7> %val, ptr %ptr, <8 x i1> %m, i32 zeroext %evl ret void } -declare void @llvm.vp.store.v8i8.p0(<8 x i8>, ptr, <8 x i1>, i32) - define void @vpstore_v8i8(<8 x i8> %val, ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v8i8: ; CHECK: # %bb.0: @@ -52,8 +44,6 @@ define void @vpstore_v8i8(<8 x i8> %val, ptr %ptr, <8 x i1> %m, i32 zeroext %evl ret void } -declare void @llvm.vp.store.v2i16.p0(<2 x i16>, ptr, <2 x i1>, i32) - define void @vpstore_v2i16(<2 x i16> %val, ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v2i16: ; CHECK: # %bb.0: @@ -64,8 +54,6 @@ define void @vpstore_v2i16(<2 x i16> %val, ptr %ptr, <2 x i1> %m, i32 zeroext %e ret void } -declare void @llvm.vp.store.v4i16.p0(<4 x i16>, ptr, <4 x i1>, i32) - define void @vpstore_v4i16(<4 x i16> %val, ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v4i16: ; CHECK: # %bb.0: @@ -76,8 +64,6 @@ define void @vpstore_v4i16(<4 x i16> %val, ptr %ptr, <4 x i1> %m, i32 zeroext %e ret void } -declare void @llvm.vp.store.v8i16.p0(<8 x i16>, ptr, <8 x i1>, i32) - define void @vpstore_v8i16(<8 x i16> %val, ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v8i16: ; CHECK: # %bb.0: @@ -88,8 +74,6 @@ define void @vpstore_v8i16(<8 x i16> %val, ptr %ptr, <8 x i1> %m, i32 zeroext %e ret void } -declare void @llvm.vp.store.v2i32.p0(<2 x i32>, ptr, <2 x i1>, i32) - define void @vpstore_v2i32(<2 x i32> %val, ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v2i32: ; CHECK: # %bb.0: @@ -100,8 +84,6 @@ define void @vpstore_v2i32(<2 x i32> %val, ptr %ptr, <2 x i1> %m, i32 zeroext %e ret void } -declare void @llvm.vp.store.v4i32.p0(<4 x i32>, ptr, <4 x i1>, i32) - define void @vpstore_v4i32(<4 x i32> %val, ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v4i32: ; CHECK: # %bb.0: @@ -112,8 +94,6 @@ define void 
@vpstore_v4i32(<4 x i32> %val, ptr %ptr, <4 x i1> %m, i32 zeroext %e ret void } -declare void @llvm.vp.store.v8i32.p0(<8 x i32>, ptr, <8 x i1>, i32) - define void @vpstore_v8i32(<8 x i32> %val, ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v8i32: ; CHECK: # %bb.0: @@ -124,8 +104,6 @@ define void @vpstore_v8i32(<8 x i32> %val, ptr %ptr, <8 x i1> %m, i32 zeroext %e ret void } -declare void @llvm.vp.store.v2i64.p0(<2 x i64>, ptr, <2 x i1>, i32) - define void @vpstore_v2i64(<2 x i64> %val, ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v2i64: ; CHECK: # %bb.0: @@ -136,8 +114,6 @@ define void @vpstore_v2i64(<2 x i64> %val, ptr %ptr, <2 x i1> %m, i32 zeroext %e ret void } -declare void @llvm.vp.store.v4i64.p0(<4 x i64>, ptr, <4 x i1>, i32) - define void @vpstore_v4i64(<4 x i64> %val, ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v4i64: ; CHECK: # %bb.0: @@ -148,8 +124,6 @@ define void @vpstore_v4i64(<4 x i64> %val, ptr %ptr, <4 x i1> %m, i32 zeroext %e ret void } -declare void @llvm.vp.store.v8i64.p0(<8 x i64>, ptr, <8 x i1>, i32) - define void @vpstore_v8i64(<8 x i64> %val, ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v8i64: ; CHECK: # %bb.0: @@ -160,8 +134,6 @@ define void @vpstore_v8i64(<8 x i64> %val, ptr %ptr, <8 x i1> %m, i32 zeroext %e ret void } -declare void @llvm.vp.store.v2f16.p0(<2 x half>, ptr, <2 x i1>, i32) - define void @vpstore_v2f16(<2 x half> %val, ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v2f16: ; CHECK: # %bb.0: @@ -172,8 +144,6 @@ define void @vpstore_v2f16(<2 x half> %val, ptr %ptr, <2 x i1> %m, i32 zeroext % ret void } -declare void @llvm.vp.store.v4f16.p0(<4 x half>, ptr, <4 x i1>, i32) - define void @vpstore_v4f16(<4 x half> %val, ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v4f16: ; CHECK: # %bb.0: @@ -184,8 +154,6 @@ define void @vpstore_v4f16(<4 x half> %val, ptr %ptr, <4 x i1> %m, i32 zeroext % ret void } -declare void 
@llvm.vp.store.v8f16.p0(<8 x half>, ptr, <8 x i1>, i32) - define void @vpstore_v8f16(<8 x half> %val, ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v8f16: ; CHECK: # %bb.0: @@ -196,8 +164,6 @@ define void @vpstore_v8f16(<8 x half> %val, ptr %ptr, <8 x i1> %m, i32 zeroext % ret void } -declare void @llvm.vp.store.v2f32.p0(<2 x float>, ptr, <2 x i1>, i32) - define void @vpstore_v2f32(<2 x float> %val, ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v2f32: ; CHECK: # %bb.0: @@ -208,8 +174,6 @@ define void @vpstore_v2f32(<2 x float> %val, ptr %ptr, <2 x i1> %m, i32 zeroext ret void } -declare void @llvm.vp.store.v4f32.p0(<4 x float>, ptr, <4 x i1>, i32) - define void @vpstore_v4f32(<4 x float> %val, ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v4f32: ; CHECK: # %bb.0: @@ -220,8 +184,6 @@ define void @vpstore_v4f32(<4 x float> %val, ptr %ptr, <4 x i1> %m, i32 zeroext ret void } -declare void @llvm.vp.store.v6f32.p0(<6 x float>, ptr, <6 x i1>, i32) - define void @vpstore_v6f32(<6 x float> %val, ptr %ptr, <6 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v6f32: ; CHECK: # %bb.0: @@ -232,8 +194,6 @@ define void @vpstore_v6f32(<6 x float> %val, ptr %ptr, <6 x i1> %m, i32 zeroext ret void } -declare void @llvm.vp.store.v8f32.p0(<8 x float>, ptr, <8 x i1>, i32) - define void @vpstore_v8f32(<8 x float> %val, ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v8f32: ; CHECK: # %bb.0: @@ -244,8 +204,6 @@ define void @vpstore_v8f32(<8 x float> %val, ptr %ptr, <8 x i1> %m, i32 zeroext ret void } -declare void @llvm.vp.store.v2f64.p0(<2 x double>, ptr, <2 x i1>, i32) - define void @vpstore_v2f64(<2 x double> %val, ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v2f64: ; CHECK: # %bb.0: @@ -256,8 +214,6 @@ define void @vpstore_v2f64(<2 x double> %val, ptr %ptr, <2 x i1> %m, i32 zeroext ret void } -declare void @llvm.vp.store.v4f64.p0(<4 x double>, ptr, <4 x i1>, i32) - define void 
@vpstore_v4f64(<4 x double> %val, ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v4f64: ; CHECK: # %bb.0: @@ -268,8 +224,6 @@ define void @vpstore_v4f64(<4 x double> %val, ptr %ptr, <4 x i1> %m, i32 zeroext ret void } -declare void @llvm.vp.store.v8f64.p0(<8 x double>, ptr, <8 x i1>, i32) - define void @vpstore_v8f64(<8 x double> %val, ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v8f64: ; CHECK: # %bb.0: @@ -290,8 +244,6 @@ define void @vpstore_v2i8_allones_mask(<2 x i8> %val, ptr %ptr, i32 zeroext %evl ret void } -declare void @llvm.vp.store.v32f64.p0(<32 x double>, ptr, <32 x i1>, i32) - define void @vpstore_v32f64(<32 x double> %val, ptr %ptr, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v32f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll index 0d31ec5f78435..74a958f40f35b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s -declare i1 @llvm.vector.reduce.or.v1i1(<1 x i1>) - define zeroext i1 @vreduce_or_v1i1(<1 x i1> %v) { ; CHECK-LABEL: vreduce_or_v1i1: ; CHECK: # %bb.0: @@ -15,8 +13,6 @@ define zeroext i1 @vreduce_or_v1i1(<1 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.xor.v1i1(<1 x i1>) - define zeroext i1 @vreduce_xor_v1i1(<1 x i1> %v) { ; CHECK-LABEL: vreduce_xor_v1i1: ; CHECK: # %bb.0: @@ -28,8 +24,6 @@ define zeroext i1 @vreduce_xor_v1i1(<1 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.and.v1i1(<1 x i1>) - define zeroext i1 @vreduce_and_v1i1(<1 x i1> %v) { ; CHECK-LABEL: vreduce_and_v1i1: ; CHECK: # %bb.0: @@ -41,8 +35,6 @@ define zeroext i1 @vreduce_and_v1i1(<1 x i1> %v) { ret i1 %red } -declare i1 
@llvm.vector.reduce.umax.v1i1(<1 x i1>) - define zeroext i1 @vreduce_umax_v1i1(<1 x i1> %v) { ; CHECK-LABEL: vreduce_umax_v1i1: ; CHECK: # %bb.0: @@ -54,8 +46,6 @@ define zeroext i1 @vreduce_umax_v1i1(<1 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smax.v1i1(<1 x i1>) - define zeroext i1 @vreduce_smax_v1i1(<1 x i1> %v) { ; CHECK-LABEL: vreduce_smax_v1i1: ; CHECK: # %bb.0: @@ -67,8 +57,6 @@ define zeroext i1 @vreduce_smax_v1i1(<1 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umin.v1i1(<1 x i1>) - define zeroext i1 @vreduce_umin_v1i1(<1 x i1> %v) { ; CHECK-LABEL: vreduce_umin_v1i1: ; CHECK: # %bb.0: @@ -80,8 +68,6 @@ define zeroext i1 @vreduce_umin_v1i1(<1 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smin.v1i1(<1 x i1>) - define zeroext i1 @vreduce_smin_v1i1(<1 x i1> %v) { ; CHECK-LABEL: vreduce_smin_v1i1: ; CHECK: # %bb.0: @@ -93,8 +79,6 @@ define zeroext i1 @vreduce_smin_v1i1(<1 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.or.v2i1(<2 x i1>) - define zeroext i1 @vreduce_or_v2i1(<2 x i1> %v) { ; CHECK-LABEL: vreduce_or_v2i1: ; CHECK: # %bb.0: @@ -106,8 +90,6 @@ define zeroext i1 @vreduce_or_v2i1(<2 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.xor.v2i1(<2 x i1>) - define zeroext i1 @vreduce_xor_v2i1(<2 x i1> %v) { ; CHECK-LABEL: vreduce_xor_v2i1: ; CHECK: # %bb.0: @@ -119,8 +101,6 @@ define zeroext i1 @vreduce_xor_v2i1(<2 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.and.v2i1(<2 x i1>) - define zeroext i1 @vreduce_and_v2i1(<2 x i1> %v) { ; CHECK-LABEL: vreduce_and_v2i1: ; CHECK: # %bb.0: @@ -133,8 +113,6 @@ define zeroext i1 @vreduce_and_v2i1(<2 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umax.v2i1(<2 x i1>) - define zeroext i1 @vreduce_umax_v2i1(<2 x i1> %v) { ; CHECK-LABEL: vreduce_umax_v2i1: ; CHECK: # %bb.0: @@ -146,8 +124,6 @@ define zeroext i1 @vreduce_umax_v2i1(<2 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smax.v2i1(<2 x i1>) - define zeroext i1 
@vreduce_smax_v2i1(<2 x i1> %v) { ; CHECK-LABEL: vreduce_smax_v2i1: ; CHECK: # %bb.0: @@ -160,8 +136,6 @@ define zeroext i1 @vreduce_smax_v2i1(<2 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umin.v2i1(<2 x i1>) - define zeroext i1 @vreduce_umin_v2i1(<2 x i1> %v) { ; CHECK-LABEL: vreduce_umin_v2i1: ; CHECK: # %bb.0: @@ -174,8 +148,6 @@ define zeroext i1 @vreduce_umin_v2i1(<2 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smin.v2i1(<2 x i1>) - define zeroext i1 @vreduce_smin_v2i1(<2 x i1> %v) { ; CHECK-LABEL: vreduce_smin_v2i1: ; CHECK: # %bb.0: @@ -187,8 +159,6 @@ define zeroext i1 @vreduce_smin_v2i1(<2 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.or.v4i1(<4 x i1>) - define zeroext i1 @vreduce_or_v4i1(<4 x i1> %v) { ; CHECK-LABEL: vreduce_or_v4i1: ; CHECK: # %bb.0: @@ -200,8 +170,6 @@ define zeroext i1 @vreduce_or_v4i1(<4 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.xor.v4i1(<4 x i1>) - define zeroext i1 @vreduce_xor_v4i1(<4 x i1> %v) { ; CHECK-LABEL: vreduce_xor_v4i1: ; CHECK: # %bb.0: @@ -213,8 +181,6 @@ define zeroext i1 @vreduce_xor_v4i1(<4 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.and.v4i1(<4 x i1>) - define zeroext i1 @vreduce_and_v4i1(<4 x i1> %v) { ; CHECK-LABEL: vreduce_and_v4i1: ; CHECK: # %bb.0: @@ -227,8 +193,6 @@ define zeroext i1 @vreduce_and_v4i1(<4 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umax.v4i1(<4 x i1>) - define zeroext i1 @vreduce_umax_v4i1(<4 x i1> %v) { ; CHECK-LABEL: vreduce_umax_v4i1: ; CHECK: # %bb.0: @@ -240,8 +204,6 @@ define zeroext i1 @vreduce_umax_v4i1(<4 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smax.v4i1(<4 x i1>) - define zeroext i1 @vreduce_smax_v4i1(<4 x i1> %v) { ; CHECK-LABEL: vreduce_smax_v4i1: ; CHECK: # %bb.0: @@ -254,8 +216,6 @@ define zeroext i1 @vreduce_smax_v4i1(<4 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umin.v4i1(<4 x i1>) - define zeroext i1 @vreduce_umin_v4i1(<4 x i1> %v) { ; CHECK-LABEL: 
vreduce_umin_v4i1: ; CHECK: # %bb.0: @@ -268,8 +228,6 @@ define zeroext i1 @vreduce_umin_v4i1(<4 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smin.v4i1(<4 x i1>) - define zeroext i1 @vreduce_smin_v4i1(<4 x i1> %v) { ; CHECK-LABEL: vreduce_smin_v4i1: ; CHECK: # %bb.0: @@ -281,8 +239,6 @@ define zeroext i1 @vreduce_smin_v4i1(<4 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.or.v8i1(<8 x i1>) - define zeroext i1 @vreduce_or_v8i1(<8 x i1> %v) { ; CHECK-LABEL: vreduce_or_v8i1: ; CHECK: # %bb.0: @@ -294,8 +250,6 @@ define zeroext i1 @vreduce_or_v8i1(<8 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.xor.v8i1(<8 x i1>) - define zeroext i1 @vreduce_xor_v8i1(<8 x i1> %v) { ; CHECK-LABEL: vreduce_xor_v8i1: ; CHECK: # %bb.0: @@ -307,8 +261,6 @@ define zeroext i1 @vreduce_xor_v8i1(<8 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.and.v8i1(<8 x i1>) - define zeroext i1 @vreduce_and_v8i1(<8 x i1> %v) { ; CHECK-LABEL: vreduce_and_v8i1: ; CHECK: # %bb.0: @@ -321,8 +273,6 @@ define zeroext i1 @vreduce_and_v8i1(<8 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umax.v8i1(<8 x i1>) - define zeroext i1 @vreduce_umax_v8i1(<8 x i1> %v) { ; CHECK-LABEL: vreduce_umax_v8i1: ; CHECK: # %bb.0: @@ -334,8 +284,6 @@ define zeroext i1 @vreduce_umax_v8i1(<8 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smax.v8i1(<8 x i1>) - define zeroext i1 @vreduce_smax_v8i1(<8 x i1> %v) { ; CHECK-LABEL: vreduce_smax_v8i1: ; CHECK: # %bb.0: @@ -348,8 +296,6 @@ define zeroext i1 @vreduce_smax_v8i1(<8 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umin.v8i1(<8 x i1>) - define zeroext i1 @vreduce_umin_v8i1(<8 x i1> %v) { ; CHECK-LABEL: vreduce_umin_v8i1: ; CHECK: # %bb.0: @@ -362,8 +308,6 @@ define zeroext i1 @vreduce_umin_v8i1(<8 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smin.v8i1(<8 x i1>) - define zeroext i1 @vreduce_smin_v8i1(<8 x i1> %v) { ; CHECK-LABEL: vreduce_smin_v8i1: ; CHECK: # %bb.0: @@ -375,8 +319,6 @@ define 
zeroext i1 @vreduce_smin_v8i1(<8 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.or.v16i1(<16 x i1>) - define zeroext i1 @vreduce_or_v16i1(<16 x i1> %v) { ; CHECK-LABEL: vreduce_or_v16i1: ; CHECK: # %bb.0: @@ -388,8 +330,6 @@ define zeroext i1 @vreduce_or_v16i1(<16 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.xor.v16i1(<16 x i1>) - define zeroext i1 @vreduce_xor_v16i1(<16 x i1> %v) { ; CHECK-LABEL: vreduce_xor_v16i1: ; CHECK: # %bb.0: @@ -401,8 +341,6 @@ define zeroext i1 @vreduce_xor_v16i1(<16 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.and.v16i1(<16 x i1>) - define zeroext i1 @vreduce_and_v16i1(<16 x i1> %v) { ; CHECK-LABEL: vreduce_and_v16i1: ; CHECK: # %bb.0: @@ -415,8 +353,6 @@ define zeroext i1 @vreduce_and_v16i1(<16 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umax.v16i1(<16 x i1>) - define zeroext i1 @vreduce_umax_v16i1(<16 x i1> %v) { ; CHECK-LABEL: vreduce_umax_v16i1: ; CHECK: # %bb.0: @@ -428,8 +364,6 @@ define zeroext i1 @vreduce_umax_v16i1(<16 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smax.v16i1(<16 x i1>) - define zeroext i1 @vreduce_smax_v16i1(<16 x i1> %v) { ; CHECK-LABEL: vreduce_smax_v16i1: ; CHECK: # %bb.0: @@ -442,8 +376,6 @@ define zeroext i1 @vreduce_smax_v16i1(<16 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umin.v16i1(<16 x i1>) - define zeroext i1 @vreduce_umin_v16i1(<16 x i1> %v) { ; CHECK-LABEL: vreduce_umin_v16i1: ; CHECK: # %bb.0: @@ -456,8 +388,6 @@ define zeroext i1 @vreduce_umin_v16i1(<16 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smin.v16i1(<16 x i1>) - define zeroext i1 @vreduce_smin_v16i1(<16 x i1> %v) { ; CHECK-LABEL: vreduce_smin_v16i1: ; CHECK: # %bb.0: @@ -469,8 +399,6 @@ define zeroext i1 @vreduce_smin_v16i1(<16 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.or.v32i1(<32 x i1>) - define zeroext i1 @vreduce_or_v32i1(<32 x i1> %v) { ; CHECK-LABEL: vreduce_or_v32i1: ; CHECK: # %bb.0: @@ -483,8 +411,6 @@ define zeroext i1 
@vreduce_or_v32i1(<32 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.xor.v32i1(<32 x i1>) - define zeroext i1 @vreduce_xor_v32i1(<32 x i1> %v) { ; CHECK-LABEL: vreduce_xor_v32i1: ; CHECK: # %bb.0: @@ -497,8 +423,6 @@ define zeroext i1 @vreduce_xor_v32i1(<32 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.and.v32i1(<32 x i1>) - define zeroext i1 @vreduce_and_v32i1(<32 x i1> %v) { ; CHECK-LABEL: vreduce_and_v32i1: ; CHECK: # %bb.0: @@ -512,8 +436,6 @@ define zeroext i1 @vreduce_and_v32i1(<32 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umax.v32i1(<32 x i1>) - define zeroext i1 @vreduce_umax_v32i1(<32 x i1> %v) { ; CHECK-LABEL: vreduce_umax_v32i1: ; CHECK: # %bb.0: @@ -526,8 +448,6 @@ define zeroext i1 @vreduce_umax_v32i1(<32 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smax.v32i1(<32 x i1>) - define zeroext i1 @vreduce_smax_v32i1(<32 x i1> %v) { ; CHECK-LABEL: vreduce_smax_v32i1: ; CHECK: # %bb.0: @@ -541,8 +461,6 @@ define zeroext i1 @vreduce_smax_v32i1(<32 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umin.v32i1(<32 x i1>) - define zeroext i1 @vreduce_umin_v32i1(<32 x i1> %v) { ; CHECK-LABEL: vreduce_umin_v32i1: ; CHECK: # %bb.0: @@ -556,8 +474,6 @@ define zeroext i1 @vreduce_umin_v32i1(<32 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smin.v32i1(<32 x i1>) - define zeroext i1 @vreduce_smin_v32i1(<32 x i1> %v) { ; CHECK-LABEL: vreduce_smin_v32i1: ; CHECK: # %bb.0: @@ -570,8 +486,6 @@ define zeroext i1 @vreduce_smin_v32i1(<32 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.or.v64i1(<64 x i1>) - define zeroext i1 @vreduce_or_v64i1(<64 x i1> %v) { ; CHECK-LABEL: vreduce_or_v64i1: ; CHECK: # %bb.0: @@ -584,8 +498,6 @@ define zeroext i1 @vreduce_or_v64i1(<64 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.xor.v64i1(<64 x i1>) - define zeroext i1 @vreduce_xor_v64i1(<64 x i1> %v) { ; CHECK-LABEL: vreduce_xor_v64i1: ; CHECK: # %bb.0: @@ -598,8 +510,6 @@ define zeroext i1 
@vreduce_xor_v64i1(<64 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.and.v64i1(<64 x i1>) - define zeroext i1 @vreduce_and_v64i1(<64 x i1> %v) { ; CHECK-LABEL: vreduce_and_v64i1: ; CHECK: # %bb.0: @@ -613,8 +523,6 @@ define zeroext i1 @vreduce_and_v64i1(<64 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umax.v64i1(<64 x i1>) - define zeroext i1 @vreduce_umax_v64i1(<64 x i1> %v) { ; CHECK-LABEL: vreduce_umax_v64i1: ; CHECK: # %bb.0: @@ -627,8 +535,6 @@ define zeroext i1 @vreduce_umax_v64i1(<64 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smax.v64i1(<64 x i1>) - define zeroext i1 @vreduce_smax_v64i1(<64 x i1> %v) { ; CHECK-LABEL: vreduce_smax_v64i1: ; CHECK: # %bb.0: @@ -642,8 +548,6 @@ define zeroext i1 @vreduce_smax_v64i1(<64 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umin.v64i1(<64 x i1>) - define zeroext i1 @vreduce_umin_v64i1(<64 x i1> %v) { ; CHECK-LABEL: vreduce_umin_v64i1: ; CHECK: # %bb.0: @@ -657,8 +561,6 @@ define zeroext i1 @vreduce_umin_v64i1(<64 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smin.v64i1(<64 x i1>) - define zeroext i1 @vreduce_smin_v64i1(<64 x i1> %v) { ; CHECK-LABEL: vreduce_smin_v64i1: ; CHECK: # %bb.0: @@ -671,8 +573,6 @@ define zeroext i1 @vreduce_smin_v64i1(<64 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.add.v1i1(<1 x i1>) - define zeroext i1 @vreduce_add_v1i1(<1 x i1> %v) { ; CHECK-LABEL: vreduce_add_v1i1: ; CHECK: # %bb.0: @@ -684,8 +584,6 @@ define zeroext i1 @vreduce_add_v1i1(<1 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.add.v2i1(<2 x i1>) - define zeroext i1 @vreduce_add_v2i1(<2 x i1> %v) { ; CHECK-LABEL: vreduce_add_v2i1: ; CHECK: # %bb.0: @@ -697,8 +595,6 @@ define zeroext i1 @vreduce_add_v2i1(<2 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.add.v4i1(<4 x i1>) - define zeroext i1 @vreduce_add_v4i1(<4 x i1> %v) { ; CHECK-LABEL: vreduce_add_v4i1: ; CHECK: # %bb.0: @@ -710,8 +606,6 @@ define zeroext i1 @vreduce_add_v4i1(<4 x i1> 
%v) { ret i1 %red } -declare i1 @llvm.vector.reduce.add.v8i1(<8 x i1>) - define zeroext i1 @vreduce_add_v8i1(<8 x i1> %v) { ; CHECK-LABEL: vreduce_add_v8i1: ; CHECK: # %bb.0: @@ -723,8 +617,6 @@ define zeroext i1 @vreduce_add_v8i1(<8 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.add.v16i1(<16 x i1>) - define zeroext i1 @vreduce_add_v16i1(<16 x i1> %v) { ; CHECK-LABEL: vreduce_add_v16i1: ; CHECK: # %bb.0: @@ -736,8 +628,6 @@ define zeroext i1 @vreduce_add_v16i1(<16 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.add.v32i1(<32 x i1>) - define zeroext i1 @vreduce_add_v32i1(<32 x i1> %v) { ; CHECK-LABEL: vreduce_add_v32i1: ; CHECK: # %bb.0: @@ -750,8 +640,6 @@ define zeroext i1 @vreduce_add_v32i1(<32 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.add.v64i1(<64 x i1>) - define zeroext i1 @vreduce_add_v64i1(<64 x i1> %v) { ; CHECK-LABEL: vreduce_add_v64i1: ; CHECK: # %bb.0: @@ -764,8 +652,6 @@ define zeroext i1 @vreduce_add_v64i1(<64 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.or.v128i1(<128 x i1>) - define zeroext i1 @vreduce_or_v128i1(<128 x i1> %v) { ; CHECK-LABEL: vreduce_or_v128i1: ; CHECK: # %bb.0: @@ -778,8 +664,6 @@ define zeroext i1 @vreduce_or_v128i1(<128 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.xor.v128i1(<128 x i1>) - define zeroext i1 @vreduce_xor_v128i1(<128 x i1> %v) { ; CHECK-LABEL: vreduce_xor_v128i1: ; CHECK: # %bb.0: @@ -792,8 +676,6 @@ define zeroext i1 @vreduce_xor_v128i1(<128 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.and.v128i1(<128 x i1>) - define zeroext i1 @vreduce_and_v128i1(<128 x i1> %v) { ; CHECK-LABEL: vreduce_and_v128i1: ; CHECK: # %bb.0: @@ -807,8 +689,6 @@ define zeroext i1 @vreduce_and_v128i1(<128 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umax.v128i1(<128 x i1>) - define zeroext i1 @vreduce_umax_v128i1(<128 x i1> %v) { ; CHECK-LABEL: vreduce_umax_v128i1: ; CHECK: # %bb.0: @@ -821,8 +701,6 @@ define zeroext i1 @vreduce_umax_v128i1(<128 x i1> 
%v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smax.v128i1(<128 x i1>) - define zeroext i1 @vreduce_smax_v128i1(<128 x i1> %v) { ; CHECK-LABEL: vreduce_smax_v128i1: ; CHECK: # %bb.0: @@ -836,8 +714,6 @@ define zeroext i1 @vreduce_smax_v128i1(<128 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umin.v128i1(<128 x i1>) - define zeroext i1 @vreduce_umin_v128i1(<128 x i1> %v) { ; CHECK-LABEL: vreduce_umin_v128i1: ; CHECK: # %bb.0: @@ -851,8 +727,6 @@ define zeroext i1 @vreduce_umin_v128i1(<128 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smin.v128i1(<128 x i1>) - define zeroext i1 @vreduce_smin_v128i1(<128 x i1> %v) { ; CHECK-LABEL: vreduce_smin_v128i1: ; CHECK: # %bb.0: @@ -865,8 +739,6 @@ define zeroext i1 @vreduce_smin_v128i1(<128 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.or.v256i1(<256 x i1>) - define zeroext i1 @vreduce_or_v256i1(<256 x i1> %v) { ; CHECK-LABEL: vreduce_or_v256i1: ; CHECK: # %bb.0: @@ -880,8 +752,6 @@ define zeroext i1 @vreduce_or_v256i1(<256 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.xor.v256i1(<256 x i1>) - define zeroext i1 @vreduce_xor_v256i1(<256 x i1> %v) { ; CHECK-LABEL: vreduce_xor_v256i1: ; CHECK: # %bb.0: @@ -895,8 +765,6 @@ define zeroext i1 @vreduce_xor_v256i1(<256 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.and.v256i1(<256 x i1>) - define zeroext i1 @vreduce_and_v256i1(<256 x i1> %v) { ; CHECK-LABEL: vreduce_and_v256i1: ; CHECK: # %bb.0: @@ -910,8 +778,6 @@ define zeroext i1 @vreduce_and_v256i1(<256 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umax.v256i1(<256 x i1>) - define zeroext i1 @vreduce_umax_v256i1(<256 x i1> %v) { ; CHECK-LABEL: vreduce_umax_v256i1: ; CHECK: # %bb.0: @@ -925,8 +791,6 @@ define zeroext i1 @vreduce_umax_v256i1(<256 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smax.v256i1(<256 x i1>) - define zeroext i1 @vreduce_smax_v256i1(<256 x i1> %v) { ; CHECK-LABEL: vreduce_smax_v256i1: ; CHECK: # %bb.0: @@ -940,8 +804,6 @@ 
define zeroext i1 @vreduce_smax_v256i1(<256 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umin.v256i1(<256 x i1>) - define zeroext i1 @vreduce_umin_v256i1(<256 x i1> %v) { ; CHECK-LABEL: vreduce_umin_v256i1: ; CHECK: # %bb.0: @@ -955,8 +817,6 @@ define zeroext i1 @vreduce_umin_v256i1(<256 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smin.v256i1(<256 x i1>) - define zeroext i1 @vreduce_smin_v256i1(<256 x i1> %v) { ; CHECK-LABEL: vreduce_smin_v256i1: ; CHECK: # %bb.0: @@ -970,8 +830,6 @@ define zeroext i1 @vreduce_smin_v256i1(<256 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.or.v512i1(<512 x i1>) - define zeroext i1 @vreduce_or_v512i1(<512 x i1> %v) { ; CHECK-LABEL: vreduce_or_v512i1: ; CHECK: # %bb.0: @@ -987,8 +845,6 @@ define zeroext i1 @vreduce_or_v512i1(<512 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.xor.v512i1(<512 x i1>) - define zeroext i1 @vreduce_xor_v512i1(<512 x i1> %v) { ; CHECK-LABEL: vreduce_xor_v512i1: ; CHECK: # %bb.0: @@ -1004,8 +860,6 @@ define zeroext i1 @vreduce_xor_v512i1(<512 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.and.v512i1(<512 x i1>) - define zeroext i1 @vreduce_and_v512i1(<512 x i1> %v) { ; CHECK-LABEL: vreduce_and_v512i1: ; CHECK: # %bb.0: @@ -1021,8 +875,6 @@ define zeroext i1 @vreduce_and_v512i1(<512 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umax.v512i1(<512 x i1>) - define zeroext i1 @vreduce_umax_v512i1(<512 x i1> %v) { ; CHECK-LABEL: vreduce_umax_v512i1: ; CHECK: # %bb.0: @@ -1038,8 +890,6 @@ define zeroext i1 @vreduce_umax_v512i1(<512 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smax.v512i1(<512 x i1>) - define zeroext i1 @vreduce_smax_v512i1(<512 x i1> %v) { ; CHECK-LABEL: vreduce_smax_v512i1: ; CHECK: # %bb.0: @@ -1055,8 +905,6 @@ define zeroext i1 @vreduce_smax_v512i1(<512 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umin.v512i1(<512 x i1>) - define zeroext i1 @vreduce_umin_v512i1(<512 x i1> %v) { ; CHECK-LABEL: 
vreduce_umin_v512i1: ; CHECK: # %bb.0: @@ -1072,8 +920,6 @@ define zeroext i1 @vreduce_umin_v512i1(<512 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smin.v512i1(<512 x i1>) - define zeroext i1 @vreduce_smin_v512i1(<512 x i1> %v) { ; CHECK-LABEL: vreduce_smin_v512i1: ; CHECK: # %bb.0: @@ -1089,8 +935,6 @@ define zeroext i1 @vreduce_smin_v512i1(<512 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.or.v1024i1(<1024 x i1>) - define zeroext i1 @vreduce_or_v1024i1(<1024 x i1> %v) { ; CHECK-LABEL: vreduce_or_v1024i1: ; CHECK: # %bb.0: @@ -1110,8 +954,6 @@ define zeroext i1 @vreduce_or_v1024i1(<1024 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.xor.v1024i1(<1024 x i1>) - define zeroext i1 @vreduce_xor_v1024i1(<1024 x i1> %v) { ; CHECK-LABEL: vreduce_xor_v1024i1: ; CHECK: # %bb.0: @@ -1131,8 +973,6 @@ define zeroext i1 @vreduce_xor_v1024i1(<1024 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.and.v1024i1(<1024 x i1>) - define zeroext i1 @vreduce_and_v1024i1(<1024 x i1> %v) { ; CHECK-LABEL: vreduce_and_v1024i1: ; CHECK: # %bb.0: @@ -1152,8 +992,6 @@ define zeroext i1 @vreduce_and_v1024i1(<1024 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umax.v1024i1(<1024 x i1>) - define zeroext i1 @vreduce_umax_v1024i1(<1024 x i1> %v) { ; CHECK-LABEL: vreduce_umax_v1024i1: ; CHECK: # %bb.0: @@ -1173,8 +1011,6 @@ define zeroext i1 @vreduce_umax_v1024i1(<1024 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smax.v1024i1(<1024 x i1>) - define zeroext i1 @vreduce_smax_v1024i1(<1024 x i1> %v) { ; CHECK-LABEL: vreduce_smax_v1024i1: ; CHECK: # %bb.0: @@ -1194,8 +1030,6 @@ define zeroext i1 @vreduce_smax_v1024i1(<1024 x i1> %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umin.v1024i1(<1024 x i1>) - define zeroext i1 @vreduce_umin_v1024i1(<1024 x i1> %v) { ; CHECK-LABEL: vreduce_umin_v1024i1: ; CHECK: # %bb.0: @@ -1215,8 +1049,6 @@ define zeroext i1 @vreduce_umin_v1024i1(<1024 x i1> %v) { ret i1 %red } -declare i1 
@llvm.vector.reduce.smin.v1024i1(<1024 x i1>) - define zeroext i1 @vreduce_smin_v1024i1(<1024 x i1> %v) { ; CHECK-LABEL: vreduce_smin_v1024i1: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll index b3d35a51280ac..78eabfec4153e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <8 x i7> @llvm.vp.srem.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32) - define <8 x i7> @vrem_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v8i7: ; CHECK: # %bb.0: @@ -20,8 +18,6 @@ define <8 x i7> @vrem_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroex ret <8 x i7> %v } -declare <2 x i8> @llvm.vp.srem.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) - define <2 x i8> @vrem_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v2i8: ; CHECK: # %bb.0: @@ -66,8 +62,6 @@ define <2 x i8> @vrem_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.srem.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) - define <4 x i8> @vrem_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v4i8: ; CHECK: # %bb.0: @@ -112,8 +106,6 @@ define <4 x i8> @vrem_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) { ret <4 x i8> %v } -declare <6 x i8> @llvm.vp.srem.v6i8(<6 x i8>, <6 x i8>, <6 x i1>, i32) - define <6 x i8> @vrem_vv_v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v6i8: ; CHECK: # %bb.0: @@ -124,8 +116,6 @@ define <6 x i8> @vrem_vv_v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 zeroex ret <6 x i8> %v } -declare <8 x i8> @llvm.vp.srem.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) - define <8 x i8> @vrem_vv_v8i8(<8 x 
i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v8i8: ; CHECK: # %bb.0: @@ -170,8 +160,6 @@ define <8 x i8> @vrem_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.srem.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) - define <16 x i8> @vrem_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v16i8: ; CHECK: # %bb.0: @@ -216,8 +204,6 @@ define <16 x i8> @vrem_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl) ret <16 x i8> %v } -declare <2 x i16> @llvm.vp.srem.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) - define <2 x i16> @vrem_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v2i16: ; CHECK: # %bb.0: @@ -262,8 +248,6 @@ define <2 x i16> @vrem_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.srem.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) - define <4 x i16> @vrem_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v4i16: ; CHECK: # %bb.0: @@ -308,8 +292,6 @@ define <4 x i16> @vrem_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.srem.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) - define <8 x i16> @vrem_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v8i16: ; CHECK: # %bb.0: @@ -354,8 +336,6 @@ define <8 x i16> @vrem_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.srem.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) - define <16 x i16> @vrem_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v16i16: ; CHECK: # %bb.0: @@ -400,8 +380,6 @@ define <16 x i16> @vrem_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext % ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.srem.v2i32(<2 x i32>, <2 x i32>, <2 x 
i1>, i32) - define <2 x i32> @vrem_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v2i32: ; CHECK: # %bb.0: @@ -446,8 +424,6 @@ define <2 x i32> @vrem_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.srem.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vrem_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v4i32: ; CHECK: # %bb.0: @@ -492,8 +468,6 @@ define <4 x i32> @vrem_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.srem.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) - define <8 x i32> @vrem_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v8i32: ; CHECK: # %bb.0: @@ -538,8 +512,6 @@ define <8 x i32> @vrem_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.srem.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32) - define <16 x i32> @vrem_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v16i32: ; CHECK: # %bb.0: @@ -584,8 +556,6 @@ define <16 x i32> @vrem_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext % ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.srem.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) - define <2 x i64> @vrem_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v2i64: ; CHECK: # %bb.0: @@ -660,8 +630,6 @@ define <2 x i64> @vrem_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.srem.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) - define <4 x i64> @vrem_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v4i64: ; CHECK: # %bb.0: @@ -736,8 +704,6 @@ define <4 x i64> @vrem_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl ret <4 x i64> %v } -declare <8 x 
i64> @llvm.vp.srem.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) - define <8 x i64> @vrem_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v8i64: ; CHECK: # %bb.0: @@ -812,8 +778,6 @@ define <8 x i64> @vrem_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl ret <8 x i64> %v } -declare <16 x i64> @llvm.vp.srem.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32) - define <16 x i64> @vrem_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v16i64: ; CHECK: # %bb.0: @@ -888,9 +852,6 @@ define <16 x i64> @vrem_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext % ret <16 x i64> %v } - -declare <3 x i8> @llvm.vp.srem.v3i8(<3 x i8>, <3 x i8>, <3 x i1>, i32) - define <3 x i8> @vrem_vv_v3i8_unmasked(<3 x i8> %va, <3 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v3i8_unmasked: ; CHECK: # %bb.0: @@ -911,8 +872,6 @@ define <3 x i8> @vrem_vv_v3i8_unmasked_avl3(<3 x i8> %va, <3 x i8> %b) { ret <3 x i8> %v } -declare <7 x i8> @llvm.vp.srem.v7i8(<7 x i8>, <7 x i8>, <7 x i1>, i32) - define <7 x i8> @vrem_vv_v7i8_unmasked(<7 x i8> %va, <7 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v7i8_unmasked: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll index 2a453e3a39c2f..7ba66d61b13f8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <8 x i7> @llvm.vp.urem.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32) - define <8 x i7> @vremu_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v8i7: ; CHECK: # %bb.0: @@ -19,8 +17,6 @@ define <8 x i7> @vremu_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroe ret <8 x i7> %v } -declare <2 x i8> 
@llvm.vp.urem.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) - define <2 x i8> @vremu_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v2i8: ; CHECK: # %bb.0: @@ -65,8 +61,6 @@ define <2 x i8> @vremu_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.urem.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) - define <4 x i8> @vremu_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v4i8: ; CHECK: # %bb.0: @@ -111,8 +105,6 @@ define <4 x i8> @vremu_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) { ret <4 x i8> %v } -declare <6 x i8> @llvm.vp.urem.v6i8(<6 x i8>, <6 x i8>, <6 x i1>, i32) - define <6 x i8> @vremu_vv_v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v6i8: ; CHECK: # %bb.0: @@ -123,8 +115,6 @@ define <6 x i8> @vremu_vv_v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 zeroe ret <6 x i8> %v } -declare <8 x i8> @llvm.vp.urem.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) - define <8 x i8> @vremu_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v8i8: ; CHECK: # %bb.0: @@ -169,8 +159,6 @@ define <8 x i8> @vremu_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.urem.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) - define <16 x i8> @vremu_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v16i8: ; CHECK: # %bb.0: @@ -215,8 +203,6 @@ define <16 x i8> @vremu_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl ret <16 x i8> %v } -declare <2 x i16> @llvm.vp.urem.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) - define <2 x i16> @vremu_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v2i16: ; CHECK: # %bb.0: @@ -261,8 +247,6 @@ define <2 x i16> @vremu_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %ev ret <2 x i16> %v } 
-declare <4 x i16> @llvm.vp.urem.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) - define <4 x i16> @vremu_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v4i16: ; CHECK: # %bb.0: @@ -307,8 +291,6 @@ define <4 x i16> @vremu_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %ev ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.urem.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) - define <8 x i16> @vremu_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v8i16: ; CHECK: # %bb.0: @@ -353,8 +335,6 @@ define <8 x i16> @vremu_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %ev ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.urem.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) - define <16 x i16> @vremu_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v16i16: ; CHECK: # %bb.0: @@ -399,8 +379,6 @@ define <16 x i16> @vremu_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.urem.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) - define <2 x i32> @vremu_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v2i32: ; CHECK: # %bb.0: @@ -445,8 +423,6 @@ define <2 x i32> @vremu_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %ev ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.urem.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vremu_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v4i32: ; CHECK: # %bb.0: @@ -491,8 +467,6 @@ define <4 x i32> @vremu_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %ev ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.urem.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) - define <8 x i32> @vremu_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v8i32: ; CHECK: # %bb.0: @@ -537,8 +511,6 @@ define <8 x i32> 
@vremu_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %ev ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.urem.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32) - define <16 x i32> @vremu_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v16i32: ; CHECK: # %bb.0: @@ -583,8 +555,6 @@ define <16 x i32> @vremu_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.urem.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) - define <2 x i64> @vremu_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v2i64: ; CHECK: # %bb.0: @@ -659,8 +629,6 @@ define <2 x i64> @vremu_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %ev ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.urem.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) - define <4 x i64> @vremu_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v4i64: ; CHECK: # %bb.0: @@ -735,8 +703,6 @@ define <4 x i64> @vremu_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %ev ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.urem.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) - define <8 x i64> @vremu_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v8i64: ; CHECK: # %bb.0: @@ -811,8 +777,6 @@ define <8 x i64> @vremu_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %ev ret <8 x i64> %v } -declare <16 x i64> @llvm.vp.urem.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32) - define <16 x i64> @vremu_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v16i64: ; CHECK: # %bb.0: @@ -887,9 +851,6 @@ define <16 x i64> @vremu_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext ret <16 x i64> %v } - -declare <3 x i8> @llvm.vp.urem.v3i8(<3 x i8>, <3 x i8>, <3 x i1>, i32) - define <3 x i8> @vremu_vv_v3i8_unmasked(<3 x i8> %va, <3 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: 
vremu_vv_v3i8_unmasked: ; CHECK: # %bb.0: @@ -910,8 +871,6 @@ define <3 x i8> @vremu_vv_v3i8_unmasked_avl3(<3 x i8> %va, <3 x i8> %b) { ret <3 x i8> %v } -declare <7 x i8> @llvm.vp.urem.v7i8(<7 x i8>, <7 x i8>, <7 x i1>, i32) - define <7 x i8> @vremu_vv_v7i8_unmasked(<7 x i8> %va, <7 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v7i8_unmasked: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrol.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrol.ll index 32ae81926bbee..9e0b04d7f09ba 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrol.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrol.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v,+zvkb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVKB ; RUN: llc -mtriple=riscv64 -mattr=+v,+zvkb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVKB -declare <1 x i8> @llvm.fshl.v1i8(<1 x i8>, <1 x i8>, <1 x i8>) - define <1 x i8> @vrol_vv_v1i8(<1 x i8> %a, <1 x i8> %b) { ; CHECK-LABEL: vrol_vv_v1i8: ; CHECK: # %bb.0: @@ -51,8 +49,6 @@ define <1 x i8> @vrol_vx_v1i8(<1 x i8> %a, i8 %b) { ret <1 x i8> %x } -declare <2 x i8> @llvm.fshl.v2i8(<2 x i8>, <2 x i8>, <2 x i8>) - define <2 x i8> @vrol_vv_v2i8(<2 x i8> %a, <2 x i8> %b) { ; CHECK-LABEL: vrol_vv_v2i8: ; CHECK: # %bb.0: @@ -98,8 +94,6 @@ define <2 x i8> @vrol_vx_v2i8(<2 x i8> %a, i8 %b) { ret <2 x i8> %x } -declare <4 x i8> @llvm.fshl.v4i8(<4 x i8>, <4 x i8>, <4 x i8>) - define <4 x i8> @vrol_vv_v4i8(<4 x i8> %a, <4 x i8> %b) { ; CHECK-LABEL: vrol_vv_v4i8: ; CHECK: # %bb.0: @@ -145,8 +139,6 @@ define <4 x i8> @vrol_vx_v4i8(<4 x i8> %a, i8 %b) { ret <4 x i8> %x } -declare <8 x i8> @llvm.fshl.v8i8(<8 x i8>, <8 x i8>, <8 x i8>) - define <8 x i8> @vrol_vv_v8i8(<8 x i8> %a, <8 x i8> %b) { ; CHECK-LABEL: vrol_vv_v8i8: ; CHECK: # %bb.0: @@ -192,8 +184,6 @@ define <8 x i8> @vrol_vx_v8i8(<8 x i8> %a, i8 %b) { ret <8 x i8> %x } -declare <16 x i8> @llvm.fshl.v16i8(<16 x i8>, <16 x i8>, <16 x i8>) - 
define <16 x i8> @vrol_vv_v16i8(<16 x i8> %a, <16 x i8> %b) { ; CHECK-LABEL: vrol_vv_v16i8: ; CHECK: # %bb.0: @@ -239,8 +229,6 @@ define <16 x i8> @vrol_vx_v16i8(<16 x i8> %a, i8 %b) { ret <16 x i8> %x } -declare <32 x i8> @llvm.fshl.v32i8(<32 x i8>, <32 x i8>, <32 x i8>) - define <32 x i8> @vrol_vv_v32i8(<32 x i8> %a, <32 x i8> %b) { ; CHECK-LABEL: vrol_vv_v32i8: ; CHECK: # %bb.0: @@ -290,8 +278,6 @@ define <32 x i8> @vrol_vx_v32i8(<32 x i8> %a, i8 %b) { ret <32 x i8> %x } -declare <64 x i8> @llvm.fshl.v64i8(<64 x i8>, <64 x i8>, <64 x i8>) - define <64 x i8> @vrol_vv_v64i8(<64 x i8> %a, <64 x i8> %b) { ; CHECK-LABEL: vrol_vv_v64i8: ; CHECK: # %bb.0: @@ -341,8 +327,6 @@ define <64 x i8> @vrol_vx_v64i8(<64 x i8> %a, i8 %b) { ret <64 x i8> %x } -declare <1 x i16> @llvm.fshl.v1i16(<1 x i16>, <1 x i16>, <1 x i16>) - define <1 x i16> @vrol_vv_v1i16(<1 x i16> %a, <1 x i16> %b) { ; CHECK-LABEL: vrol_vv_v1i16: ; CHECK: # %bb.0: @@ -388,8 +372,6 @@ define <1 x i16> @vrol_vx_v1i16(<1 x i16> %a, i16 %b) { ret <1 x i16> %x } -declare <2 x i16> @llvm.fshl.v2i16(<2 x i16>, <2 x i16>, <2 x i16>) - define <2 x i16> @vrol_vv_v2i16(<2 x i16> %a, <2 x i16> %b) { ; CHECK-LABEL: vrol_vv_v2i16: ; CHECK: # %bb.0: @@ -435,8 +417,6 @@ define <2 x i16> @vrol_vx_v2i16(<2 x i16> %a, i16 %b) { ret <2 x i16> %x } -declare <4 x i16> @llvm.fshl.v4i16(<4 x i16>, <4 x i16>, <4 x i16>) - define <4 x i16> @vrol_vv_v4i16(<4 x i16> %a, <4 x i16> %b) { ; CHECK-LABEL: vrol_vv_v4i16: ; CHECK: # %bb.0: @@ -482,8 +462,6 @@ define <4 x i16> @vrol_vx_v4i16(<4 x i16> %a, i16 %b) { ret <4 x i16> %x } -declare <8 x i16> @llvm.fshl.v8i16(<8 x i16>, <8 x i16>, <8 x i16>) - define <8 x i16> @vrol_vv_v8i16(<8 x i16> %a, <8 x i16> %b) { ; CHECK-LABEL: vrol_vv_v8i16: ; CHECK: # %bb.0: @@ -529,8 +507,6 @@ define <8 x i16> @vrol_vx_v8i16(<8 x i16> %a, i16 %b) { ret <8 x i16> %x } -declare <16 x i16> @llvm.fshl.v16i16(<16 x i16>, <16 x i16>, <16 x i16>) - define <16 x i16> @vrol_vv_v16i16(<16 x i16> %a, <16 x i16> %b) { 
; CHECK-LABEL: vrol_vv_v16i16: ; CHECK: # %bb.0: @@ -576,8 +552,6 @@ define <16 x i16> @vrol_vx_v16i16(<16 x i16> %a, i16 %b) { ret <16 x i16> %x } -declare <32 x i16> @llvm.fshl.v32i16(<32 x i16>, <32 x i16>, <32 x i16>) - define <32 x i16> @vrol_vv_v32i16(<32 x i16> %a, <32 x i16> %b) { ; CHECK-LABEL: vrol_vv_v32i16: ; CHECK: # %bb.0: @@ -627,8 +601,6 @@ define <32 x i16> @vrol_vx_v32i16(<32 x i16> %a, i16 %b) { ret <32 x i16> %x } -declare <1 x i32> @llvm.fshl.v1i32(<1 x i32>, <1 x i32>, <1 x i32>) - define <1 x i32> @vrol_vv_v1i32(<1 x i32> %a, <1 x i32> %b) { ; CHECK-LABEL: vrol_vv_v1i32: ; CHECK: # %bb.0: @@ -676,8 +648,6 @@ define <1 x i32> @vrol_vx_v1i32(<1 x i32> %a, i32 %b) { ret <1 x i32> %x } -declare <2 x i32> @llvm.fshl.v2i32(<2 x i32>, <2 x i32>, <2 x i32>) - define <2 x i32> @vrol_vv_v2i32(<2 x i32> %a, <2 x i32> %b) { ; CHECK-LABEL: vrol_vv_v2i32: ; CHECK: # %bb.0: @@ -725,8 +695,6 @@ define <2 x i32> @vrol_vx_v2i32(<2 x i32> %a, i32 %b) { ret <2 x i32> %x } -declare <4 x i32> @llvm.fshl.v4i32(<4 x i32>, <4 x i32>, <4 x i32>) - define <4 x i32> @vrol_vv_v4i32(<4 x i32> %a, <4 x i32> %b) { ; CHECK-LABEL: vrol_vv_v4i32: ; CHECK: # %bb.0: @@ -774,8 +742,6 @@ define <4 x i32> @vrol_vx_v4i32(<4 x i32> %a, i32 %b) { ret <4 x i32> %x } -declare <8 x i32> @llvm.fshl.v8i32(<8 x i32>, <8 x i32>, <8 x i32>) - define <8 x i32> @vrol_vv_v8i32(<8 x i32> %a, <8 x i32> %b) { ; CHECK-LABEL: vrol_vv_v8i32: ; CHECK: # %bb.0: @@ -823,8 +789,6 @@ define <8 x i32> @vrol_vx_v8i32(<8 x i32> %a, i32 %b) { ret <8 x i32> %x } -declare <16 x i32> @llvm.fshl.v16i32(<16 x i32>, <16 x i32>, <16 x i32>) - define <16 x i32> @vrol_vv_v16i32(<16 x i32> %a, <16 x i32> %b) { ; CHECK-LABEL: vrol_vv_v16i32: ; CHECK: # %bb.0: @@ -872,8 +836,6 @@ define <16 x i32> @vrol_vx_v16i32(<16 x i32> %a, i32 %b) { ret <16 x i32> %x } -declare <1 x i64> @llvm.fshl.v1i64(<1 x i64>, <1 x i64>, <1 x i64>) - define <1 x i64> @vrol_vv_v1i64(<1 x i64> %a, <1 x i64> %b) { ; CHECK-LABEL: vrol_vv_v1i64: ; 
CHECK: # %bb.0: @@ -921,8 +883,6 @@ define <1 x i64> @vrol_vx_v1i64(<1 x i64> %a, i64 %b) { ret <1 x i64> %x } -declare <2 x i64> @llvm.fshl.v2i64(<2 x i64>, <2 x i64>, <2 x i64>) - define <2 x i64> @vrol_vv_v2i64(<2 x i64> %a, <2 x i64> %b) { ; CHECK-LABEL: vrol_vv_v2i64: ; CHECK: # %bb.0: @@ -986,8 +946,6 @@ define <2 x i64> @vrol_vx_v2i64(<2 x i64> %a, i64 %b) { ret <2 x i64> %x } -declare <4 x i64> @llvm.fshl.v4i64(<4 x i64>, <4 x i64>, <4 x i64>) - define <4 x i64> @vrol_vv_v4i64(<4 x i64> %a, <4 x i64> %b) { ; CHECK-LABEL: vrol_vv_v4i64: ; CHECK: # %bb.0: @@ -1051,8 +1009,6 @@ define <4 x i64> @vrol_vx_v4i64(<4 x i64> %a, i64 %b) { ret <4 x i64> %x } -declare <8 x i64> @llvm.fshl.v8i64(<8 x i64>, <8 x i64>, <8 x i64>) - define <8 x i64> @vrol_vv_v8i64(<8 x i64> %a, <8 x i64> %b) { ; CHECK-LABEL: vrol_vv_v8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vror.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vror.ll index ec22d2be1eaad..29aa04e2f308a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vror.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vror.ll @@ -4,9 +4,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v,+zvkb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVKB ; RUN: llc -mtriple=riscv64 -mattr=+v,+zvkb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVKB -declare <1 x i8> @llvm.fshr.v1i8(<1 x i8>, <1 x i8>, <1 x i8>) -declare <1 x i8> @llvm.fshl.v1i8(<1 x i8>, <1 x i8>, <1 x i8>) - define <1 x i8> @vror_vv_v1i8(<1 x i8> %a, <1 x i8> %b) { ; CHECK-LABEL: vror_vv_v1i8: ; CHECK: # %bb.0: @@ -88,9 +85,6 @@ define <1 x i8> @vror_vi_rotl_v1i8(<1 x i8> %a) { ret <1 x i8> %x } -declare <2 x i8> @llvm.fshr.v2i8(<2 x i8>, <2 x i8>, <2 x i8>) -declare <2 x i8> @llvm.fshl.v2i8(<2 x i8>, <2 x i8>, <2 x i8>) - define <2 x i8> @vror_vv_v2i8(<2 x i8> %a, <2 x i8> %b) { ; CHECK-LABEL: vror_vv_v2i8: ; CHECK: # %bb.0: @@ -172,9 +166,6 @@ define <2 x i8> @vror_vi_rotl_v2i8(<2 x i8> %a) { ret <2 x i8> 
%x } -declare <4 x i8> @llvm.fshr.v4i8(<4 x i8>, <4 x i8>, <4 x i8>) -declare <4 x i8> @llvm.fshl.v4i8(<4 x i8>, <4 x i8>, <4 x i8>) - define <4 x i8> @vror_vv_v4i8(<4 x i8> %a, <4 x i8> %b) { ; CHECK-LABEL: vror_vv_v4i8: ; CHECK: # %bb.0: @@ -256,9 +247,6 @@ define <4 x i8> @vror_vi_rotl_v4i8(<4 x i8> %a) { ret <4 x i8> %x } -declare <8 x i8> @llvm.fshr.v8i8(<8 x i8>, <8 x i8>, <8 x i8>) -declare <8 x i8> @llvm.fshl.v8i8(<8 x i8>, <8 x i8>, <8 x i8>) - define <8 x i8> @vror_vv_v8i8(<8 x i8> %a, <8 x i8> %b) { ; CHECK-LABEL: vror_vv_v8i8: ; CHECK: # %bb.0: @@ -340,9 +328,6 @@ define <8 x i8> @vror_vi_rotl_v8i8(<8 x i8> %a) { ret <8 x i8> %x } -declare <16 x i8> @llvm.fshr.v16i8(<16 x i8>, <16 x i8>, <16 x i8>) -declare <16 x i8> @llvm.fshl.v16i8(<16 x i8>, <16 x i8>, <16 x i8>) - define <16 x i8> @vror_vv_v16i8(<16 x i8> %a, <16 x i8> %b) { ; CHECK-LABEL: vror_vv_v16i8: ; CHECK: # %bb.0: @@ -424,9 +409,6 @@ define <16 x i8> @vror_vi_rotl_v16i8(<16 x i8> %a) { ret <16 x i8> %x } -declare <32 x i8> @llvm.fshr.v32i8(<32 x i8>, <32 x i8>, <32 x i8>) -declare <32 x i8> @llvm.fshl.v32i8(<32 x i8>, <32 x i8>, <32 x i8>) - define <32 x i8> @vror_vv_v32i8(<32 x i8> %a, <32 x i8> %b) { ; CHECK-LABEL: vror_vv_v32i8: ; CHECK: # %bb.0: @@ -516,9 +498,6 @@ define <32 x i8> @vror_vi_rotl_v32i8(<32 x i8> %a) { ret <32 x i8> %x } -declare <64 x i8> @llvm.fshr.v64i8(<64 x i8>, <64 x i8>, <64 x i8>) -declare <64 x i8> @llvm.fshl.v64i8(<64 x i8>, <64 x i8>, <64 x i8>) - define <64 x i8> @vror_vv_v64i8(<64 x i8> %a, <64 x i8> %b) { ; CHECK-LABEL: vror_vv_v64i8: ; CHECK: # %bb.0: @@ -608,9 +587,6 @@ define <64 x i8> @vror_vi_rotl_v64i8(<64 x i8> %a) { ret <64 x i8> %x } -declare <1 x i16> @llvm.fshr.v1i16(<1 x i16>, <1 x i16>, <1 x i16>) -declare <1 x i16> @llvm.fshl.v1i16(<1 x i16>, <1 x i16>, <1 x i16>) - define <1 x i16> @vror_vv_v1i16(<1 x i16> %a, <1 x i16> %b) { ; CHECK-LABEL: vror_vv_v1i16: ; CHECK: # %bb.0: @@ -692,9 +668,6 @@ define <1 x i16> @vror_vi_rotl_v1i16(<1 x i16> %a) { 
ret <1 x i16> %x } -declare <2 x i16> @llvm.fshr.v2i16(<2 x i16>, <2 x i16>, <2 x i16>) -declare <2 x i16> @llvm.fshl.v2i16(<2 x i16>, <2 x i16>, <2 x i16>) - define <2 x i16> @vror_vv_v2i16(<2 x i16> %a, <2 x i16> %b) { ; CHECK-LABEL: vror_vv_v2i16: ; CHECK: # %bb.0: @@ -776,9 +749,6 @@ define <2 x i16> @vror_vi_rotl_v2i16(<2 x i16> %a) { ret <2 x i16> %x } -declare <4 x i16> @llvm.fshr.v4i16(<4 x i16>, <4 x i16>, <4 x i16>) -declare <4 x i16> @llvm.fshl.v4i16(<4 x i16>, <4 x i16>, <4 x i16>) - define <4 x i16> @vror_vv_v4i16(<4 x i16> %a, <4 x i16> %b) { ; CHECK-LABEL: vror_vv_v4i16: ; CHECK: # %bb.0: @@ -860,9 +830,6 @@ define <4 x i16> @vror_vi_rotl_v4i16(<4 x i16> %a) { ret <4 x i16> %x } -declare <8 x i16> @llvm.fshr.v8i16(<8 x i16>, <8 x i16>, <8 x i16>) -declare <8 x i16> @llvm.fshl.v8i16(<8 x i16>, <8 x i16>, <8 x i16>) - define <8 x i16> @vror_vv_v8i16(<8 x i16> %a, <8 x i16> %b) { ; CHECK-LABEL: vror_vv_v8i16: ; CHECK: # %bb.0: @@ -944,9 +911,6 @@ define <8 x i16> @vror_vi_rotl_v8i16(<8 x i16> %a) { ret <8 x i16> %x } -declare <16 x i16> @llvm.fshr.v16i16(<16 x i16>, <16 x i16>, <16 x i16>) -declare <16 x i16> @llvm.fshl.v16i16(<16 x i16>, <16 x i16>, <16 x i16>) - define <16 x i16> @vror_vv_v16i16(<16 x i16> %a, <16 x i16> %b) { ; CHECK-LABEL: vror_vv_v16i16: ; CHECK: # %bb.0: @@ -1028,9 +992,6 @@ define <16 x i16> @vror_vi_rotl_v16i16(<16 x i16> %a) { ret <16 x i16> %x } -declare <32 x i16> @llvm.fshr.v32i16(<32 x i16>, <32 x i16>, <32 x i16>) -declare <32 x i16> @llvm.fshl.v32i16(<32 x i16>, <32 x i16>, <32 x i16>) - define <32 x i16> @vror_vv_v32i16(<32 x i16> %a, <32 x i16> %b) { ; CHECK-LABEL: vror_vv_v32i16: ; CHECK: # %bb.0: @@ -1120,9 +1081,6 @@ define <32 x i16> @vror_vi_rotl_v32i16(<32 x i16> %a) { ret <32 x i16> %x } -declare <1 x i32> @llvm.fshr.v1i32(<1 x i32>, <1 x i32>, <1 x i32>) -declare <1 x i32> @llvm.fshl.v1i32(<1 x i32>, <1 x i32>, <1 x i32>) - define <1 x i32> @vror_vv_v1i32(<1 x i32> %a, <1 x i32> %b) { ; CHECK-LABEL: 
vror_vv_v1i32: ; CHECK: # %bb.0: @@ -1206,9 +1164,6 @@ define <1 x i32> @vror_vi_rotl_v1i32(<1 x i32> %a) { ret <1 x i32> %x } -declare <2 x i32> @llvm.fshr.v2i32(<2 x i32>, <2 x i32>, <2 x i32>) -declare <2 x i32> @llvm.fshl.v2i32(<2 x i32>, <2 x i32>, <2 x i32>) - define <2 x i32> @vror_vv_v2i32(<2 x i32> %a, <2 x i32> %b) { ; CHECK-LABEL: vror_vv_v2i32: ; CHECK: # %bb.0: @@ -1292,9 +1247,6 @@ define <2 x i32> @vror_vi_rotl_v2i32(<2 x i32> %a) { ret <2 x i32> %x } -declare <4 x i32> @llvm.fshr.v4i32(<4 x i32>, <4 x i32>, <4 x i32>) -declare <4 x i32> @llvm.fshl.v4i32(<4 x i32>, <4 x i32>, <4 x i32>) - define <4 x i32> @vror_vv_v4i32(<4 x i32> %a, <4 x i32> %b) { ; CHECK-LABEL: vror_vv_v4i32: ; CHECK: # %bb.0: @@ -1378,9 +1330,6 @@ define <4 x i32> @vror_vi_rotl_v4i32(<4 x i32> %a) { ret <4 x i32> %x } -declare <8 x i32> @llvm.fshr.v8i32(<8 x i32>, <8 x i32>, <8 x i32>) -declare <8 x i32> @llvm.fshl.v8i32(<8 x i32>, <8 x i32>, <8 x i32>) - define <8 x i32> @vror_vv_v8i32(<8 x i32> %a, <8 x i32> %b) { ; CHECK-LABEL: vror_vv_v8i32: ; CHECK: # %bb.0: @@ -1464,9 +1413,6 @@ define <8 x i32> @vror_vi_rotl_v8i32(<8 x i32> %a) { ret <8 x i32> %x } -declare <16 x i32> @llvm.fshr.v16i32(<16 x i32>, <16 x i32>, <16 x i32>) -declare <16 x i32> @llvm.fshl.v16i32(<16 x i32>, <16 x i32>, <16 x i32>) - define <16 x i32> @vror_vv_v16i32(<16 x i32> %a, <16 x i32> %b) { ; CHECK-LABEL: vror_vv_v16i32: ; CHECK: # %bb.0: @@ -1550,9 +1496,6 @@ define <16 x i32> @vror_vi_rotl_v16i32(<16 x i32> %a) { ret <16 x i32> %x } -declare <1 x i64> @llvm.fshr.v1i64(<1 x i64>, <1 x i64>, <1 x i64>) -declare <1 x i64> @llvm.fshl.v1i64(<1 x i64>, <1 x i64>, <1 x i64>) - define <1 x i64> @vror_vv_v1i64(<1 x i64> %a, <1 x i64> %b) { ; CHECK-LABEL: vror_vv_v1i64: ; CHECK: # %bb.0: @@ -1666,9 +1609,6 @@ define <1 x i64> @vror_vi_rotl_v1i64(<1 x i64> %a) { ret <1 x i64> %x } -declare <2 x i64> @llvm.fshr.v2i64(<2 x i64>, <2 x i64>, <2 x i64>) -declare <2 x i64> @llvm.fshl.v2i64(<2 x i64>, <2 x i64>, <2 x 
i64>) - define <2 x i64> @vror_vv_v2i64(<2 x i64> %a, <2 x i64> %b) { ; CHECK-LABEL: vror_vv_v2i64: ; CHECK: # %bb.0: @@ -1802,9 +1742,6 @@ define <2 x i64> @vror_vi_rotl_v2i64(<2 x i64> %a) { ret <2 x i64> %x } -declare <4 x i64> @llvm.fshr.v4i64(<4 x i64>, <4 x i64>, <4 x i64>) -declare <4 x i64> @llvm.fshl.v4i64(<4 x i64>, <4 x i64>, <4 x i64>) - define <4 x i64> @vror_vv_v4i64(<4 x i64> %a, <4 x i64> %b) { ; CHECK-LABEL: vror_vv_v4i64: ; CHECK: # %bb.0: @@ -1938,9 +1875,6 @@ define <4 x i64> @vror_vi_rotl_v4i64(<4 x i64> %a) { ret <4 x i64> %x } -declare <8 x i64> @llvm.fshr.v8i64(<8 x i64>, <8 x i64>, <8 x i64>) -declare <8 x i64> @llvm.fshl.v8i64(<8 x i64>, <8 x i64>, <8 x i64>) - define <8 x i64> @vror_vv_v8i64(<8 x i64> %a, <8 x i64> %b) { ; CHECK-LABEL: vror_vv_v8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrsub-vp.ll index a30eeeaa6690e..91eb28cffc94d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrsub-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrsub-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <2 x i8> @llvm.vp.sub.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) - define <2 x i8> @vrsub_vx_v2i8(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_v2i8: ; CHECK: # %bb.0: @@ -50,8 +48,6 @@ define <2 x i8> @vrsub_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.sub.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) - define <4 x i8> @vrsub_vx_v4i8(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_v4i8: ; CHECK: # %bb.0: @@ -96,8 +92,6 @@ define <4 x i8> @vrsub_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ret <4 x i8> %v } -declare <8 x i8> @llvm.vp.sub.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) - define <8 x i8> @vrsub_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 
zeroext %evl) { ; CHECK-LABEL: vrsub_vx_v8i8: ; CHECK: # %bb.0: @@ -142,8 +136,6 @@ define <8 x i8> @vrsub_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.sub.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) - define <16 x i8> @vrsub_vx_v16i8(<16 x i8> %va, i8 %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_v16i8: ; CHECK: # %bb.0: @@ -188,8 +180,6 @@ define <16 x i8> @vrsub_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) { ret <16 x i8> %v } -declare <2 x i16> @llvm.vp.sub.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) - define <2 x i16> @vrsub_vx_v2i16(<2 x i16> %va, i16 %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_v2i16: ; CHECK: # %bb.0: @@ -234,8 +224,6 @@ define <2 x i16> @vrsub_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) { ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.sub.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) - define <4 x i16> @vrsub_vx_v4i16(<4 x i16> %va, i16 %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_v4i16: ; CHECK: # %bb.0: @@ -280,8 +268,6 @@ define <4 x i16> @vrsub_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.sub.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) - define <8 x i16> @vrsub_vx_v8i16(<8 x i16> %va, i16 %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_v8i16: ; CHECK: # %bb.0: @@ -326,8 +312,6 @@ define <8 x i16> @vrsub_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) { ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.sub.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) - define <16 x i16> @vrsub_vx_v16i16(<16 x i16> %va, i16 %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_v16i16: ; CHECK: # %bb.0: @@ -372,8 +356,6 @@ define <16 x i16> @vrsub_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) { ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.sub.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) - define <2 x i32> @vrsub_vx_v2i32(<2 x i32> %va, i32 %b, <2 x i1> %m, i32 
zeroext %evl) { ; CHECK-LABEL: vrsub_vx_v2i32: ; CHECK: # %bb.0: @@ -418,8 +400,6 @@ define <2 x i32> @vrsub_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) { ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.sub.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vrsub_vx_v4i32(<4 x i32> %va, i32 %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_v4i32: ; CHECK: # %bb.0: @@ -464,8 +444,6 @@ define <4 x i32> @vrsub_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.sub.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) - define <8 x i32> @vrsub_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_v8i32: ; CHECK: # %bb.0: @@ -510,8 +488,6 @@ define <8 x i32> @vrsub_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) { ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.sub.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32) - define <16 x i32> @vrsub_vx_v16i32(<16 x i32> %va, i32 %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_v16i32: ; CHECK: # %bb.0: @@ -556,8 +532,6 @@ define <16 x i32> @vrsub_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) { ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.sub.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) - define <2 x i64> @vrsub_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vrsub_vx_v2i64: ; RV32: # %bb.0: @@ -632,8 +606,6 @@ define <2 x i64> @vrsub_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) { ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.sub.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) - define <4 x i64> @vrsub_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vrsub_vx_v4i64: ; RV32: # %bb.0: @@ -708,8 +680,6 @@ define <4 x i64> @vrsub_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.sub.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) - define <8 x i64> @vrsub_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 
zeroext %evl) { ; RV32-LABEL: vrsub_vx_v8i64: ; RV32: # %bb.0: @@ -784,8 +754,6 @@ define <8 x i64> @vrsub_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) { ret <8 x i64> %v } -declare <16 x i64> @llvm.vp.sub.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32) - define <16 x i64> @vrsub_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vrsub_vx_v16i64: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll index 6ceb03c765fd7..acaa1e6fa002d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <8 x i7> @llvm.vp.sadd.sat.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32) - define <8 x i7> @vsadd_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_v8i7: ; CHECK: # %bb.0: @@ -25,8 +23,6 @@ define <8 x i7> @vsadd_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroe ret <8 x i7> %v } -declare <2 x i8> @llvm.vp.sadd.sat.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) - define <2 x i8> @vsadd_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_v2i8: ; CHECK: # %bb.0: @@ -91,8 +87,6 @@ define <2 x i8> @vsadd_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.sadd.sat.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) - define <4 x i8> @vsadd_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_v4i8: ; CHECK: # %bb.0: @@ -169,8 +163,6 @@ define <4 x i8> @vsadd_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ret <4 x i8> %v } -declare <5 x i8> @llvm.vp.sadd.sat.v5i8(<5 x i8>, <5 x i8>, <5 x i1>, i32) - define <5 x i8> @vsadd_vv_v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vsadd_vv_v5i8: ; CHECK: # %bb.0: @@ -235,8 +227,6 @@ define <5 x i8> @vsadd_vi_v5i8_unmasked(<5 x i8> %va, i32 zeroext %evl) { ret <5 x i8> %v } -declare <8 x i8> @llvm.vp.sadd.sat.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) - define <8 x i8> @vsadd_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_v8i8: ; CHECK: # %bb.0: @@ -301,8 +291,6 @@ define <8 x i8> @vsadd_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.sadd.sat.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) - define <16 x i8> @vsadd_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_v16i8: ; CHECK: # %bb.0: @@ -367,8 +355,6 @@ define <16 x i8> @vsadd_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) { ret <16 x i8> %v } -declare <256 x i8> @llvm.vp.sadd.sat.v258i8(<256 x i8>, <256 x i8>, <256 x i1>, i32) - define <256 x i8> @vsadd_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vi_v258i8: ; CHECK: # %bb.0: @@ -446,8 +432,6 @@ define <256 x i8> @vsadd_vi_v258i8_evl128(<256 x i8> %va, <256 x i1> %m) { ret <256 x i8> %v } -declare <2 x i16> @llvm.vp.sadd.sat.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) - define <2 x i16> @vsadd_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_v2i16: ; CHECK: # %bb.0: @@ -512,8 +496,6 @@ define <2 x i16> @vsadd_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) { ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.sadd.sat.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) - define <4 x i16> @vsadd_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_v4i16: ; CHECK: # %bb.0: @@ -578,8 +560,6 @@ define <4 x i16> @vsadd_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.sadd.sat.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) - define <8 x i16> @vsadd_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, 
i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_v8i16: ; CHECK: # %bb.0: @@ -644,8 +624,6 @@ define <8 x i16> @vsadd_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) { ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.sadd.sat.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) - define <16 x i16> @vsadd_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_v16i16: ; CHECK: # %bb.0: @@ -710,8 +688,6 @@ define <16 x i16> @vsadd_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) { ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.sadd.sat.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) - define <2 x i32> @vsadd_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_v2i32: ; CHECK: # %bb.0: @@ -776,8 +752,6 @@ define <2 x i32> @vsadd_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) { ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.sadd.sat.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vsadd_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_v4i32: ; CHECK: # %bb.0: @@ -842,8 +816,6 @@ define <4 x i32> @vsadd_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.sadd.sat.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) - define <8 x i32> @vsadd_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_v8i32: ; CHECK: # %bb.0: @@ -908,8 +880,6 @@ define <8 x i32> @vsadd_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) { ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.sadd.sat.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32) - define <16 x i32> @vsadd_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_v16i32: ; CHECK: # %bb.0: @@ -974,8 +944,6 @@ define <16 x i32> @vsadd_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) { ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.sadd.sat.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, 
i32) - define <2 x i64> @vsadd_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_v2i64: ; CHECK: # %bb.0: @@ -1070,8 +1038,6 @@ define <2 x i64> @vsadd_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) { ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.sadd.sat.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) - define <4 x i64> @vsadd_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_v4i64: ; CHECK: # %bb.0: @@ -1166,8 +1132,6 @@ define <4 x i64> @vsadd_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.sadd.sat.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) - define <8 x i64> @vsadd_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_v8i64: ; CHECK: # %bb.0: @@ -1262,8 +1226,6 @@ define <8 x i64> @vsadd_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) { ret <8 x i64> %v } -declare <16 x i64> @llvm.vp.sadd.sat.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32) - define <16 x i64> @vsadd_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_v16i64: ; CHECK: # %bb.0: @@ -1360,8 +1322,6 @@ define <16 x i64> @vsadd_vi_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) { ; Test that split-legalization works as expected. 
-declare <32 x i64> @llvm.vp.sadd.sat.v32i64(<32 x i64>, <32 x i64>, <32 x i1>, i32) - define <32 x i64> @vsadd_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vx_v32i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd.ll index 94c453b0edd26..105be4d87092a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8>, <2 x i8>) - define <2 x i8> @sadd_v2i8_vv(<2 x i8> %va, <2 x i8> %b) { ; CHECK-LABEL: sadd_v2i8_vv: ; CHECK: # %bb.0: @@ -38,8 +36,6 @@ define <2 x i8> @sadd_v2i8_vi(<2 x i8> %va) { ret <2 x i8> %v } -declare <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8>, <4 x i8>) - define <4 x i8> @sadd_v4i8_vv(<4 x i8> %va, <4 x i8> %b) { ; CHECK-LABEL: sadd_v4i8_vv: ; CHECK: # %bb.0: @@ -72,8 +68,6 @@ define <4 x i8> @sadd_v4i8_vi(<4 x i8> %va) { ret <4 x i8> %v } -declare <8 x i8> @llvm.sadd.sat.v8i8(<8 x i8>, <8 x i8>) - define <8 x i8> @sadd_v8i8_vv(<8 x i8> %va, <8 x i8> %b) { ; CHECK-LABEL: sadd_v8i8_vv: ; CHECK: # %bb.0: @@ -106,8 +100,6 @@ define <8 x i8> @sadd_v8i8_vi(<8 x i8> %va) { ret <8 x i8> %v } -declare <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8>, <16 x i8>) - define <16 x i8> @sadd_v16i8_vv(<16 x i8> %va, <16 x i8> %b) { ; CHECK-LABEL: sadd_v16i8_vv: ; CHECK: # %bb.0: @@ -140,8 +132,6 @@ define <16 x i8> @sadd_v16i8_vi(<16 x i8> %va) { ret <16 x i8> %v } -declare <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16>, <2 x i16>) - define <2 x i16> @sadd_v2i16_vv(<2 x i16> %va, <2 x i16> %b) { ; CHECK-LABEL: sadd_v2i16_vv: ; CHECK: # %bb.0: @@ -174,8 +164,6 @@ define <2 x i16> @sadd_v2i16_vi(<2 x i16> %va) { ret <2 x i16> %v } -declare <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16>, <4 x i16>) - define <4 x i16> @sadd_v4i16_vv(<4 
x i16> %va, <4 x i16> %b) { ; CHECK-LABEL: sadd_v4i16_vv: ; CHECK: # %bb.0: @@ -208,8 +196,6 @@ define <4 x i16> @sadd_v4i16_vi(<4 x i16> %va) { ret <4 x i16> %v } -declare <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16>, <8 x i16>) - define <8 x i16> @sadd_v8i16_vv(<8 x i16> %va, <8 x i16> %b) { ; CHECK-LABEL: sadd_v8i16_vv: ; CHECK: # %bb.0: @@ -242,8 +228,6 @@ define <8 x i16> @sadd_v8i16_vi(<8 x i16> %va) { ret <8 x i16> %v } -declare <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16>, <16 x i16>) - define <16 x i16> @sadd_v16i16_vv(<16 x i16> %va, <16 x i16> %b) { ; CHECK-LABEL: sadd_v16i16_vv: ; CHECK: # %bb.0: @@ -276,8 +260,6 @@ define <16 x i16> @sadd_v16i16_vi(<16 x i16> %va) { ret <16 x i16> %v } -declare <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32>, <2 x i32>) - define <2 x i32> @sadd_v2i32_vv(<2 x i32> %va, <2 x i32> %b) { ; CHECK-LABEL: sadd_v2i32_vv: ; CHECK: # %bb.0: @@ -322,8 +304,6 @@ define <2 x i32> @sadd_v2i32_vi(<2 x i32> %va) { ret <2 x i32> %v } -declare <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32>, <4 x i32>) - define <4 x i32> @sadd_v4i32_vv(<4 x i32> %va, <4 x i32> %b) { ; CHECK-LABEL: sadd_v4i32_vv: ; CHECK: # %bb.0: @@ -356,8 +336,6 @@ define <4 x i32> @sadd_v4i32_vi(<4 x i32> %va) { ret <4 x i32> %v } -declare <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32>, <8 x i32>) - define <8 x i32> @sadd_v8i32_vv(<8 x i32> %va, <8 x i32> %b) { ; CHECK-LABEL: sadd_v8i32_vv: ; CHECK: # %bb.0: @@ -390,8 +368,6 @@ define <8 x i32> @sadd_v8i32_vi(<8 x i32> %va) { ret <8 x i32> %v } -declare <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32>, <16 x i32>) - define <16 x i32> @sadd_v16i32_vv(<16 x i32> %va, <16 x i32> %b) { ; CHECK-LABEL: sadd_v16i32_vv: ; CHECK: # %bb.0: @@ -424,8 +400,6 @@ define <16 x i32> @sadd_v16i32_vi(<16 x i32> %va) { ret <16 x i32> %v } -declare <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64>, <2 x i64>) - define <2 x i64> @sadd_v2i64_vv(<2 x i64> %va, <2 x i64> %b) { ; CHECK-LABEL: sadd_v2i64_vv: ; CHECK: # %bb.0: @@ -472,8 +446,6 @@ define <2 x i64> @sadd_v2i64_vi(<2 x 
i64> %va) { ret <2 x i64> %v } -declare <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64>, <4 x i64>) - define <4 x i64> @sadd_v4i64_vv(<4 x i64> %va, <4 x i64> %b) { ; CHECK-LABEL: sadd_v4i64_vv: ; CHECK: # %bb.0: @@ -520,8 +492,6 @@ define <4 x i64> @sadd_v4i64_vi(<4 x i64> %va) { ret <4 x i64> %v } -declare <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64>, <8 x i64>) - define <8 x i64> @sadd_v8i64_vv(<8 x i64> %va, <8 x i64> %b) { ; CHECK-LABEL: sadd_v8i64_vv: ; CHECK: # %bb.0: @@ -568,8 +538,6 @@ define <8 x i64> @sadd_v8i64_vi(<8 x i64> %va) { ret <8 x i64> %v } -declare <16 x i64> @llvm.sadd.sat.v16i64(<16 x i64>, <16 x i64>) - define <16 x i64> @sadd_v16i64_vv(<16 x i64> %va, <16 x i64> %b) { ; CHECK-LABEL: sadd_v16i64_vv: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll index 2839efd40305b..9b3b8348d9b30 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <8 x i7> @llvm.vp.uadd.sat.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32) - define <8 x i7> @vsaddu_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_v8i7: ; CHECK: # %bb.0: @@ -21,8 +19,6 @@ define <8 x i7> @vsaddu_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zero ret <8 x i7> %v } -declare <2 x i8> @llvm.vp.uadd.sat.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) - define <2 x i8> @vsaddu_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_v2i8: ; CHECK: # %bb.0: @@ -87,8 +83,6 @@ define <2 x i8> @vsaddu_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.uadd.sat.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) - define <4 x i8> @vsaddu_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { 
; CHECK-LABEL: vsaddu_vv_v4i8: ; CHECK: # %bb.0: @@ -165,8 +159,6 @@ define <4 x i8> @vsaddu_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ret <4 x i8> %v } -declare <5 x i8> @llvm.vp.uadd.sat.v5i8(<5 x i8>, <5 x i8>, <5 x i1>, i32) - define <5 x i8> @vsaddu_vv_v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_v5i8: ; CHECK: # %bb.0: @@ -231,8 +223,6 @@ define <5 x i8> @vsaddu_vi_v5i8_unmasked(<5 x i8> %va, i32 zeroext %evl) { ret <5 x i8> %v } -declare <8 x i8> @llvm.vp.uadd.sat.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) - define <8 x i8> @vsaddu_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_v8i8: ; CHECK: # %bb.0: @@ -297,8 +287,6 @@ define <8 x i8> @vsaddu_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.uadd.sat.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) - define <16 x i8> @vsaddu_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_v16i8: ; CHECK: # %bb.0: @@ -363,8 +351,6 @@ define <16 x i8> @vsaddu_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) { ret <16 x i8> %v } -declare <256 x i8> @llvm.vp.uadd.sat.v258i8(<256 x i8>, <256 x i8>, <256 x i1>, i32) - define <256 x i8> @vsaddu_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vi_v258i8: ; CHECK: # %bb.0: @@ -442,8 +428,6 @@ define <256 x i8> @vsaddu_vi_v258i8_evl128(<256 x i8> %va, <256 x i1> %m) { ret <256 x i8> %v } -declare <2 x i16> @llvm.vp.uadd.sat.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) - define <2 x i16> @vsaddu_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_v2i16: ; CHECK: # %bb.0: @@ -508,8 +492,6 @@ define <2 x i16> @vsaddu_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) { ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.uadd.sat.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) - define <4 x i16> @vsaddu_vv_v4i16(<4 x i16> %va, <4 x 
i16> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_v4i16: ; CHECK: # %bb.0: @@ -574,8 +556,6 @@ define <4 x i16> @vsaddu_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.uadd.sat.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) - define <8 x i16> @vsaddu_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_v8i16: ; CHECK: # %bb.0: @@ -640,8 +620,6 @@ define <8 x i16> @vsaddu_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) { ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.uadd.sat.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) - define <16 x i16> @vsaddu_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_v16i16: ; CHECK: # %bb.0: @@ -706,8 +684,6 @@ define <16 x i16> @vsaddu_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) { ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.uadd.sat.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) - define <2 x i32> @vsaddu_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_v2i32: ; CHECK: # %bb.0: @@ -772,8 +748,6 @@ define <2 x i32> @vsaddu_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) { ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.uadd.sat.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vsaddu_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_v4i32: ; CHECK: # %bb.0: @@ -838,8 +812,6 @@ define <4 x i32> @vsaddu_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.uadd.sat.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) - define <8 x i32> @vsaddu_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_v8i32: ; CHECK: # %bb.0: @@ -904,8 +876,6 @@ define <8 x i32> @vsaddu_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) { ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.uadd.sat.v16i32(<16 x 
i32>, <16 x i32>, <16 x i1>, i32) - define <16 x i32> @vsaddu_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_v16i32: ; CHECK: # %bb.0: @@ -970,8 +940,6 @@ define <16 x i32> @vsaddu_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) { ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.uadd.sat.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) - define <2 x i64> @vsaddu_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_v2i64: ; CHECK: # %bb.0: @@ -1066,8 +1034,6 @@ define <2 x i64> @vsaddu_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) { ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.uadd.sat.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) - define <4 x i64> @vsaddu_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_v4i64: ; CHECK: # %bb.0: @@ -1162,8 +1128,6 @@ define <4 x i64> @vsaddu_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.uadd.sat.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) - define <8 x i64> @vsaddu_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_v8i64: ; CHECK: # %bb.0: @@ -1258,8 +1222,6 @@ define <8 x i64> @vsaddu_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) { ret <8 x i64> %v } -declare <16 x i64> @llvm.vp.uadd.sat.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32) - define <16 x i64> @vsaddu_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_v16i64: ; CHECK: # %bb.0: @@ -1356,8 +1318,6 @@ define <16 x i64> @vsaddu_vi_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) { ; Test that split-legalization works as expected. 
-declare <32 x i64> @llvm.vp.uadd.sat.v32i64(<32 x i64>, <32 x i64>, <32 x i1>, i32) - define <32 x i64> @vsaddu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vx_v32i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu.ll index 42fa433830801..620c0e89db50d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8>, <2 x i8>) - define <2 x i8> @uadd_v2i8_vv(<2 x i8> %va, <2 x i8> %b) { ; CHECK-LABEL: uadd_v2i8_vv: ; CHECK: # %bb.0: @@ -38,8 +36,6 @@ define <2 x i8> @uadd_v2i8_vi(<2 x i8> %va) { ret <2 x i8> %v } -declare <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8>, <4 x i8>) - define <4 x i8> @uadd_v4i8_vv(<4 x i8> %va, <4 x i8> %b) { ; CHECK-LABEL: uadd_v4i8_vv: ; CHECK: # %bb.0: @@ -72,8 +68,6 @@ define <4 x i8> @uadd_v4i8_vi(<4 x i8> %va) { ret <4 x i8> %v } -declare <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8>, <8 x i8>) - define <8 x i8> @uadd_v8i8_vv(<8 x i8> %va, <8 x i8> %b) { ; CHECK-LABEL: uadd_v8i8_vv: ; CHECK: # %bb.0: @@ -106,8 +100,6 @@ define <8 x i8> @uadd_v8i8_vi(<8 x i8> %va) { ret <8 x i8> %v } -declare <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8>, <16 x i8>) - define <16 x i8> @uadd_v16i8_vv(<16 x i8> %va, <16 x i8> %b) { ; CHECK-LABEL: uadd_v16i8_vv: ; CHECK: # %bb.0: @@ -140,8 +132,6 @@ define <16 x i8> @uadd_v16i8_vi(<16 x i8> %va) { ret <16 x i8> %v } -declare <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16>, <2 x i16>) - define <2 x i16> @uadd_v2i16_vv(<2 x i16> %va, <2 x i16> %b) { ; CHECK-LABEL: uadd_v2i16_vv: ; CHECK: # %bb.0: @@ -174,8 +164,6 @@ define <2 x i16> @uadd_v2i16_vi(<2 x i16> %va) { ret <2 x i16> %v } -declare <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16>, <4 x i16>) - define <4 x i16> 
@uadd_v4i16_vv(<4 x i16> %va, <4 x i16> %b) { ; CHECK-LABEL: uadd_v4i16_vv: ; CHECK: # %bb.0: @@ -208,8 +196,6 @@ define <4 x i16> @uadd_v4i16_vi(<4 x i16> %va) { ret <4 x i16> %v } -declare <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16>, <8 x i16>) - define <8 x i16> @uadd_v8i16_vv(<8 x i16> %va, <8 x i16> %b) { ; CHECK-LABEL: uadd_v8i16_vv: ; CHECK: # %bb.0: @@ -242,8 +228,6 @@ define <8 x i16> @uadd_v8i16_vi(<8 x i16> %va) { ret <8 x i16> %v } -declare <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16>, <16 x i16>) - define <16 x i16> @uadd_v16i16_vv(<16 x i16> %va, <16 x i16> %b) { ; CHECK-LABEL: uadd_v16i16_vv: ; CHECK: # %bb.0: @@ -276,8 +260,6 @@ define <16 x i16> @uadd_v16i16_vi(<16 x i16> %va) { ret <16 x i16> %v } -declare <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32>, <2 x i32>) - define <2 x i32> @uadd_v2i32_vv(<2 x i32> %va, <2 x i32> %b) { ; CHECK-LABEL: uadd_v2i32_vv: ; CHECK: # %bb.0: @@ -322,8 +304,6 @@ define <2 x i32> @uadd_v2i32_vi(<2 x i32> %va) { ret <2 x i32> %v } -declare <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32>, <4 x i32>) - define <4 x i32> @uadd_v4i32_vv(<4 x i32> %va, <4 x i32> %b) { ; CHECK-LABEL: uadd_v4i32_vv: ; CHECK: # %bb.0: @@ -356,8 +336,6 @@ define <4 x i32> @uadd_v4i32_vi(<4 x i32> %va) { ret <4 x i32> %v } -declare <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32>, <8 x i32>) - define <8 x i32> @uadd_v8i32_vv(<8 x i32> %va, <8 x i32> %b) { ; CHECK-LABEL: uadd_v8i32_vv: ; CHECK: # %bb.0: @@ -390,8 +368,6 @@ define <8 x i32> @uadd_v8i32_vi(<8 x i32> %va) { ret <8 x i32> %v } -declare <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32>, <16 x i32>) - define <16 x i32> @uadd_v16i32_vv(<16 x i32> %va, <16 x i32> %b) { ; CHECK-LABEL: uadd_v16i32_vv: ; CHECK: # %bb.0: @@ -424,8 +400,6 @@ define <16 x i32> @uadd_v16i32_vi(<16 x i32> %va) { ret <16 x i32> %v } -declare <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64>, <2 x i64>) - define <2 x i64> @uadd_v2i64_vv(<2 x i64> %va, <2 x i64> %b) { ; CHECK-LABEL: uadd_v2i64_vv: ; CHECK: # %bb.0: @@ -472,8 +446,6 @@ define <2 x i64> 
@uadd_v2i64_vi(<2 x i64> %va) { ret <2 x i64> %v } -declare <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64>, <4 x i64>) - define <4 x i64> @uadd_v4i64_vv(<4 x i64> %va, <4 x i64> %b) { ; CHECK-LABEL: uadd_v4i64_vv: ; CHECK: # %bb.0: @@ -520,8 +492,6 @@ define <4 x i64> @uadd_v4i64_vi(<4 x i64> %va) { ret <4 x i64> %v } -declare <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64>, <8 x i64>) - define <8 x i64> @uadd_v8i64_vv(<8 x i64> %va, <8 x i64> %b) { ; CHECK-LABEL: uadd_v8i64_vv: ; CHECK: # %bb.0: @@ -568,8 +538,6 @@ define <8 x i64> @uadd_v8i64_vi(<8 x i64> %va) { ret <8 x i64> %v } -declare <16 x i64> @llvm.uadd.sat.v16i64(<16 x i64>, <16 x i64>) - define <16 x i64> @uadd_v16i64_vv(<16 x i64> %va, <16 x i64> %b) { ; CHECK-LABEL: uadd_v16i64_vv: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp-bf16.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp-bf16.ll index 31ab6699d7c51..fe2a707c2d550 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp-bf16.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp-bf16.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+v,+m,+zvfbfmin -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare <2 x bfloat> @llvm.vp.select.v2bf16(<2 x i1>, <2 x bfloat>, <2 x bfloat>, i32) - define <2 x bfloat> @select_v2bf16(<2 x i1> %a, <2 x bfloat> %b, <2 x bfloat> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v2bf16: ; CHECK: # %bb.0: @@ -16,8 +14,6 @@ define <2 x bfloat> @select_v2bf16(<2 x i1> %a, <2 x bfloat> %b, <2 x bfloat> %c ret <2 x bfloat> %v } -declare <4 x bfloat> @llvm.vp.select.v4bf16(<4 x i1>, <4 x bfloat>, <4 x bfloat>, i32) - define <4 x bfloat> @select_v4bf16(<4 x i1> %a, <4 x bfloat> %b, <4 x bfloat> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v4bf16: ; CHECK: # %bb.0: @@ -28,8 +24,6 @@ define <4 x bfloat> @select_v4bf16(<4 x i1> %a, <4 x bfloat> %b, <4 x bfloat> %c ret <4 x bfloat> %v } -declare <8 x bfloat> @llvm.vp.select.v8bf16(<8 x i1>, <8 x 
bfloat>, <8 x bfloat>, i32) - define <8 x bfloat> @select_v8bf16(<8 x i1> %a, <8 x bfloat> %b, <8 x bfloat> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v8bf16: ; CHECK: # %bb.0: @@ -40,8 +34,6 @@ define <8 x bfloat> @select_v8bf16(<8 x i1> %a, <8 x bfloat> %b, <8 x bfloat> %c ret <8 x bfloat> %v } -declare <16 x bfloat> @llvm.vp.select.v16bf16(<16 x i1>, <16 x bfloat>, <16 x bfloat>, i32) - define <16 x bfloat> @select_v16bf16(<16 x i1> %a, <16 x bfloat> %b, <16 x bfloat> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v16bf16: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll index 93f024c2b77a5..f2f9f90f386c0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v,+m -target-abi=lp64d -riscv-v-vector-bits-min=128 \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare <1 x i1> @llvm.vp.select.v1i1(<1 x i1>, <1 x i1>, <1 x i1>, i32) - define <1 x i1> @select_v1i1(<1 x i1> %a, <1 x i1> %b, <1 x i1> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v1i1: ; CHECK: # %bb.0: @@ -22,8 +20,6 @@ define <1 x i1> @select_v1i1(<1 x i1> %a, <1 x i1> %b, <1 x i1> %c, i32 zeroext ret <1 x i1> %v } -declare <2 x i1> @llvm.vp.select.v2i1(<2 x i1>, <2 x i1>, <2 x i1>, i32) - define <2 x i1> @select_v2i1(<2 x i1> %a, <2 x i1> %b, <2 x i1> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v2i1: ; CHECK: # %bb.0: @@ -36,8 +32,6 @@ define <2 x i1> @select_v2i1(<2 x i1> %a, <2 x i1> %b, <2 x i1> %c, i32 zeroext ret <2 x i1> %v } -declare <4 x i1> @llvm.vp.select.v4i1(<4 x i1>, <4 x i1>, <4 x i1>, i32) - define <4 x i1> @select_v4i1(<4 x i1> %a, <4 x i1> %b, <4 x i1> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v4i1: ; CHECK: # %bb.0: @@ -50,8 +44,6 @@ define <4 x i1> @select_v4i1(<4 x i1> %a, <4 x i1> %b, <4 x i1> %c, i32 zeroext ret <4 x i1> %v } -declare 
<8 x i1> @llvm.vp.select.v8i1(<8 x i1>, <8 x i1>, <8 x i1>, i32) - define <8 x i1> @select_v8i1(<8 x i1> %a, <8 x i1> %b, <8 x i1> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v8i1: ; CHECK: # %bb.0: @@ -64,8 +56,6 @@ define <8 x i1> @select_v8i1(<8 x i1> %a, <8 x i1> %b, <8 x i1> %c, i32 zeroext ret <8 x i1> %v } -declare <16 x i1> @llvm.vp.select.v16i1(<16 x i1>, <16 x i1>, <16 x i1>, i32) - define <16 x i1> @select_v16i1(<16 x i1> %a, <16 x i1> %b, <16 x i1> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v16i1: ; CHECK: # %bb.0: @@ -78,8 +68,6 @@ define <16 x i1> @select_v16i1(<16 x i1> %a, <16 x i1> %b, <16 x i1> %c, i32 zer ret <16 x i1> %v } -declare <8 x i7> @llvm.vp.select.v8i7(<8 x i1>, <8 x i7>, <8 x i7>, i32) - define <8 x i7> @select_v8i7(<8 x i1> %a, <8 x i7> %b, <8 x i7> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v8i7: ; CHECK: # %bb.0: @@ -90,8 +78,6 @@ define <8 x i7> @select_v8i7(<8 x i1> %a, <8 x i7> %b, <8 x i7> %c, i32 zeroext ret <8 x i7> %v } -declare <2 x i8> @llvm.vp.select.v2i8(<2 x i1>, <2 x i8>, <2 x i8>, i32) - define <2 x i8> @select_v2i8(<2 x i1> %a, <2 x i8> %b, <2 x i8> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v2i8: ; CHECK: # %bb.0: @@ -102,8 +88,6 @@ define <2 x i8> @select_v2i8(<2 x i1> %a, <2 x i8> %b, <2 x i8> %c, i32 zeroext ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.select.v4i8(<4 x i1>, <4 x i8>, <4 x i8>, i32) - define <4 x i8> @select_v4i8(<4 x i1> %a, <4 x i8> %b, <4 x i8> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v4i8: ; CHECK: # %bb.0: @@ -114,8 +98,6 @@ define <4 x i8> @select_v4i8(<4 x i1> %a, <4 x i8> %b, <4 x i8> %c, i32 zeroext ret <4 x i8> %v } -declare <5 x i8> @llvm.vp.select.v5i8(<5 x i1>, <5 x i8>, <5 x i8>, i32) - define <5 x i8> @select_v5i8(<5 x i1> %a, <5 x i8> %b, <5 x i8> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v5i8: ; CHECK: # %bb.0: @@ -126,8 +108,6 @@ define <5 x i8> @select_v5i8(<5 x i1> %a, <5 x i8> %b, <5 x i8> %c, i32 zeroext ret <5 x i8> %v } -declare <8 x i8> 
@llvm.vp.select.v8i8(<8 x i1>, <8 x i8>, <8 x i8>, i32) - define <8 x i8> @select_v8i8(<8 x i1> %a, <8 x i8> %b, <8 x i8> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v8i8: ; CHECK: # %bb.0: @@ -138,8 +118,6 @@ define <8 x i8> @select_v8i8(<8 x i1> %a, <8 x i8> %b, <8 x i8> %c, i32 zeroext ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.select.v16i8(<16 x i1>, <16 x i8>, <16 x i8>, i32) - define <16 x i8> @select_v16i8(<16 x i1> %a, <16 x i8> %b, <16 x i8> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v16i8: ; CHECK: # %bb.0: @@ -150,8 +128,6 @@ define <16 x i8> @select_v16i8(<16 x i1> %a, <16 x i8> %b, <16 x i8> %c, i32 zer ret <16 x i8> %v } -declare <256 x i8> @llvm.vp.select.v256i8(<256 x i1>, <256 x i8>, <256 x i8>, i32) - define <256 x i8> @select_v256i8(<256 x i1> %a, <256 x i8> %b, <256 x i8> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v256i8: ; CHECK: # %bb.0: @@ -223,8 +199,6 @@ define <256 x i8> @select_evl_v256i8(<256 x i1> %a, <256 x i8> %b, <256 x i8> %c ret <256 x i8> %v } -declare <2 x i16> @llvm.vp.select.v2i16(<2 x i1>, <2 x i16>, <2 x i16>, i32) - define <2 x i16> @select_v2i16(<2 x i1> %a, <2 x i16> %b, <2 x i16> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v2i16: ; CHECK: # %bb.0: @@ -235,8 +209,6 @@ define <2 x i16> @select_v2i16(<2 x i1> %a, <2 x i16> %b, <2 x i16> %c, i32 zero ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.select.v4i16(<4 x i1>, <4 x i16>, <4 x i16>, i32) - define <4 x i16> @select_v4i16(<4 x i1> %a, <4 x i16> %b, <4 x i16> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v4i16: ; CHECK: # %bb.0: @@ -247,8 +219,6 @@ define <4 x i16> @select_v4i16(<4 x i1> %a, <4 x i16> %b, <4 x i16> %c, i32 zero ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.select.v8i16(<8 x i1>, <8 x i16>, <8 x i16>, i32) - define <8 x i16> @select_v8i16(<8 x i1> %a, <8 x i16> %b, <8 x i16> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v8i16: ; CHECK: # %bb.0: @@ -259,8 +229,6 @@ define <8 x i16> @select_v8i16(<8 x i1> %a, <8 x i16> %b, <8 x i16> %c, i32 
zero ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.select.v16i16(<16 x i1>, <16 x i16>, <16 x i16>, i32) - define <16 x i16> @select_v16i16(<16 x i1> %a, <16 x i16> %b, <16 x i16> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v16i16: ; CHECK: # %bb.0: @@ -271,8 +239,6 @@ define <16 x i16> @select_v16i16(<16 x i1> %a, <16 x i16> %b, <16 x i16> %c, i32 ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.select.v2i32(<2 x i1>, <2 x i32>, <2 x i32>, i32) - define <2 x i32> @select_v2i32(<2 x i1> %a, <2 x i32> %b, <2 x i32> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v2i32: ; CHECK: # %bb.0: @@ -283,8 +249,6 @@ define <2 x i32> @select_v2i32(<2 x i1> %a, <2 x i32> %b, <2 x i32> %c, i32 zero ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.select.v4i32(<4 x i1>, <4 x i32>, <4 x i32>, i32) - define <4 x i32> @select_v4i32(<4 x i1> %a, <4 x i32> %b, <4 x i32> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v4i32: ; CHECK: # %bb.0: @@ -295,8 +259,6 @@ define <4 x i32> @select_v4i32(<4 x i1> %a, <4 x i32> %b, <4 x i32> %c, i32 zero ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.select.v8i32(<8 x i1>, <8 x i32>, <8 x i32>, i32) - define <8 x i32> @select_v8i32(<8 x i1> %a, <8 x i32> %b, <8 x i32> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v8i32: ; CHECK: # %bb.0: @@ -307,8 +269,6 @@ define <8 x i32> @select_v8i32(<8 x i1> %a, <8 x i32> %b, <8 x i32> %c, i32 zero ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.select.v16i32(<16 x i1>, <16 x i32>, <16 x i32>, i32) - define <16 x i32> @select_v16i32(<16 x i1> %a, <16 x i32> %b, <16 x i32> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v16i32: ; CHECK: # %bb.0: @@ -319,8 +279,6 @@ define <16 x i32> @select_v16i32(<16 x i1> %a, <16 x i32> %b, <16 x i32> %c, i32 ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.select.v2i64(<2 x i1>, <2 x i64>, <2 x i64>, i32) - define <2 x i64> @select_v2i64(<2 x i1> %a, <2 x i64> %b, <2 x i64> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v2i64: ; CHECK: # %bb.0: @@ -331,8 +289,6 @@ define <2 x i64> 
@select_v2i64(<2 x i1> %a, <2 x i64> %b, <2 x i64> %c, i32 zero ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.select.v4i64(<4 x i1>, <4 x i64>, <4 x i64>, i32) - define <4 x i64> @select_v4i64(<4 x i1> %a, <4 x i64> %b, <4 x i64> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v4i64: ; CHECK: # %bb.0: @@ -343,8 +299,6 @@ define <4 x i64> @select_v4i64(<4 x i1> %a, <4 x i64> %b, <4 x i64> %c, i32 zero ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.select.v8i64(<8 x i1>, <8 x i64>, <8 x i64>, i32) - define <8 x i64> @select_v8i64(<8 x i1> %a, <8 x i64> %b, <8 x i64> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v8i64: ; CHECK: # %bb.0: @@ -355,8 +309,6 @@ define <8 x i64> @select_v8i64(<8 x i1> %a, <8 x i64> %b, <8 x i64> %c, i32 zero ret <8 x i64> %v } -declare <16 x i64> @llvm.vp.select.v16i64(<16 x i1>, <16 x i64>, <16 x i64>, i32) - define <16 x i64> @select_v16i64(<16 x i1> %a, <16 x i64> %b, <16 x i64> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v16i64: ; CHECK: # %bb.0: @@ -367,8 +319,6 @@ define <16 x i64> @select_v16i64(<16 x i1> %a, <16 x i64> %b, <16 x i64> %c, i32 ret <16 x i64> %v } -declare <32 x i64> @llvm.vp.select.v32i64(<32 x i1>, <32 x i64>, <32 x i64>, i32) - define <32 x i64> @select_v32i64(<32 x i1> %a, <32 x i64> %b, <32 x i64> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v32i64: ; CHECK: # %bb.0: @@ -458,8 +408,6 @@ define <32 x i64> @select_evl_v32i64(<32 x i1> %a, <32 x i64> %b, <32 x i64> %c) ret <32 x i64> %v } -declare <2 x half> @llvm.vp.select.v2f16(<2 x i1>, <2 x half>, <2 x half>, i32) - define <2 x half> @select_v2f16(<2 x i1> %a, <2 x half> %b, <2 x half> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v2f16: ; CHECK: # %bb.0: @@ -470,8 +418,6 @@ define <2 x half> @select_v2f16(<2 x i1> %a, <2 x half> %b, <2 x half> %c, i32 z ret <2 x half> %v } -declare <4 x half> @llvm.vp.select.v4f16(<4 x i1>, <4 x half>, <4 x half>, i32) - define <4 x half> @select_v4f16(<4 x i1> %a, <4 x half> %b, <4 x half> %c, i32 zeroext %evl) { ; 
CHECK-LABEL: select_v4f16: ; CHECK: # %bb.0: @@ -482,8 +428,6 @@ define <4 x half> @select_v4f16(<4 x i1> %a, <4 x half> %b, <4 x half> %c, i32 z ret <4 x half> %v } -declare <8 x half> @llvm.vp.select.v8f16(<8 x i1>, <8 x half>, <8 x half>, i32) - define <8 x half> @select_v8f16(<8 x i1> %a, <8 x half> %b, <8 x half> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v8f16: ; CHECK: # %bb.0: @@ -494,8 +438,6 @@ define <8 x half> @select_v8f16(<8 x i1> %a, <8 x half> %b, <8 x half> %c, i32 z ret <8 x half> %v } -declare <16 x half> @llvm.vp.select.v16f16(<16 x i1>, <16 x half>, <16 x half>, i32) - define <16 x half> @select_v16f16(<16 x i1> %a, <16 x half> %b, <16 x half> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v16f16: ; CHECK: # %bb.0: @@ -506,8 +448,6 @@ define <16 x half> @select_v16f16(<16 x i1> %a, <16 x half> %b, <16 x half> %c, ret <16 x half> %v } -declare <2 x float> @llvm.vp.select.v2f32(<2 x i1>, <2 x float>, <2 x float>, i32) - define <2 x float> @select_v2f32(<2 x i1> %a, <2 x float> %b, <2 x float> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v2f32: ; CHECK: # %bb.0: @@ -518,8 +458,6 @@ define <2 x float> @select_v2f32(<2 x i1> %a, <2 x float> %b, <2 x float> %c, i3 ret <2 x float> %v } -declare <4 x float> @llvm.vp.select.v4f32(<4 x i1>, <4 x float>, <4 x float>, i32) - define <4 x float> @select_v4f32(<4 x i1> %a, <4 x float> %b, <4 x float> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v4f32: ; CHECK: # %bb.0: @@ -530,8 +468,6 @@ define <4 x float> @select_v4f32(<4 x i1> %a, <4 x float> %b, <4 x float> %c, i3 ret <4 x float> %v } -declare <8 x float> @llvm.vp.select.v8f32(<8 x i1>, <8 x float>, <8 x float>, i32) - define <8 x float> @select_v8f32(<8 x i1> %a, <8 x float> %b, <8 x float> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v8f32: ; CHECK: # %bb.0: @@ -542,8 +478,6 @@ define <8 x float> @select_v8f32(<8 x i1> %a, <8 x float> %b, <8 x float> %c, i3 ret <8 x float> %v } -declare <16 x float> @llvm.vp.select.v16f32(<16 x i1>, <16 x float>, 
<16 x float>, i32) - define <16 x float> @select_v16f32(<16 x i1> %a, <16 x float> %b, <16 x float> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v16f32: ; CHECK: # %bb.0: @@ -554,8 +488,6 @@ define <16 x float> @select_v16f32(<16 x i1> %a, <16 x float> %b, <16 x float> % ret <16 x float> %v } -declare <64 x float> @llvm.vp.select.v64f32(<64 x i1>, <64 x float>, <64 x float>, i32) - define <64 x float> @select_v64f32(<64 x i1> %a, <64 x float> %b, <64 x float> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v64f32: ; CHECK: # %bb.0: @@ -600,8 +532,6 @@ define <64 x float> @select_v64f32(<64 x i1> %a, <64 x float> %b, <64 x float> % ret <64 x float> %v } -declare <2 x double> @llvm.vp.select.v2f64(<2 x i1>, <2 x double>, <2 x double>, i32) - define <2 x double> @select_v2f64(<2 x i1> %a, <2 x double> %b, <2 x double> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v2f64: ; CHECK: # %bb.0: @@ -612,8 +542,6 @@ define <2 x double> @select_v2f64(<2 x i1> %a, <2 x double> %b, <2 x double> %c, ret <2 x double> %v } -declare <4 x double> @llvm.vp.select.v4f64(<4 x i1>, <4 x double>, <4 x double>, i32) - define <4 x double> @select_v4f64(<4 x i1> %a, <4 x double> %b, <4 x double> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v4f64: ; CHECK: # %bb.0: @@ -624,8 +552,6 @@ define <4 x double> @select_v4f64(<4 x i1> %a, <4 x double> %b, <4 x double> %c, ret <4 x double> %v } -declare <8 x double> @llvm.vp.select.v8f64(<8 x i1>, <8 x double>, <8 x double>, i32) - define <8 x double> @select_v8f64(<8 x i1> %a, <8 x double> %b, <8 x double> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v8f64: ; CHECK: # %bb.0: @@ -636,8 +562,6 @@ define <8 x double> @select_v8f64(<8 x i1> %a, <8 x double> %b, <8 x double> %c, ret <8 x double> %v } -declare <16 x double> @llvm.vp.select.v16f64(<16 x i1>, <16 x double>, <16 x double>, i32) - define <16 x double> @select_v16f64(<16 x i1> %a, <16 x double> %b, <16 x double> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v16f64: ; CHECK: # %bb.0: diff --git 
a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vshl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vshl-vp.ll index 16a0fddfa9827..7730d6e5e1312 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vshl-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vshl-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <8 x i7> @llvm.vp.shl.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32) - define <8 x i7> @vsll_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v8i7: ; CHECK: # %bb.0: @@ -18,8 +16,6 @@ define <8 x i7> @vsll_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroex ret <8 x i7> %v } -declare <2 x i8> @llvm.vp.shl.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) - define <2 x i8> @vsll_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v2i8: ; CHECK: # %bb.0: @@ -84,8 +80,6 @@ define <2 x i8> @vsll_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) { ret <2 x i8> %v } -declare <3 x i8> @llvm.vp.shl.v3i8(<3 x i8>, <3 x i8>, <3 x i1>, i32) - define <3 x i8> @vsll_vv_v3i8(<3 x i8> %va, <3 x i8> %b, <3 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v3i8: ; CHECK: # %bb.0: @@ -96,8 +90,6 @@ define <3 x i8> @vsll_vv_v3i8(<3 x i8> %va, <3 x i8> %b, <3 x i1> %m, i32 zeroex ret <3 x i8> %v } -declare <4 x i8> @llvm.vp.shl.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) - define <4 x i8> @vsll_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v4i8: ; CHECK: # %bb.0: @@ -162,8 +154,6 @@ define <4 x i8> @vsll_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ret <4 x i8> %v } -declare <8 x i8> @llvm.vp.shl.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) - define <8 x i8> @vsll_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v8i8: ; CHECK: # %bb.0: @@ -228,8 +218,6 @@ define <8 x i8> @vsll_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) { 
ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.shl.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) - define <16 x i8> @vsll_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v16i8: ; CHECK: # %bb.0: @@ -294,8 +282,6 @@ define <16 x i8> @vsll_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) { ret <16 x i8> %v } -declare <2 x i16> @llvm.vp.shl.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) - define <2 x i16> @vsll_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v2i16: ; CHECK: # %bb.0: @@ -360,8 +346,6 @@ define <2 x i16> @vsll_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) { ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.shl.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) - define <4 x i16> @vsll_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v4i16: ; CHECK: # %bb.0: @@ -426,8 +410,6 @@ define <4 x i16> @vsll_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.shl.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) - define <8 x i16> @vsll_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v8i16: ; CHECK: # %bb.0: @@ -492,8 +474,6 @@ define <8 x i16> @vsll_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) { ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.shl.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) - define <16 x i16> @vsll_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v16i16: ; CHECK: # %bb.0: @@ -558,8 +538,6 @@ define <16 x i16> @vsll_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) { ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.shl.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) - define <2 x i32> @vsll_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v2i32: ; CHECK: # %bb.0: @@ -624,8 +602,6 @@ define <2 x i32> @vsll_vi_v2i32_unmasked(<2 x i32> %va, 
i32 zeroext %evl) { ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.shl.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vsll_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v4i32: ; CHECK: # %bb.0: @@ -690,8 +666,6 @@ define <4 x i32> @vsll_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.shl.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) - define <8 x i32> @vsll_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v8i32: ; CHECK: # %bb.0: @@ -756,8 +730,6 @@ define <8 x i32> @vsll_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) { ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.shl.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32) - define <16 x i32> @vsll_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v16i32: ; CHECK: # %bb.0: @@ -822,8 +794,6 @@ define <16 x i32> @vsll_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) { ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.shl.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) - define <2 x i64> @vsll_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v2i64: ; CHECK: # %bb.0: @@ -900,8 +870,6 @@ define <2 x i64> @vsll_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) { ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.shl.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) - define <4 x i64> @vsll_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v4i64: ; CHECK: # %bb.0: @@ -978,8 +946,6 @@ define <4 x i64> @vsll_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.shl.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) - define <8 x i64> @vsll_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v8i64: ; CHECK: # %bb.0: @@ -1056,8 +1022,6 @@ define <8 x i64> 
@vsll_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) { ret <8 x i64> %v } -declare <16 x i64> @llvm.vp.shl.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32) - define <16 x i64> @vsll_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v16i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsra-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsra-vp.ll index 180fafa9659b1..1d0c3a6937b54 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsra-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsra-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <8 x i7> @llvm.vp.ashr.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32) - define <8 x i7> @vsra_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v8i7: ; CHECK: # %bb.0: @@ -20,8 +18,6 @@ define <8 x i7> @vsra_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroex ret <8 x i7> %v } -declare <2 x i8> @llvm.vp.ashr.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) - define <2 x i8> @vsra_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v2i8: ; CHECK: # %bb.0: @@ -86,8 +82,6 @@ define <2 x i8> @vsra_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.ashr.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) - define <4 x i8> @vsra_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v4i8: ; CHECK: # %bb.0: @@ -152,8 +146,6 @@ define <4 x i8> @vsra_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ret <4 x i8> %v } -declare <7 x i8> @llvm.vp.ashr.v7i8(<7 x i8>, <7 x i8>, <7 x i1>, i32) - define <7 x i8> @vsra_vv_v7i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v7i8: ; CHECK: # %bb.0: @@ -164,8 +156,6 @@ define <7 x i8> @vsra_vv_v7i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> %m, i32 
zeroex ret <7 x i8> %v } -declare <8 x i8> @llvm.vp.ashr.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) - define <8 x i8> @vsra_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v8i8: ; CHECK: # %bb.0: @@ -230,8 +220,6 @@ define <8 x i8> @vsra_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.ashr.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) - define <16 x i8> @vsra_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v16i8: ; CHECK: # %bb.0: @@ -296,8 +284,6 @@ define <16 x i8> @vsra_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) { ret <16 x i8> %v } -declare <2 x i16> @llvm.vp.ashr.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) - define <2 x i16> @vsra_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v2i16: ; CHECK: # %bb.0: @@ -362,8 +348,6 @@ define <2 x i16> @vsra_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) { ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.ashr.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) - define <4 x i16> @vsra_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v4i16: ; CHECK: # %bb.0: @@ -428,8 +412,6 @@ define <4 x i16> @vsra_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.ashr.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) - define <8 x i16> @vsra_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v8i16: ; CHECK: # %bb.0: @@ -494,8 +476,6 @@ define <8 x i16> @vsra_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) { ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.ashr.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) - define <16 x i16> @vsra_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v16i16: ; CHECK: # %bb.0: @@ -560,8 +540,6 @@ define <16 x i16> @vsra_vi_v16i16_unmasked(<16 x i16> %va, 
i32 zeroext %evl) { ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.ashr.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) - define <2 x i32> @vsra_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v2i32: ; CHECK: # %bb.0: @@ -626,8 +604,6 @@ define <2 x i32> @vsra_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) { ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.ashr.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vsra_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v4i32: ; CHECK: # %bb.0: @@ -692,8 +668,6 @@ define <4 x i32> @vsra_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.ashr.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) - define <8 x i32> @vsra_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v8i32: ; CHECK: # %bb.0: @@ -758,8 +732,6 @@ define <8 x i32> @vsra_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) { ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.ashr.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32) - define <16 x i32> @vsra_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v16i32: ; CHECK: # %bb.0: @@ -824,8 +796,6 @@ define <16 x i32> @vsra_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) { ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.ashr.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) - define <2 x i64> @vsra_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v2i64: ; CHECK: # %bb.0: @@ -902,8 +872,6 @@ define <2 x i64> @vsra_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) { ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.ashr.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) - define <4 x i64> @vsra_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v4i64: ; CHECK: # %bb.0: @@ -980,8 +948,6 @@ define <4 x i64> 
@vsra_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.ashr.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) - define <8 x i64> @vsra_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v8i64: ; CHECK: # %bb.0: @@ -1058,8 +1024,6 @@ define <8 x i64> @vsra_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) { ret <8 x i64> %v } -declare <16 x i64> @llvm.vp.ashr.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32) - define <16 x i64> @vsra_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v16i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsrl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsrl-vp.ll index 22f04803eadd7..c8659b6d9739e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsrl-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsrl-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <8 x i7> @llvm.vp.lshr.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32) - define <8 x i7> @vsrl_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v8i7: ; CHECK: # %bb.0: @@ -19,8 +17,6 @@ define <8 x i7> @vsrl_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroex ret <8 x i7> %v } -declare <2 x i8> @llvm.vp.lshr.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) - define <2 x i8> @vsrl_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v2i8: ; CHECK: # %bb.0: @@ -85,8 +81,6 @@ define <2 x i8> @vsrl_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.lshr.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) - define <4 x i8> @vsrl_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v4i8: ; CHECK: # %bb.0: @@ -151,8 +145,6 @@ define <4 x i8> @vsrl_vi_v4i8_unmasked(<4 x i8> %va, i32 
zeroext %evl) { ret <4 x i8> %v } -declare <7 x i8> @llvm.vp.lshr.v7i8(<7 x i8>, <7 x i8>, <7 x i1>, i32) - define <7 x i8> @vsrl_vv_v7i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v7i8: ; CHECK: # %bb.0: @@ -163,8 +155,6 @@ define <7 x i8> @vsrl_vv_v7i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> %m, i32 zeroex ret <7 x i8> %v } -declare <8 x i8> @llvm.vp.lshr.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) - define <8 x i8> @vsrl_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v8i8: ; CHECK: # %bb.0: @@ -229,8 +219,6 @@ define <8 x i8> @vsrl_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.lshr.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) - define <16 x i8> @vsrl_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v16i8: ; CHECK: # %bb.0: @@ -295,8 +283,6 @@ define <16 x i8> @vsrl_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) { ret <16 x i8> %v } -declare <2 x i16> @llvm.vp.lshr.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) - define <2 x i16> @vsrl_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v2i16: ; CHECK: # %bb.0: @@ -361,8 +347,6 @@ define <2 x i16> @vsrl_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) { ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.lshr.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) - define <4 x i16> @vsrl_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v4i16: ; CHECK: # %bb.0: @@ -427,8 +411,6 @@ define <4 x i16> @vsrl_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.lshr.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) - define <8 x i16> @vsrl_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v8i16: ; CHECK: # %bb.0: @@ -493,8 +475,6 @@ define <8 x i16> @vsrl_vi_v8i16_unmasked(<8 x i16> %va, i32 
zeroext %evl) { ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.lshr.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) - define <16 x i16> @vsrl_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v16i16: ; CHECK: # %bb.0: @@ -559,8 +539,6 @@ define <16 x i16> @vsrl_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) { ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.lshr.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) - define <2 x i32> @vsrl_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v2i32: ; CHECK: # %bb.0: @@ -625,8 +603,6 @@ define <2 x i32> @vsrl_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) { ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.lshr.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vsrl_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v4i32: ; CHECK: # %bb.0: @@ -691,8 +667,6 @@ define <4 x i32> @vsrl_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.lshr.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) - define <8 x i32> @vsrl_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v8i32: ; CHECK: # %bb.0: @@ -757,8 +731,6 @@ define <8 x i32> @vsrl_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) { ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.lshr.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32) - define <16 x i32> @vsrl_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v16i32: ; CHECK: # %bb.0: @@ -823,8 +795,6 @@ define <16 x i32> @vsrl_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) { ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.lshr.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) - define <2 x i64> @vsrl_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v2i64: ; CHECK: # %bb.0: @@ -901,8 +871,6 @@ define <2 x i64> 
@vsrl_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) { ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.lshr.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) - define <4 x i64> @vsrl_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v4i64: ; CHECK: # %bb.0: @@ -979,8 +947,6 @@ define <4 x i64> @vsrl_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.lshr.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) - define <8 x i64> @vsrl_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v8i64: ; CHECK: # %bb.0: @@ -1057,8 +1023,6 @@ define <8 x i64> @vsrl_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) { ret <8 x i64> %v } -declare <16 x i64> @llvm.vp.lshr.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32) - define <16 x i64> @vsrl_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v16i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll index 79856de033060..4c7d312e8e785 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <8 x i7> @llvm.vp.ssub.sat.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32) - define <8 x i7> @vssub_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_v8i7: ; CHECK: # %bb.0: @@ -25,8 +23,6 @@ define <8 x i7> @vssub_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroe ret <8 x i7> %v } -declare <2 x i8> @llvm.vp.ssub.sat.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) - define <2 x i8> @vssub_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_v2i8: ; CHECK: # %bb.0: @@ -93,8 +89,6 @@ define <2 x i8> 
@vssub_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.ssub.sat.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) - define <4 x i8> @vssub_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_v4i8: ; CHECK: # %bb.0: @@ -175,8 +169,6 @@ define <4 x i8> @vssub_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ret <4 x i8> %v } -declare <5 x i8> @llvm.vp.ssub.sat.v5i8(<5 x i8>, <5 x i8>, <5 x i1>, i32) - define <5 x i8> @vssub_vv_v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_v5i8: ; CHECK: # %bb.0: @@ -243,8 +235,6 @@ define <5 x i8> @vssub_vi_v5i8_unmasked(<5 x i8> %va, i32 zeroext %evl) { ret <5 x i8> %v } -declare <8 x i8> @llvm.vp.ssub.sat.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) - define <8 x i8> @vssub_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_v8i8: ; CHECK: # %bb.0: @@ -311,8 +301,6 @@ define <8 x i8> @vssub_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.ssub.sat.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) - define <16 x i8> @vssub_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_v16i8: ; CHECK: # %bb.0: @@ -379,8 +367,6 @@ define <16 x i8> @vssub_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) { ret <16 x i8> %v } -declare <256 x i8> @llvm.vp.ssub.sat.v258i8(<256 x i8>, <256 x i8>, <256 x i1>, i32) - define <256 x i8> @vssub_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vi_v258i8: ; CHECK: # %bb.0: @@ -462,8 +448,6 @@ define <256 x i8> @vssub_vi_v258i8_evl128(<256 x i8> %va, <256 x i1> %m) { ret <256 x i8> %v } -declare <2 x i16> @llvm.vp.ssub.sat.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) - define <2 x i16> @vssub_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_v2i16: ; CHECK: # %bb.0: @@ -530,8 +514,6 @@ define 
<2 x i16> @vssub_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) { ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.ssub.sat.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) - define <4 x i16> @vssub_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_v4i16: ; CHECK: # %bb.0: @@ -598,8 +580,6 @@ define <4 x i16> @vssub_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.ssub.sat.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) - define <8 x i16> @vssub_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_v8i16: ; CHECK: # %bb.0: @@ -666,8 +646,6 @@ define <8 x i16> @vssub_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) { ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.ssub.sat.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) - define <16 x i16> @vssub_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_v16i16: ; CHECK: # %bb.0: @@ -734,8 +712,6 @@ define <16 x i16> @vssub_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) { ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.ssub.sat.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) - define <2 x i32> @vssub_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_v2i32: ; CHECK: # %bb.0: @@ -802,8 +778,6 @@ define <2 x i32> @vssub_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) { ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.ssub.sat.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vssub_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_v4i32: ; CHECK: # %bb.0: @@ -870,8 +844,6 @@ define <4 x i32> @vssub_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.ssub.sat.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) - define <8 x i32> @vssub_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) { ; 
CHECK-LABEL: vssub_vv_v8i32: ; CHECK: # %bb.0: @@ -938,8 +910,6 @@ define <8 x i32> @vssub_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) { ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.ssub.sat.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32) - define <16 x i32> @vssub_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_v16i32: ; CHECK: # %bb.0: @@ -1006,8 +976,6 @@ define <16 x i32> @vssub_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) { ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.ssub.sat.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) - define <2 x i64> @vssub_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_v2i64: ; CHECK: # %bb.0: @@ -1104,8 +1072,6 @@ define <2 x i64> @vssub_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) { ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.ssub.sat.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) - define <4 x i64> @vssub_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_v4i64: ; CHECK: # %bb.0: @@ -1202,8 +1168,6 @@ define <4 x i64> @vssub_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.ssub.sat.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) - define <8 x i64> @vssub_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_v8i64: ; CHECK: # %bb.0: @@ -1300,8 +1264,6 @@ define <8 x i64> @vssub_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) { ret <8 x i64> %v } -declare <16 x i64> @llvm.vp.ssub.sat.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32) - define <16 x i64> @vssub_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_v16i64: ; CHECK: # %bb.0: @@ -1400,8 +1362,6 @@ define <16 x i64> @vssub_vi_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) { ; Test that split-legalization works as expected. 
-declare <32 x i64> @llvm.vp.ssub.sat.v32i64(<32 x i64>, <32 x i64>, <32 x i1>, i32) - define <32 x i64> @vssub_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vx_v32i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub.ll index b64e5c4d3467f..392c20756e185 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8>, <2 x i8>) - define <2 x i8> @ssub_v2i8_vv(<2 x i8> %va, <2 x i8> %b) { ; CHECK-LABEL: ssub_v2i8_vv: ; CHECK: # %bb.0: @@ -39,8 +37,6 @@ define <2 x i8> @ssub_v2i8_vi(<2 x i8> %va) { ret <2 x i8> %v } -declare <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8>, <4 x i8>) - define <4 x i8> @ssub_v4i8_vv(<4 x i8> %va, <4 x i8> %b) { ; CHECK-LABEL: ssub_v4i8_vv: ; CHECK: # %bb.0: @@ -74,8 +70,6 @@ define <4 x i8> @ssub_v4i8_vi(<4 x i8> %va) { ret <4 x i8> %v } -declare <8 x i8> @llvm.ssub.sat.v8i8(<8 x i8>, <8 x i8>) - define <8 x i8> @ssub_v8i8_vv(<8 x i8> %va, <8 x i8> %b) { ; CHECK-LABEL: ssub_v8i8_vv: ; CHECK: # %bb.0: @@ -109,8 +103,6 @@ define <8 x i8> @ssub_v8i8_vi(<8 x i8> %va) { ret <8 x i8> %v } -declare <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8>, <16 x i8>) - define <16 x i8> @ssub_v16i8_vv(<16 x i8> %va, <16 x i8> %b) { ; CHECK-LABEL: ssub_v16i8_vv: ; CHECK: # %bb.0: @@ -144,8 +136,6 @@ define <16 x i8> @ssub_v16i8_vi(<16 x i8> %va) { ret <16 x i8> %v } -declare <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16>, <2 x i16>) - define <2 x i16> @ssub_v2i16_vv(<2 x i16> %va, <2 x i16> %b) { ; CHECK-LABEL: ssub_v2i16_vv: ; CHECK: # %bb.0: @@ -179,8 +169,6 @@ define <2 x i16> @ssub_v2i16_vi(<2 x i16> %va) { ret <2 x i16> %v } -declare <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16>, <4 x i16>) - define <4 x i16> @ssub_v4i16_vv(<4 
x i16> %va, <4 x i16> %b) { ; CHECK-LABEL: ssub_v4i16_vv: ; CHECK: # %bb.0: @@ -214,8 +202,6 @@ define <4 x i16> @ssub_v4i16_vi(<4 x i16> %va) { ret <4 x i16> %v } -declare <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16>, <8 x i16>) - define <8 x i16> @ssub_v8i16_vv(<8 x i16> %va, <8 x i16> %b) { ; CHECK-LABEL: ssub_v8i16_vv: ; CHECK: # %bb.0: @@ -249,8 +235,6 @@ define <8 x i16> @ssub_v8i16_vi(<8 x i16> %va) { ret <8 x i16> %v } -declare <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16>, <16 x i16>) - define <16 x i16> @ssub_v16i16_vv(<16 x i16> %va, <16 x i16> %b) { ; CHECK-LABEL: ssub_v16i16_vv: ; CHECK: # %bb.0: @@ -284,8 +268,6 @@ define <16 x i16> @ssub_v16i16_vi(<16 x i16> %va) { ret <16 x i16> %v } -declare <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32>, <2 x i32>) - define <2 x i32> @ssub_v2i32_vv(<2 x i32> %va, <2 x i32> %b) { ; CHECK-LABEL: ssub_v2i32_vv: ; CHECK: # %bb.0: @@ -319,8 +301,6 @@ define <2 x i32> @ssub_v2i32_vi(<2 x i32> %va) { ret <2 x i32> %v } -declare <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32>, <4 x i32>) - define <4 x i32> @ssub_v4i32_vv(<4 x i32> %va, <4 x i32> %b) { ; CHECK-LABEL: ssub_v4i32_vv: ; CHECK: # %bb.0: @@ -354,8 +334,6 @@ define <4 x i32> @ssub_v4i32_vi(<4 x i32> %va) { ret <4 x i32> %v } -declare <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32>, <8 x i32>) - define <8 x i32> @ssub_v8i32_vv(<8 x i32> %va, <8 x i32> %b) { ; CHECK-LABEL: ssub_v8i32_vv: ; CHECK: # %bb.0: @@ -389,8 +367,6 @@ define <8 x i32> @ssub_v8i32_vi(<8 x i32> %va) { ret <8 x i32> %v } -declare <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32>, <16 x i32>) - define <16 x i32> @ssub_v16i32_vv(<16 x i32> %va, <16 x i32> %b) { ; CHECK-LABEL: ssub_v16i32_vv: ; CHECK: # %bb.0: @@ -424,8 +400,6 @@ define <16 x i32> @ssub_v16i32_vi(<16 x i32> %va) { ret <16 x i32> %v } -declare <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64>, <2 x i64>) - define <2 x i64> @ssub_v2i64_vv(<2 x i64> %va, <2 x i64> %b) { ; CHECK-LABEL: ssub_v2i64_vv: ; CHECK: # %bb.0: @@ -473,8 +447,6 @@ define <2 x i64> @ssub_v2i64_vi(<2 x 
i64> %va) { ret <2 x i64> %v } -declare <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64>, <4 x i64>) - define <4 x i64> @ssub_v4i64_vv(<4 x i64> %va, <4 x i64> %b) { ; CHECK-LABEL: ssub_v4i64_vv: ; CHECK: # %bb.0: @@ -522,8 +494,6 @@ define <4 x i64> @ssub_v4i64_vi(<4 x i64> %va) { ret <4 x i64> %v } -declare <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64>, <8 x i64>) - define <8 x i64> @ssub_v8i64_vv(<8 x i64> %va, <8 x i64> %b) { ; CHECK-LABEL: ssub_v8i64_vv: ; CHECK: # %bb.0: @@ -571,8 +541,6 @@ define <8 x i64> @ssub_v8i64_vi(<8 x i64> %va) { ret <8 x i64> %v } -declare <16 x i64> @llvm.ssub.sat.v16i64(<16 x i64>, <16 x i64>) - define <16 x i64> @ssub_v16i64_vv(<16 x i64> %va, <16 x i64> %b) { ; CHECK-LABEL: ssub_v16i64_vv: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll index 7a9bef49c994d..f9000a1b88a6d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <8 x i7> @llvm.vp.usub.sat.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32) - define <8 x i7> @vssubu_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_v8i7: ; CHECK: # %bb.0: @@ -20,8 +18,6 @@ define <8 x i7> @vssubu_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zero ret <8 x i7> %v } -declare <2 x i8> @llvm.vp.usub.sat.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) - define <2 x i8> @vssubu_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_v2i8: ; CHECK: # %bb.0: @@ -88,8 +84,6 @@ define <2 x i8> @vssubu_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.usub.sat.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) - define <4 x i8> @vssubu_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { 
; CHECK-LABEL: vssubu_vv_v4i8: ; CHECK: # %bb.0: @@ -170,8 +164,6 @@ define <4 x i8> @vssubu_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ret <4 x i8> %v } -declare <5 x i8> @llvm.vp.usub.sat.v5i8(<5 x i8>, <5 x i8>, <5 x i1>, i32) - define <5 x i8> @vssubu_vv_v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_v5i8: ; CHECK: # %bb.0: @@ -238,8 +230,6 @@ define <5 x i8> @vssubu_vi_v5i8_unmasked(<5 x i8> %va, i32 zeroext %evl) { ret <5 x i8> %v } -declare <8 x i8> @llvm.vp.usub.sat.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) - define <8 x i8> @vssubu_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_v8i8: ; CHECK: # %bb.0: @@ -306,8 +296,6 @@ define <8 x i8> @vssubu_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.usub.sat.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) - define <16 x i8> @vssubu_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_v16i8: ; CHECK: # %bb.0: @@ -374,8 +362,6 @@ define <16 x i8> @vssubu_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) { ret <16 x i8> %v } -declare <256 x i8> @llvm.vp.usub.sat.v258i8(<256 x i8>, <256 x i8>, <256 x i1>, i32) - define <256 x i8> @vssubu_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vi_v258i8: ; CHECK: # %bb.0: @@ -457,8 +443,6 @@ define <256 x i8> @vssubu_vi_v258i8_evl128(<256 x i8> %va, <256 x i1> %m) { ret <256 x i8> %v } -declare <2 x i16> @llvm.vp.usub.sat.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) - define <2 x i16> @vssubu_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_v2i16: ; CHECK: # %bb.0: @@ -525,8 +509,6 @@ define <2 x i16> @vssubu_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) { ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.usub.sat.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) - define <4 x i16> @vssubu_vv_v4i16(<4 x i16> %va, <4 x 
i16> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_v4i16: ; CHECK: # %bb.0: @@ -593,8 +575,6 @@ define <4 x i16> @vssubu_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.usub.sat.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) - define <8 x i16> @vssubu_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_v8i16: ; CHECK: # %bb.0: @@ -661,8 +641,6 @@ define <8 x i16> @vssubu_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) { ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.usub.sat.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) - define <16 x i16> @vssubu_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_v16i16: ; CHECK: # %bb.0: @@ -729,8 +707,6 @@ define <16 x i16> @vssubu_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) { ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.usub.sat.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) - define <2 x i32> @vssubu_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_v2i32: ; CHECK: # %bb.0: @@ -797,8 +773,6 @@ define <2 x i32> @vssubu_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) { ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.usub.sat.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vssubu_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_v4i32: ; CHECK: # %bb.0: @@ -865,8 +839,6 @@ define <4 x i32> @vssubu_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.usub.sat.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) - define <8 x i32> @vssubu_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_v8i32: ; CHECK: # %bb.0: @@ -933,8 +905,6 @@ define <8 x i32> @vssubu_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) { ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.usub.sat.v16i32(<16 x 
i32>, <16 x i32>, <16 x i1>, i32) - define <16 x i32> @vssubu_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_v16i32: ; CHECK: # %bb.0: @@ -1001,8 +971,6 @@ define <16 x i32> @vssubu_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) { ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.usub.sat.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) - define <2 x i64> @vssubu_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_v2i64: ; CHECK: # %bb.0: @@ -1099,8 +1067,6 @@ define <2 x i64> @vssubu_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) { ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.usub.sat.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) - define <4 x i64> @vssubu_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_v4i64: ; CHECK: # %bb.0: @@ -1197,8 +1163,6 @@ define <4 x i64> @vssubu_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.usub.sat.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) - define <8 x i64> @vssubu_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_v8i64: ; CHECK: # %bb.0: @@ -1295,8 +1259,6 @@ define <8 x i64> @vssubu_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) { ret <8 x i64> %v } -declare <16 x i64> @llvm.vp.usub.sat.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32) - define <16 x i64> @vssubu_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_v16i64: ; CHECK: # %bb.0: @@ -1395,8 +1357,6 @@ define <16 x i64> @vssubu_vi_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) { ; Test that split-legalization works as expected. 
-declare <32 x i64> @llvm.vp.usub.sat.v32i64(<32 x i64>, <32 x i64>, <32 x i1>, i32) - define <32 x i64> @vssubu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vx_v32i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu.ll index 26a8879bfdf9f..65a21d8e14366 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <2 x i8> @llvm.usub.sat.v2i8(<2 x i8>, <2 x i8>) - define <2 x i8> @usub_v2i8_vv(<2 x i8> %va, <2 x i8> %b) { ; CHECK-LABEL: usub_v2i8_vv: ; CHECK: # %bb.0: @@ -39,8 +37,6 @@ define <2 x i8> @usub_v2i8_vi(<2 x i8> %va) { ret <2 x i8> %v } -declare <4 x i8> @llvm.usub.sat.v4i8(<4 x i8>, <4 x i8>) - define <4 x i8> @usub_v4i8_vv(<4 x i8> %va, <4 x i8> %b) { ; CHECK-LABEL: usub_v4i8_vv: ; CHECK: # %bb.0: @@ -74,8 +70,6 @@ define <4 x i8> @usub_v4i8_vi(<4 x i8> %va) { ret <4 x i8> %v } -declare <8 x i8> @llvm.usub.sat.v8i8(<8 x i8>, <8 x i8>) - define <8 x i8> @usub_v8i8_vv(<8 x i8> %va, <8 x i8> %b) { ; CHECK-LABEL: usub_v8i8_vv: ; CHECK: # %bb.0: @@ -109,8 +103,6 @@ define <8 x i8> @usub_v8i8_vi(<8 x i8> %va) { ret <8 x i8> %v } -declare <16 x i8> @llvm.usub.sat.v16i8(<16 x i8>, <16 x i8>) - define <16 x i8> @usub_v16i8_vv(<16 x i8> %va, <16 x i8> %b) { ; CHECK-LABEL: usub_v16i8_vv: ; CHECK: # %bb.0: @@ -144,8 +136,6 @@ define <16 x i8> @usub_v16i8_vi(<16 x i8> %va) { ret <16 x i8> %v } -declare <2 x i16> @llvm.usub.sat.v2i16(<2 x i16>, <2 x i16>) - define <2 x i16> @usub_v2i16_vv(<2 x i16> %va, <2 x i16> %b) { ; CHECK-LABEL: usub_v2i16_vv: ; CHECK: # %bb.0: @@ -179,8 +169,6 @@ define <2 x i16> @usub_v2i16_vi(<2 x i16> %va) { ret <2 x i16> %v } -declare <4 x i16> @llvm.usub.sat.v4i16(<4 x i16>, <4 x i16>) - define <4 x i16> 
@usub_v4i16_vv(<4 x i16> %va, <4 x i16> %b) { ; CHECK-LABEL: usub_v4i16_vv: ; CHECK: # %bb.0: @@ -214,8 +202,6 @@ define <4 x i16> @usub_v4i16_vi(<4 x i16> %va) { ret <4 x i16> %v } -declare <8 x i16> @llvm.usub.sat.v8i16(<8 x i16>, <8 x i16>) - define <8 x i16> @usub_v8i16_vv(<8 x i16> %va, <8 x i16> %b) { ; CHECK-LABEL: usub_v8i16_vv: ; CHECK: # %bb.0: @@ -249,8 +235,6 @@ define <8 x i16> @usub_v8i16_vi(<8 x i16> %va) { ret <8 x i16> %v } -declare <16 x i16> @llvm.usub.sat.v16i16(<16 x i16>, <16 x i16>) - define <16 x i16> @usub_v16i16_vv(<16 x i16> %va, <16 x i16> %b) { ; CHECK-LABEL: usub_v16i16_vv: ; CHECK: # %bb.0: @@ -284,8 +268,6 @@ define <16 x i16> @usub_v16i16_vi(<16 x i16> %va) { ret <16 x i16> %v } -declare <2 x i32> @llvm.usub.sat.v2i32(<2 x i32>, <2 x i32>) - define <2 x i32> @usub_v2i32_vv(<2 x i32> %va, <2 x i32> %b) { ; CHECK-LABEL: usub_v2i32_vv: ; CHECK: # %bb.0: @@ -319,8 +301,6 @@ define <2 x i32> @usub_v2i32_vi(<2 x i32> %va) { ret <2 x i32> %v } -declare <4 x i32> @llvm.usub.sat.v4i32(<4 x i32>, <4 x i32>) - define <4 x i32> @usub_v4i32_vv(<4 x i32> %va, <4 x i32> %b) { ; CHECK-LABEL: usub_v4i32_vv: ; CHECK: # %bb.0: @@ -354,8 +334,6 @@ define <4 x i32> @usub_v4i32_vi(<4 x i32> %va) { ret <4 x i32> %v } -declare <8 x i32> @llvm.usub.sat.v8i32(<8 x i32>, <8 x i32>) - define <8 x i32> @usub_v8i32_vv(<8 x i32> %va, <8 x i32> %b) { ; CHECK-LABEL: usub_v8i32_vv: ; CHECK: # %bb.0: @@ -389,8 +367,6 @@ define <8 x i32> @usub_v8i32_vi(<8 x i32> %va) { ret <8 x i32> %v } -declare <16 x i32> @llvm.usub.sat.v16i32(<16 x i32>, <16 x i32>) - define <16 x i32> @usub_v16i32_vv(<16 x i32> %va, <16 x i32> %b) { ; CHECK-LABEL: usub_v16i32_vv: ; CHECK: # %bb.0: @@ -424,8 +400,6 @@ define <16 x i32> @usub_v16i32_vi(<16 x i32> %va) { ret <16 x i32> %v } -declare <2 x i64> @llvm.usub.sat.v2i64(<2 x i64>, <2 x i64>) - define <2 x i64> @usub_v2i64_vv(<2 x i64> %va, <2 x i64> %b) { ; CHECK-LABEL: usub_v2i64_vv: ; CHECK: # %bb.0: @@ -473,8 +447,6 @@ define <2 x i64> 
@usub_v2i64_vi(<2 x i64> %va) { ret <2 x i64> %v } -declare <4 x i64> @llvm.usub.sat.v4i64(<4 x i64>, <4 x i64>) - define <4 x i64> @usub_v4i64_vv(<4 x i64> %va, <4 x i64> %b) { ; CHECK-LABEL: usub_v4i64_vv: ; CHECK: # %bb.0: @@ -522,8 +494,6 @@ define <4 x i64> @usub_v4i64_vi(<4 x i64> %va) { ret <4 x i64> %v } -declare <8 x i64> @llvm.usub.sat.v8i64(<8 x i64>, <8 x i64>) - define <8 x i64> @usub_v8i64_vv(<8 x i64> %va, <8 x i64> %b) { ; CHECK-LABEL: usub_v8i64_vv: ; CHECK: # %bb.0: @@ -571,8 +541,6 @@ define <8 x i64> @usub_v8i64_vi(<8 x i64> %va) { ret <8 x i64> %v } -declare <16 x i64> @llvm.usub.sat.v16i64(<16 x i64>, <16 x i64>) - define <16 x i64> @usub_v16i64_vv(<16 x i64> %va, <16 x i64> %b) { ; CHECK-LABEL: usub_v16i64_vv: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp-mask.ll index 7f3bbc3dacde3..87ec263e8262b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp-mask.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK -declare <2 x i1> @llvm.vp.sub.v2i1(<2 x i1>, <2 x i1>, <2 x i1>, i32) - define <2 x i1> @vsub_vv_v2i1(<2 x i1> %va, <2 x i1> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v2i1: ; CHECK: # %bb.0: @@ -16,8 +14,6 @@ define <2 x i1> @vsub_vv_v2i1(<2 x i1> %va, <2 x i1> %b, <2 x i1> %m, i32 zeroex ret <2 x i1> %v } -declare <4 x i1> @llvm.vp.sub.v4i1(<4 x i1>, <4 x i1>, <4 x i1>, i32) - define <4 x i1> @vsub_vv_v4i1(<4 x i1> %va, <4 x i1> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v4i1: ; CHECK: # %bb.0: @@ -28,8 +24,6 @@ define <4 x i1> @vsub_vv_v4i1(<4 x i1> %va, <4 x i1> %b, <4 x i1> %m, i32 zeroex ret <4 x i1> %v } -declare <8 x i1> @llvm.vp.sub.v8i1(<8 x i1>, <8 x i1>, <8 x i1>, i32) - define <8 x i1> @vsub_vv_v8i1(<8 x i1> %va, <8 x i1> %b, <8 x i1> %m, i32 zeroext 
%evl) { ; CHECK-LABEL: vsub_vv_v8i1: ; CHECK: # %bb.0: @@ -40,8 +34,6 @@ define <8 x i1> @vsub_vv_v8i1(<8 x i1> %va, <8 x i1> %b, <8 x i1> %m, i32 zeroex ret <8 x i1> %v } -declare <16 x i1> @llvm.vp.sub.v16i1(<16 x i1>, <16 x i1>, <16 x i1>, i32) - define <16 x i1> @vsub_vv_v16i1(<16 x i1> %va, <16 x i1> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v16i1: ; CHECK: # %bb.0: @@ -52,8 +44,6 @@ define <16 x i1> @vsub_vv_v16i1(<16 x i1> %va, <16 x i1> %b, <16 x i1> %m, i32 z ret <16 x i1> %v } -declare <32 x i1> @llvm.vp.sub.v32i1(<32 x i1>, <32 x i1>, <32 x i1>, i32) - define <32 x i1> @vsub_vv_v32i1(<32 x i1> %va, <32 x i1> %b, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v32i1: ; CHECK: # %bb.0: @@ -64,8 +54,6 @@ define <32 x i1> @vsub_vv_v32i1(<32 x i1> %va, <32 x i1> %b, <32 x i1> %m, i32 z ret <32 x i1> %v } -declare <64 x i1> @llvm.vp.sub.v64i1(<64 x i1>, <64 x i1>, <64 x i1>, i32) - define <64 x i1> @vsub_vv_v64i1(<64 x i1> %va, <64 x i1> %b, <64 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v64i1: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp.ll index 5c57aa139f065..e5bfd4d6c688e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <8 x i7> @llvm.vp.sub.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32) - define <8 x i7> @vsub_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v8i7: ; CHECK: # %bb.0: @@ -16,8 +14,6 @@ define <8 x i7> @vsub_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroex ret <8 x i7> %v } -declare <2 x i8> @llvm.vp.sub.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) - define <2 x i8> @vsub_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vsub_vv_v2i8: ; CHECK: # %bb.0: @@ -62,8 +58,6 @@ define <2 x i8> @vsub_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) { ret <2 x i8> %v } -declare <3 x i8> @llvm.vp.sub.v3i8(<3 x i8>, <3 x i8>, <3 x i1>, i32) - define <3 x i8> @vsub_vv_v3i8(<3 x i8> %va, <3 x i8> %b, <3 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v3i8: ; CHECK: # %bb.0: @@ -108,8 +102,6 @@ define <3 x i8> @vsub_vx_v3i8_unmasked(<3 x i8> %va, i8 %b, i32 zeroext %evl) { ret <3 x i8> %v } -declare <4 x i8> @llvm.vp.sub.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) - define <4 x i8> @vsub_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v4i8: ; CHECK: # %bb.0: @@ -154,8 +146,6 @@ define <4 x i8> @vsub_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) { ret <4 x i8> %v } -declare <8 x i8> @llvm.vp.sub.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) - define <8 x i8> @vsub_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v8i8: ; CHECK: # %bb.0: @@ -200,8 +190,6 @@ define <8 x i8> @vsub_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) { ret <8 x i8> %v } -declare <16 x i8> @llvm.vp.sub.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) - define <16 x i8> @vsub_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v16i8: ; CHECK: # %bb.0: @@ -246,8 +234,6 @@ define <16 x i8> @vsub_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl) ret <16 x i8> %v } -declare <2 x i16> @llvm.vp.sub.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) - define <2 x i16> @vsub_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v2i16: ; CHECK: # %bb.0: @@ -292,8 +278,6 @@ define <2 x i16> @vsub_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.sub.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) - define <4 x i16> @vsub_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vsub_vv_v4i16: ; CHECK: # %bb.0: @@ -338,8 +322,6 @@ define <4 x i16> @vsub_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.sub.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) - define <8 x i16> @vsub_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v8i16: ; CHECK: # %bb.0: @@ -384,8 +366,6 @@ define <8 x i16> @vsub_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.sub.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) - define <16 x i16> @vsub_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v16i16: ; CHECK: # %bb.0: @@ -430,8 +410,6 @@ define <16 x i16> @vsub_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext % ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.sub.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) - define <2 x i32> @vsub_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v2i32: ; CHECK: # %bb.0: @@ -476,8 +454,6 @@ define <2 x i32> @vsub_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.sub.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vsub_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v4i32: ; CHECK: # %bb.0: @@ -522,8 +498,6 @@ define <4 x i32> @vsub_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.sub.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) - define <8 x i32> @vsub_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v8i32: ; CHECK: # %bb.0: @@ -568,8 +542,6 @@ define <8 x i32> @vsub_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.sub.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32) - define <16 x i32> @vsub_vv_v16i32(<16 x i32> %va, <16 x 
i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v16i32: ; CHECK: # %bb.0: @@ -614,8 +586,6 @@ define <16 x i32> @vsub_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext % ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.sub.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) - define <2 x i64> @vsub_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v2i64: ; CHECK: # %bb.0: @@ -690,8 +660,6 @@ define <2 x i64> @vsub_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.sub.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) - define <4 x i64> @vsub_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v4i64: ; CHECK: # %bb.0: @@ -766,8 +734,6 @@ define <4 x i64> @vsub_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.sub.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) - define <8 x i64> @vsub_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v8i64: ; CHECK: # %bb.0: @@ -842,8 +808,6 @@ define <8 x i64> @vsub_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl ret <8 x i64> %v } -declare <16 x i64> @llvm.vp.sub.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32) - define <16 x i64> @vsub_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v16i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll index 97b86a8eff19f..2cb344434eec8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare <8 x i7> @llvm.vp.xor.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32) - define <8 x i7> @vxor_vv_v8i7(<8 x i7> %va, <8 x i7> 
%b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v8i7: ; CHECK: # %bb.0: @@ -16,8 +14,6 @@ define <8 x i7> @vxor_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroex ret <8 x i7> %v } -declare <2 x i8> @llvm.vp.xor.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) - define <2 x i8> @vxor_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v2i8: ; CHECK: # %bb.0: @@ -114,8 +110,6 @@ define <2 x i8> @vxor_vi_v2i8_unmasked_1(<2 x i8> %va, i32 zeroext %evl) { ret <2 x i8> %v } -declare <4 x i8> @llvm.vp.xor.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) - define <4 x i8> @vxor_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v4i8: ; CHECK: # %bb.0: @@ -200,8 +194,6 @@ define <4 x i8> @vxor_vi_v4i8_unmasked_1(<4 x i8> %va, i32 zeroext %evl) { ret <4 x i8> %v } -declare <8 x i8> @llvm.vp.xor.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) - define <8 x i8> @vxor_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v8i8: ; CHECK: # %bb.0: @@ -286,8 +278,6 @@ define <8 x i8> @vxor_vi_v8i8_unmasked_1(<8 x i8> %va, i32 zeroext %evl) { ret <8 x i8> %v } -declare <9 x i8> @llvm.vp.xor.v9i8(<9 x i8>, <9 x i8>, <9 x i1>, i32) - define <9 x i8> @vxor_vv_v9i8(<9 x i8> %va, <9 x i8> %b, <9 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v9i8: ; CHECK: # %bb.0: @@ -372,8 +362,6 @@ define <9 x i8> @vxor_vi_v9i8_unmasked_1(<9 x i8> %va, i32 zeroext %evl) { ret <9 x i8> %v } -declare <16 x i8> @llvm.vp.xor.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) - define <16 x i8> @vxor_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v16i8: ; CHECK: # %bb.0: @@ -458,8 +446,6 @@ define <16 x i8> @vxor_vi_v16i8_unmasked_1(<16 x i8> %va, i32 zeroext %evl) { ret <16 x i8> %v } -declare <2 x i16> @llvm.vp.xor.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) - define <2 x i16> @vxor_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) 
{ ; CHECK-LABEL: vxor_vv_v2i16: ; CHECK: # %bb.0: @@ -544,8 +530,6 @@ define <2 x i16> @vxor_vi_v2i16_unmasked_1(<2 x i16> %va, i32 zeroext %evl) { ret <2 x i16> %v } -declare <4 x i16> @llvm.vp.xor.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) - define <4 x i16> @vxor_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v4i16: ; CHECK: # %bb.0: @@ -630,8 +614,6 @@ define <4 x i16> @vxor_vi_v4i16_unmasked_1(<4 x i16> %va, i32 zeroext %evl) { ret <4 x i16> %v } -declare <8 x i16> @llvm.vp.xor.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) - define <8 x i16> @vxor_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v8i16: ; CHECK: # %bb.0: @@ -716,8 +698,6 @@ define <8 x i16> @vxor_vi_v8i16_unmasked_1(<8 x i16> %va, i32 zeroext %evl) { ret <8 x i16> %v } -declare <16 x i16> @llvm.vp.xor.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) - define <16 x i16> @vxor_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v16i16: ; CHECK: # %bb.0: @@ -802,8 +782,6 @@ define <16 x i16> @vxor_vi_v16i16_unmasked_1(<16 x i16> %va, i32 zeroext %evl) { ret <16 x i16> %v } -declare <2 x i32> @llvm.vp.xor.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) - define <2 x i32> @vxor_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v2i32: ; CHECK: # %bb.0: @@ -888,8 +866,6 @@ define <2 x i32> @vxor_vi_v2i32_unmasked_1(<2 x i32> %va, i32 zeroext %evl) { ret <2 x i32> %v } -declare <4 x i32> @llvm.vp.xor.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vxor_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v4i32: ; CHECK: # %bb.0: @@ -974,8 +950,6 @@ define <4 x i32> @vxor_vi_v4i32_unmasked_1(<4 x i32> %va, i32 zeroext %evl) { ret <4 x i32> %v } -declare <8 x i32> @llvm.vp.xor.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) - define <8 x i32> @vxor_vv_v8i32(<8 x i32> %va, <8 x i32> %b, 
<8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v8i32: ; CHECK: # %bb.0: @@ -1060,8 +1034,6 @@ define <8 x i32> @vxor_vi_v8i32_unmasked_1(<8 x i32> %va, i32 zeroext %evl) { ret <8 x i32> %v } -declare <16 x i32> @llvm.vp.xor.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32) - define <16 x i32> @vxor_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v16i32: ; CHECK: # %bb.0: @@ -1146,8 +1118,6 @@ define <16 x i32> @vxor_vi_v16i32_unmasked_1(<16 x i32> %va, i32 zeroext %evl) { ret <16 x i32> %v } -declare <2 x i64> @llvm.vp.xor.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) - define <2 x i64> @vxor_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v2i64: ; CHECK: # %bb.0: @@ -1262,8 +1232,6 @@ define <2 x i64> @vxor_vi_v2i64_unmasked_1(<2 x i64> %va, i32 zeroext %evl) { ret <2 x i64> %v } -declare <4 x i64> @llvm.vp.xor.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) - define <4 x i64> @vxor_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v4i64: ; CHECK: # %bb.0: @@ -1378,8 +1346,6 @@ define <4 x i64> @vxor_vi_v4i64_unmasked_1(<4 x i64> %va, i32 zeroext %evl) { ret <4 x i64> %v } -declare <8 x i64> @llvm.vp.xor.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) - define <8 x i64> @vxor_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v8i64: ; CHECK: # %bb.0: @@ -1494,8 +1460,6 @@ define <8 x i64> @vxor_vi_v8i64_unmasked_1(<8 x i64> %va, i32 zeroext %evl) { ret <8 x i64> %v } -declare <16 x i64> @llvm.vp.xor.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32) - define <16 x i64> @vxor_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v16i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-x.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-x.ll index 88803f7cd5d89..df6dce28df244 100644 --- 
a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-x.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-x.ll @@ -15,8 +15,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e8mf8.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e8mf4(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -28,8 +26,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e8mf4.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e8mf2(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -41,8 +37,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e8mf2.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e8m1(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -54,8 +48,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e8m1.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e8m2(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -67,8 +59,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e8m2.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e8m4(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -80,8 +70,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e8m4.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e8m8(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -93,8 +81,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e8m8.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e16mf4(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e16mf4: ; CHECK: # 
%bb.0: # %entry @@ -106,8 +92,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e16mf4.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e16mf2(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -119,8 +103,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e16mf2.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e16m1(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -132,8 +114,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e16m1.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e16m2(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -145,8 +125,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e16m2.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e16m4(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -158,8 +136,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e16m4.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e16m8(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -171,8 +147,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e16m8.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e32mf2(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -184,8 +158,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e32mf2.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e32m1(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -197,8 +169,6 @@ entry: ret void } 
-declare void @llvm.riscv.sf.vc.x.se.e32m1.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e32m2(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -210,8 +180,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e32m2.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e32m4(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -223,8 +191,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e32m4.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e32m8(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -236,8 +202,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e32m8.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen, iXLen, iXLen) - define <1 x i8> @test_sf_vc_v_x_se_e8mf8(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -249,8 +213,6 @@ entry: ret <1 x i8> %0 } -declare <1 x i8> @llvm.riscv.sf.vc.v.x.se.nxv1i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define <2 x i8> @test_sf_vc_v_x_se_e8mf4(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -262,8 +224,6 @@ entry: ret <2 x i8> %0 } -declare <2 x i8> @llvm.riscv.sf.vc.v.x.se.nxv2i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define <4 x i8> @test_sf_vc_v_x_se_e8mf2(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -275,8 +235,6 @@ entry: ret <4 x i8> %0 } -declare <4 x i8> @llvm.riscv.sf.vc.v.x.se.nxv4i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define <8 x i8> @test_sf_vc_v_x_se_e8m1(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -288,8 +246,6 @@ entry: ret <8 x i8> %0 } -declare <8 x i8> 
@llvm.riscv.sf.vc.v.x.se.nxv8i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define <16 x i8> @test_sf_vc_v_x_se_e8m2(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -301,8 +257,6 @@ entry: ret <16 x i8> %0 } -declare <16 x i8> @llvm.riscv.sf.vc.v.x.se.nxv16i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define <32 x i8> @test_sf_vc_v_x_se_e8m4(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -314,8 +268,6 @@ entry: ret <32 x i8> %0 } -declare <32 x i8> @llvm.riscv.sf.vc.v.x.se.nxv32i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define <64 x i8> @test_sf_vc_v_x_se_e8m8(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -327,8 +279,6 @@ entry: ret <64 x i8> %0 } -declare <64 x i8> @llvm.riscv.sf.vc.v.x.se.nxv64i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define <1 x i16> @test_sf_vc_v_x_se_e16mf4(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -340,8 +290,6 @@ entry: ret <1 x i16> %0 } -declare <1 x i16> @llvm.riscv.sf.vc.v.x.se.nxv1i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) - define <2 x i16> @test_sf_vc_v_x_se_e16mf2(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -353,8 +301,6 @@ entry: ret <2 x i16> %0 } -declare <2 x i16> @llvm.riscv.sf.vc.v.x.se.nxv2i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) - define <4 x i16> @test_sf_vc_v_x_se_e16m1(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -366,8 +312,6 @@ entry: ret <4 x i16> %0 } -declare <4 x i16> @llvm.riscv.sf.vc.v.x.se.nxv4i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) - define <8 x i16> @test_sf_vc_v_x_se_e16m2(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -379,8 +323,6 @@ entry: ret <8 x i16> %0 } -declare <8 x i16> 
@llvm.riscv.sf.vc.v.x.se.nxv8i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) - define <16 x i16> @test_sf_vc_v_x_se_e16m4(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -392,8 +334,6 @@ entry: ret <16 x i16> %0 } -declare <16 x i16> @llvm.riscv.sf.vc.v.x.se.nxv16i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) - define <32 x i16> @test_sf_vc_v_x_se_e16m8(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -405,8 +345,6 @@ entry: ret <32 x i16> %0 } -declare <32 x i16> @llvm.riscv.sf.vc.v.x.se.nxv32i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) - define <1 x i32> @test_sf_vc_v_x_se_e32mf2(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -418,8 +356,6 @@ entry: ret <1 x i32> %0 } -declare <1 x i32> @llvm.riscv.sf.vc.v.x.se.nxv1i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) - define <2 x i32> @test_sf_vc_v_x_se_e32m1(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -431,8 +367,6 @@ entry: ret <2 x i32> %0 } -declare <2 x i32> @llvm.riscv.sf.vc.v.x.se.nxv2i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) - define <4 x i32> @test_sf_vc_v_x_se_e32m2(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -444,8 +378,6 @@ entry: ret <4 x i32> %0 } -declare <4 x i32> @llvm.riscv.sf.vc.v.x.se.nxv4i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) - define <8 x i32> @test_sf_vc_v_x_se_e32m4(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -457,8 +389,6 @@ entry: ret <8 x i32> %0 } -declare <8 x i32> @llvm.riscv.sf.vc.v.x.se.nxv8i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) - define <16 x i32> @test_sf_vc_v_x_se_e32m8(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -470,8 +400,6 @@ entry: ret <16 x i32> %0 } -declare <16 x i32> 
@llvm.riscv.sf.vc.v.x.se.nxv16i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) - define <1 x i8> @test_sf_vc_v_x_e8mf8(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e8mf8: ; CHECK: # %bb.0: # %entry @@ -483,8 +411,6 @@ entry: ret <1 x i8> %0 } -declare <1 x i8> @llvm.riscv.sf.vc.v.x.nxv1i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define <2 x i8> @test_sf_vc_v_x_e8mf4(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e8mf4: ; CHECK: # %bb.0: # %entry @@ -496,8 +422,6 @@ entry: ret <2 x i8> %0 } -declare <2 x i8> @llvm.riscv.sf.vc.v.x.nxv2i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define <4 x i8> @test_sf_vc_v_x_e8mf2(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e8mf2: ; CHECK: # %bb.0: # %entry @@ -509,8 +433,6 @@ entry: ret <4 x i8> %0 } -declare <4 x i8> @llvm.riscv.sf.vc.v.x.nxv4i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define <8 x i8> @test_sf_vc_v_x_e8m1(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e8m1: ; CHECK: # %bb.0: # %entry @@ -522,8 +444,6 @@ entry: ret <8 x i8> %0 } -declare <8 x i8> @llvm.riscv.sf.vc.v.x.nxv8i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define <16 x i8> @test_sf_vc_v_x_e8m2(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e8m2: ; CHECK: # %bb.0: # %entry @@ -535,8 +455,6 @@ entry: ret <16 x i8> %0 } -declare <16 x i8> @llvm.riscv.sf.vc.v.x.nxv16i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define <32 x i8> @test_sf_vc_v_x_e8m4(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e8m4: ; CHECK: # %bb.0: # %entry @@ -548,8 +466,6 @@ entry: ret <32 x i8> %0 } -declare <32 x i8> @llvm.riscv.sf.vc.v.x.nxv32i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define <64 x i8> @test_sf_vc_v_x_e8m8(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e8m8: ; CHECK: # %bb.0: # %entry @@ -561,8 +477,6 @@ entry: ret <64 x i8> %0 } -declare <64 x i8> @llvm.riscv.sf.vc.v.x.nxv64i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define <1 x i16> @test_sf_vc_v_x_e16mf4(i16 zeroext 
%rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e16mf4: ; CHECK: # %bb.0: # %entry @@ -574,8 +488,6 @@ entry: ret <1 x i16> %0 } -declare <1 x i16> @llvm.riscv.sf.vc.v.x.nxv1i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) - define <2 x i16> @test_sf_vc_v_x_e16mf2(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e16mf2: ; CHECK: # %bb.0: # %entry @@ -587,8 +499,6 @@ entry: ret <2 x i16> %0 } -declare <2 x i16> @llvm.riscv.sf.vc.v.x.nxv2i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) - define <4 x i16> @test_sf_vc_v_x_e16m1(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e16m1: ; CHECK: # %bb.0: # %entry @@ -600,8 +510,6 @@ entry: ret <4 x i16> %0 } -declare <4 x i16> @llvm.riscv.sf.vc.v.x.nxv4i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) - define <8 x i16> @test_sf_vc_v_x_e16m2(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e16m2: ; CHECK: # %bb.0: # %entry @@ -613,8 +521,6 @@ entry: ret <8 x i16> %0 } -declare <8 x i16> @llvm.riscv.sf.vc.v.x.nxv8i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) - define <16 x i16> @test_sf_vc_v_x_e16m4(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e16m4: ; CHECK: # %bb.0: # %entry @@ -626,8 +532,6 @@ entry: ret <16 x i16> %0 } -declare <16 x i16> @llvm.riscv.sf.vc.v.x.nxv16i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) - define <32 x i16> @test_sf_vc_v_x_e16m8(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e16m8: ; CHECK: # %bb.0: # %entry @@ -639,8 +543,6 @@ entry: ret <32 x i16> %0 } -declare <32 x i16> @llvm.riscv.sf.vc.v.x.nxv32i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) - define <1 x i32> @test_sf_vc_v_x_e32mf2(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e32mf2: ; CHECK: # %bb.0: # %entry @@ -652,8 +554,6 @@ entry: ret <1 x i32> %0 } -declare <1 x i32> @llvm.riscv.sf.vc.v.x.nxv1i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) - define <2 x i32> @test_sf_vc_v_x_e32m1(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e32m1: ; CHECK: # %bb.0: # 
%entry @@ -665,8 +565,6 @@ entry: ret <2 x i32> %0 } -declare <2 x i32> @llvm.riscv.sf.vc.v.x.nxv2i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) - define <4 x i32> @test_sf_vc_v_x_e32m2(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e32m2: ; CHECK: # %bb.0: # %entry @@ -678,8 +576,6 @@ entry: ret <4 x i32> %0 } -declare <4 x i32> @llvm.riscv.sf.vc.v.x.nxv4i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) - define <8 x i32> @test_sf_vc_v_x_e32m4(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e32m4: ; CHECK: # %bb.0: # %entry @@ -691,8 +587,6 @@ entry: ret <8 x i32> %0 } -declare <8 x i32> @llvm.riscv.sf.vc.v.x.nxv8i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) - define <16 x i32> @test_sf_vc_v_x_e32m8(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e32m8: ; CHECK: # %bb.0: # %entry @@ -704,8 +598,6 @@ entry: ret <16 x i32> %0 } -declare <16 x i32> @llvm.riscv.sf.vc.v.x.nxv16i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) - define void @test_sf_vc_i_se_e8mf8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -717,8 +609,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e8mf8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e8mf4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -730,8 +620,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e8mf4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e8mf2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -743,8 +631,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e8mf2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e8m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -756,8 +642,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e8m1.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, 
iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e8m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -769,8 +653,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e8m2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e8m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -782,8 +664,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e8m4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e8m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -795,8 +675,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e8m8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e16mf4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -808,8 +686,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e16mf4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e16mf2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -821,8 +697,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e16mf2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e16m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -834,8 +708,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e16m1.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e16m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -847,8 +719,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e16m2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e16m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -860,8 +730,6 @@ 
entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e16m4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e16m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -873,8 +741,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e16m8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e32mf2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -886,8 +752,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e32mf2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e32m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -899,8 +763,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e32m1.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e32m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -912,8 +774,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e32m2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e32m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -925,8 +785,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e32m4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e32m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -938,8 +796,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e32m8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e64m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -951,8 +807,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e64m1.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void 
@test_sf_vc_i_se_e64m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -964,8 +818,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e64m2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e64m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -977,8 +829,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e64m4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e64m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -990,8 +840,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e64m8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define <1 x i8> @test_sf_vc_v_i_se_e8mf8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1003,8 +851,6 @@ entry: ret <1 x i8> %0 } -declare <1 x i8> @llvm.riscv.sf.vc.v.i.se.nxv1i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <2 x i8> @test_sf_vc_v_i_se_e8mf4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1016,8 +862,6 @@ entry: ret <2 x i8> %0 } -declare <2 x i8> @llvm.riscv.sf.vc.v.i.se.nxv2i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <4 x i8> @test_sf_vc_v_i_se_e8mf2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1029,8 +873,6 @@ entry: ret <4 x i8> %0 } -declare <4 x i8> @llvm.riscv.sf.vc.v.i.se.nxv4i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <8 x i8> @test_sf_vc_v_i_se_e8m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -1042,8 +884,6 @@ entry: ret <8 x i8> %0 } -declare <8 x i8> @llvm.riscv.sf.vc.v.i.se.nxv8i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <16 x i8> @test_sf_vc_v_i_se_e8m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -1055,8 +895,6 @@ entry: ret <16 x i8> %0 
} -declare <16 x i8> @llvm.riscv.sf.vc.v.i.se.nxv16i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <32 x i8> @test_sf_vc_v_i_se_e8m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -1068,8 +906,6 @@ entry: ret <32 x i8> %0 } -declare <32 x i8> @llvm.riscv.sf.vc.v.i.se.nxv32i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <64 x i8> @test_sf_vc_v_i_se_e8m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -1081,8 +917,6 @@ entry: ret <64 x i8> %0 } -declare <64 x i8> @llvm.riscv.sf.vc.v.i.se.nxv64i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <1 x i16> @test_sf_vc_v_i_se_e16mf4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1094,8 +928,6 @@ entry: ret <1 x i16> %0 } -declare <1 x i16> @llvm.riscv.sf.vc.v.i.se.nxv1i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <2 x i16> @test_sf_vc_v_i_se_e16mf2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1107,8 +939,6 @@ entry: ret <2 x i16> %0 } -declare <2 x i16> @llvm.riscv.sf.vc.v.i.se.nxv2i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <4 x i16> @test_sf_vc_v_i_se_e16m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -1120,8 +950,6 @@ entry: ret <4 x i16> %0 } -declare <4 x i16> @llvm.riscv.sf.vc.v.i.se.nxv4i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <8 x i16> @test_sf_vc_v_i_se_e16m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -1133,8 +961,6 @@ entry: ret <8 x i16> %0 } -declare <8 x i16> @llvm.riscv.sf.vc.v.i.se.nxv8i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <16 x i16> @test_sf_vc_v_i_se_e16m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -1146,8 +972,6 @@ entry: ret <16 x i16> %0 } -declare <16 x i16> @llvm.riscv.sf.vc.v.i.se.nxv16i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <32 
x i16> @test_sf_vc_v_i_se_e16m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -1159,8 +983,6 @@ entry: ret <32 x i16> %0 } -declare <32 x i16> @llvm.riscv.sf.vc.v.i.se.nxv32i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <1 x i32> @test_sf_vc_v_i_se_e32mf2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1172,8 +994,6 @@ entry: ret <1 x i32> %0 } -declare <1 x i32> @llvm.riscv.sf.vc.v.i.se.nxv1i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <2 x i32> @test_sf_vc_v_i_se_e32m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1185,8 +1005,6 @@ entry: ret <2 x i32> %0 } -declare <2 x i32> @llvm.riscv.sf.vc.v.i.se.nxv2i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <4 x i32> @test_sf_vc_v_i_se_e32m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1198,8 +1016,6 @@ entry: ret <4 x i32> %0 } -declare <4 x i32> @llvm.riscv.sf.vc.v.i.se.nxv4i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <8 x i32> @test_sf_vc_v_i_se_e32m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1211,8 +1027,6 @@ entry: ret <8 x i32> %0 } -declare <8 x i32> @llvm.riscv.sf.vc.v.i.se.nxv8i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <16 x i32> @test_sf_vc_v_i_se_e32m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -1224,8 +1038,6 @@ entry: ret <16 x i32> %0 } -declare <16 x i32> @llvm.riscv.sf.vc.v.i.se.nxv16i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <1 x i64> @test_sf_vc_v_i_se_e64m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -1237,8 +1049,6 @@ entry: ret <1 x i64> %0 } -declare <1 x i64> @llvm.riscv.sf.vc.v.i.se.nxv1i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <2 x i64> @test_sf_vc_v_i_se_e64m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e64m2: ; CHECK: # %bb.0: 
# %entry @@ -1250,8 +1060,6 @@ entry: ret <2 x i64> %0 } -declare <2 x i64> @llvm.riscv.sf.vc.v.i.se.nxv2i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <4 x i64> @test_sf_vc_v_i_se_e64m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -1263,8 +1071,6 @@ entry: ret <4 x i64> %0 } -declare <4 x i64> @llvm.riscv.sf.vc.v.i.se.nxv4i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <8 x i64> @test_sf_vc_v_i_se_e64m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -1276,8 +1082,6 @@ entry: ret <8 x i64> %0 } -declare <8 x i64> @llvm.riscv.sf.vc.v.i.se.nxv8i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <1 x i8> @test_sf_vc_v_i_e8mf8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1289,8 +1093,6 @@ entry: ret <1 x i8> %0 } -declare <1 x i8> @llvm.riscv.sf.vc.v.i.nxv1i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <2 x i8> @test_sf_vc_v_i_e8mf4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1302,8 +1104,6 @@ entry: ret <2 x i8> %0 } -declare <2 x i8> @llvm.riscv.sf.vc.v.i.nxv2i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <4 x i8> @test_sf_vc_v_i_e8mf2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1315,8 +1115,6 @@ entry: ret <4 x i8> %0 } -declare <4 x i8> @llvm.riscv.sf.vc.v.i.nxv4i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <8 x i8> @test_sf_vc_v_i_e8m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e8m1: ; CHECK: # %bb.0: # %entry @@ -1328,8 +1126,6 @@ entry: ret <8 x i8> %0 } -declare <8 x i8> @llvm.riscv.sf.vc.v.i.nxv8i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <16 x i8> @test_sf_vc_v_i_e8m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e8m2: ; CHECK: # %bb.0: # %entry @@ -1341,8 +1137,6 @@ entry: ret <16 x i8> %0 } -declare <16 x i8> @llvm.riscv.sf.vc.v.i.nxv16i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <32 x i8> 
@test_sf_vc_v_i_e8m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e8m4: ; CHECK: # %bb.0: # %entry @@ -1354,8 +1148,6 @@ entry: ret <32 x i8> %0 } -declare <32 x i8> @llvm.riscv.sf.vc.v.i.nxv32i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <64 x i8> @test_sf_vc_v_i_e8m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e8m8: ; CHECK: # %bb.0: # %entry @@ -1367,8 +1159,6 @@ entry: ret <64 x i8> %0 } -declare <64 x i8> @llvm.riscv.sf.vc.v.i.nxv64i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <1 x i16> @test_sf_vc_v_i_e16mf4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1380,8 +1170,6 @@ entry: ret <1 x i16> %0 } -declare <1 x i16> @llvm.riscv.sf.vc.v.i.nxv1i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <2 x i16> @test_sf_vc_v_i_e16mf2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1393,8 +1181,6 @@ entry: ret <2 x i16> %0 } -declare <2 x i16> @llvm.riscv.sf.vc.v.i.nxv2i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <4 x i16> @test_sf_vc_v_i_e16m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e16m1: ; CHECK: # %bb.0: # %entry @@ -1406,8 +1192,6 @@ entry: ret <4 x i16> %0 } -declare <4 x i16> @llvm.riscv.sf.vc.v.i.nxv4i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <8 x i16> @test_sf_vc_v_i_e16m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e16m2: ; CHECK: # %bb.0: # %entry @@ -1419,8 +1203,6 @@ entry: ret <8 x i16> %0 } -declare <8 x i16> @llvm.riscv.sf.vc.v.i.nxv8i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <16 x i16> @test_sf_vc_v_i_e16m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e16m4: ; CHECK: # %bb.0: # %entry @@ -1432,8 +1214,6 @@ entry: ret <16 x i16> %0 } -declare <16 x i16> @llvm.riscv.sf.vc.v.i.nxv16i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <32 x i16> @test_sf_vc_v_i_e16m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e16m8: ; CHECK: # %bb.0: # %entry @@ -1445,8 +1225,6 @@ entry: ret <32 x i16> %0 } -declare <32 x i16> 
@llvm.riscv.sf.vc.v.i.nxv32i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <1 x i32> @test_sf_vc_v_i_e32mf2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1458,8 +1236,6 @@ entry: ret <1 x i32> %0 } -declare <1 x i32> @llvm.riscv.sf.vc.v.i.nxv1i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <2 x i32> @test_sf_vc_v_i_e32m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e32m1: ; CHECK: # %bb.0: # %entry @@ -1471,8 +1247,6 @@ entry: ret <2 x i32> %0 } -declare <2 x i32> @llvm.riscv.sf.vc.v.i.nxv2i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <4 x i32> @test_sf_vc_v_i_e32m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e32m2: ; CHECK: # %bb.0: # %entry @@ -1484,8 +1258,6 @@ entry: ret <4 x i32> %0 } -declare <4 x i32> @llvm.riscv.sf.vc.v.i.nxv4i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <8 x i32> @test_sf_vc_v_i_e32m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e32m4: ; CHECK: # %bb.0: # %entry @@ -1497,8 +1269,6 @@ entry: ret <8 x i32> %0 } -declare <8 x i32> @llvm.riscv.sf.vc.v.i.nxv8i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <16 x i32> @test_sf_vc_v_i_e32m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e32m8: ; CHECK: # %bb.0: # %entry @@ -1510,8 +1280,6 @@ entry: ret <16 x i32> %0 } -declare <16 x i32> @llvm.riscv.sf.vc.v.i.nxv16i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <1 x i64> @test_sf_vc_v_i_e64m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e64m1: ; CHECK: # %bb.0: # %entry @@ -1523,8 +1291,6 @@ entry: ret <1 x i64> %0 } -declare <1 x i64> @llvm.riscv.sf.vc.v.i.nxv1i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <2 x i64> @test_sf_vc_v_i_e64m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e64m2: ; CHECK: # %bb.0: # %entry @@ -1536,8 +1302,6 @@ entry: ret <2 x i64> %0 } -declare <2 x i64> @llvm.riscv.sf.vc.v.i.nxv2i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <4 x i64> @test_sf_vc_v_i_e64m4(iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_v_i_e64m4: ; CHECK: # %bb.0: # %entry @@ -1549,8 +1313,6 @@ entry: ret <4 x i64> %0 } -declare <4 x i64> @llvm.riscv.sf.vc.v.i.nxv4i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <8 x i64> @test_sf_vc_v_i_e64m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e64m8: ; CHECK: # %bb.0: # %entry @@ -1562,8 +1324,6 @@ entry: ret <8 x i64> %0 } -declare <8 x i64> @llvm.riscv.sf.vc.v.i.nxv8i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <1 x half> @test_sf_vc_fv_x_se_e16mf4(i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_x_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1575,8 +1335,6 @@ entry: ret <1 x half> %0 } -declare <1 x half> @llvm.riscv.sf.vc.v.x.se.nxv1f16.i16.iXLen(iXLen, iXLen, i16, iXLen) - define <2 x half> @test_sf_vc_fv_x_se_e16mf2(i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_x_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1588,8 +1346,6 @@ entry: ret <2 x half> %0 } -declare <2 x half> @llvm.riscv.sf.vc.v.x.se.nxv2f16.i16.iXLen(iXLen, iXLen, i16, iXLen) - define <4 x half> @test_sf_vc_fv_x_se_e16m1(i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_x_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -1601,8 +1357,6 @@ entry: ret <4 x half> %0 } -declare <4 x half> @llvm.riscv.sf.vc.v.x.se.nxv4f16.i16.iXLen(iXLen, iXLen, i16, iXLen) - define <8 x half> @test_sf_vc_fv_x_se_e16m2(i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_x_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -1614,8 +1368,6 @@ entry: ret <8 x half> %0 } -declare <8 x half> @llvm.riscv.sf.vc.v.x.se.nxv8f16.i16.iXLen(iXLen, iXLen, i16, iXLen) - define <16 x half> @test_sf_vc_fv_x_se_e16m4(i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_x_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -1627,8 +1379,6 @@ entry: ret <16 x half> %0 } -declare <16 x half> @llvm.riscv.sf.vc.v.x.se.nxv16f16.i16.iXLen(iXLen, iXLen, i16, iXLen) - define <32 x half> @test_sf_vc_fv_x_se_e16m8(i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_x_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -1640,8 +1390,6 @@ 
entry: ret <32 x half> %0 } -declare <32 x half> @llvm.riscv.sf.vc.v.x.se.nxv32f16.i16.iXLen(iXLen, iXLen, i16, iXLen) - define <1 x float> @test_sf_vc_fv_x_se_e32mf2(i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_x_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1653,8 +1401,6 @@ entry: ret <1 x float> %0 } -declare <1 x float> @llvm.riscv.sf.vc.v.x.se.nxv1f32.i32.iXLen(iXLen, iXLen, i32, iXLen) - define <2 x float> @test_sf_vc_fv_x_se_e32m1(i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_x_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1666,8 +1412,6 @@ entry: ret <2 x float> %0 } -declare <2 x float> @llvm.riscv.sf.vc.v.x.se.nxv2f32.i32.iXLen(iXLen, iXLen, i32, iXLen) - define <4 x float> @test_sf_vc_fv_x_se_e32m2(i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_x_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1679,8 +1423,6 @@ entry: ret <4 x float> %0 } -declare <4 x float> @llvm.riscv.sf.vc.v.x.se.nxv4f32.i32.iXLen(iXLen, iXLen, i32, iXLen) - define <8 x float> @test_sf_vc_fv_x_se_e32m4(i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_x_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1692,8 +1434,6 @@ entry: ret <8 x float> %0 } -declare <8 x float> @llvm.riscv.sf.vc.v.x.se.nxv8f32.i32.iXLen(iXLen, iXLen, i32, iXLen) - define <16 x float> @test_sf_vc_fv_x_se_e32m8(i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_x_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -1705,8 +1445,6 @@ entry: ret <16 x float> %0 } -declare <16 x float> @llvm.riscv.sf.vc.v.x.se.nxv16f32.i32.iXLen(iXLen, iXLen, i32, iXLen) - define <1 x half> @test_sf_vc_fv_i_se_e16mf4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_i_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1718,8 +1456,6 @@ entry: ret <1 x half> %0 } -declare <1 x half> @llvm.riscv.sf.vc.v.i.se.nxv1f16.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <2 x half> @test_sf_vc_fv_i_se_e16mf2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_i_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1731,8 +1467,6 @@ entry: ret <2 x half> %0 } -declare <2 x half> 
@llvm.riscv.sf.vc.v.i.se.nxv2f16.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <4 x half> @test_sf_vc_fv_i_se_e16m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_i_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -1744,8 +1478,6 @@ entry: ret <4 x half> %0 } -declare <4 x half> @llvm.riscv.sf.vc.v.i.se.nxv4f16.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <8 x half> @test_sf_vc_fv_i_se_e16m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_i_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -1757,8 +1489,6 @@ entry: ret <8 x half> %0 } -declare <8 x half> @llvm.riscv.sf.vc.v.i.se.nxv8f16.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <16 x half> @test_sf_vc_fv_i_se_e16m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_i_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -1770,8 +1500,6 @@ entry: ret <16 x half> %0 } -declare <16 x half> @llvm.riscv.sf.vc.v.i.se.nxv16f16.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <32 x half> @test_sf_vc_fv_i_se_e16m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_i_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -1783,8 +1511,6 @@ entry: ret <32 x half> %0 } -declare <32 x half> @llvm.riscv.sf.vc.v.i.se.nxv32f16.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <1 x float> @test_sf_vc_fv_i_se_e32mf2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_i_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1796,8 +1522,6 @@ entry: ret <1 x float> %0 } -declare <1 x float> @llvm.riscv.sf.vc.v.i.se.nxv1f32.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <2 x float> @test_sf_vc_fv_i_se_e32m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_i_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1809,8 +1533,6 @@ entry: ret <2 x float> %0 } -declare <2 x float> @llvm.riscv.sf.vc.v.i.se.nxv2f32.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <4 x float> @test_sf_vc_fv_i_se_e32m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_i_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1822,8 +1544,6 @@ entry: ret <4 x float> %0 } -declare <4 x float> @llvm.riscv.sf.vc.v.i.se.nxv4f32.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <8 x float> 
@test_sf_vc_fv_i_se_e32m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_i_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1835,8 +1555,6 @@ entry: ret <8 x float> %0 } -declare <8 x float> @llvm.riscv.sf.vc.v.i.se.nxv8f32.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define <16 x float> @test_sf_vc_fv_i_se_e32m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_i_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -1848,4 +1566,3 @@ entry: ret <16 x float> %0 } -declare <16 x float> @llvm.riscv.sf.vc.v.i.se.nxv16f32.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-xv.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-xv.ll index b553a62ae496a..1ec74e3452c57 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-xv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-xv.ll @@ -15,8 +15,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1i8.nxv1i8.iXLen(iXLen, iXLen, <1 x i8>, <1 x i8>, iXLen) - define void @test_sf_vc_vv_se_e8mf4(<2 x i8> %vs2, <2 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -28,8 +26,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2i8.nxv2i8.iXLen(iXLen, iXLen, <2 x i8>, <2 x i8>, iXLen) - define void @test_sf_vc_vv_se_e8mf2(<4 x i8> %vs2, <4 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -41,8 +37,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4i8.nxv4i8.iXLen(iXLen, iXLen, <4 x i8>, <4 x i8>, iXLen) - define void @test_sf_vc_vv_se_e8m1(<8 x i8> %vs2, <8 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -54,8 +48,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8i8.nxv8i8.iXLen(iXLen, iXLen, <8 x i8>, <8 x i8>, iXLen) - define void @test_sf_vc_vv_se_e8m2(<16 x i8> %vs2, <16 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -67,8 +59,6 @@ entry: ret void 
} -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16i8.nxv16i8.iXLen(iXLen, iXLen, <16 x i8>, <16 x i8>, iXLen) - define void @test_sf_vc_vv_se_e8m4(<32 x i8> %vs2, <32 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -80,8 +70,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv32i8.nxv32i8.iXLen(iXLen, iXLen, <32 x i8>, <32 x i8>, iXLen) - define void @test_sf_vc_vv_se_e8m8(<64 x i8> %vs2, <64 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -93,8 +81,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv64i8.nxv64i8.iXLen(iXLen, iXLen, <64 x i8>, <64 x i8>, iXLen) - define void @test_sf_vc_vv_se_e16mf4(<1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -106,8 +92,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1i16.nxv1i16.iXLen(iXLen, iXLen, <1 x i16>, <1 x i16>, iXLen) - define void @test_sf_vc_vv_se_e16mf2(<2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -119,8 +103,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2i16.nxv2i16.iXLen(iXLen, iXLen, <2 x i16>, <2 x i16>, iXLen) - define void @test_sf_vc_vv_se_e16m1(<4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -132,8 +114,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4i16.nxv4i16.iXLen(iXLen, iXLen, <4 x i16>, <4 x i16>, iXLen) - define void @test_sf_vc_vv_se_e16m2(<8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -145,8 +125,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8i16.nxv8i16.iXLen(iXLen, iXLen, <8 x i16>, <8 x i16>, iXLen) - define void @test_sf_vc_vv_se_e16m4(<16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_vv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -158,8 +136,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16i16.nxv16i16.iXLen(iXLen, iXLen, <16 x i16>, <16 x i16>, iXLen) - define void @test_sf_vc_vv_se_e16m8(<32 x i16> %vs2, <32 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -171,8 +147,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv32i16.nxv32i16.iXLen(iXLen, iXLen, <32 x i16>, <32 x i16>, iXLen) - define void @test_sf_vc_vv_se_e32mf2(<1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -184,8 +158,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1i32.nxv1i32.iXLen(iXLen, iXLen, <1 x i32>, <1 x i32>, iXLen) - define void @test_sf_vc_vv_se_e32m1(<2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -197,8 +169,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2i32.nxv2i32.iXLen(iXLen, iXLen, <2 x i32>, <2 x i32>, iXLen) - define void @test_sf_vc_vv_se_e32m2(<4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -210,8 +180,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4i32.nxv4i32.iXLen(iXLen, iXLen, <4 x i32>, <4 x i32>, iXLen) - define void @test_sf_vc_vv_se_e32m4(<8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -223,8 +191,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8i32.nxv8i32.iXLen(iXLen, iXLen, <8 x i32>, <8 x i32>, iXLen) - define void @test_sf_vc_vv_se_e32m8(<16 x i32> %vs2, <16 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -236,8 +202,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16i32.nxv16i32.iXLen(iXLen, iXLen, <16 x i32>, <16 x i32>, iXLen) - 
define void @test_sf_vc_vv_se_e64m1(<1 x i64> %vs2, <1 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -249,8 +213,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1i64.nxv1i64.iXLen(iXLen, iXLen, <1 x i64>, <1 x i64>, iXLen) - define void @test_sf_vc_vv_se_e64m2(<2 x i64> %vs2, <2 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -262,8 +224,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2i64.nxv2i64.iXLen(iXLen, iXLen, <2 x i64>, <2 x i64>, iXLen) - define void @test_sf_vc_vv_se_e64m4(<4 x i64> %vs2, <4 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -275,8 +235,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4i64.nxv4i64.iXLen(iXLen, iXLen, <4 x i64>, <4 x i64>, iXLen) - define void @test_sf_vc_vv_se_e64m8(<8 x i64> %vs2, <8 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -288,8 +246,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8i64.nxv8i64.iXLen(iXLen, iXLen, <8 x i64>, <8 x i64>, iXLen) - define <1 x i8> @test_sf_vc_v_vv_se_e8mf8(<1 x i8> %vs2, <1 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -301,8 +257,6 @@ entry: ret <1 x i8> %0 } -declare <1 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv1i8.iXLen.nxv1i8.iXLen(iXLen, <1 x i8>, <1 x i8>, iXLen) - define <2 x i8> @test_sf_vc_v_vv_se_e8mf4(<2 x i8> %vs2, <2 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -314,8 +268,6 @@ entry: ret <2 x i8> %0 } -declare <2 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv2i8.iXLen.nxv2i8.iXLen(iXLen, <2 x i8>, <2 x i8>, iXLen) - define <4 x i8> @test_sf_vc_v_vv_se_e8mf2(<4 x i8> %vs2, <4 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -327,8 +279,6 @@ entry: ret <4 x i8> %0 } -declare 
<4 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv4i8.iXLen.nxv4i8.iXLen(iXLen, <4 x i8>, <4 x i8>, iXLen) - define <8 x i8> @test_sf_vc_v_vv_se_e8m1(<8 x i8> %vs2, <8 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -340,8 +290,6 @@ entry: ret <8 x i8> %0 } -declare <8 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv8i8.iXLen.nxv8i8.iXLen(iXLen, <8 x i8>, <8 x i8>, iXLen) - define <16 x i8> @test_sf_vc_v_vv_se_e8m2(<16 x i8> %vs2, <16 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -353,8 +301,6 @@ entry: ret <16 x i8> %0 } -declare <16 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv16i8.iXLen.nxv16i8.iXLen(iXLen, <16 x i8>, <16 x i8>, iXLen) - define <32 x i8> @test_sf_vc_v_vv_se_e8m4(<32 x i8> %vs2, <32 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -366,8 +312,6 @@ entry: ret <32 x i8> %0 } -declare <32 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv32i8.iXLen.nxv32i8.iXLen(iXLen, <32 x i8>, <32 x i8>, iXLen) - define <64 x i8> @test_sf_vc_v_vv_se_e8m8(<64 x i8> %vs2, <64 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -379,8 +323,6 @@ entry: ret <64 x i8> %0 } -declare <64 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv64i8.iXLen.nxv64i8.iXLen(iXLen, <64 x i8>, <64 x i8>, iXLen) - define <1 x i16> @test_sf_vc_v_vv_se_e16mf4(<1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -392,8 +334,6 @@ entry: ret <1 x i16> %0 } -declare <1 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv1i16.iXLen.nxv1i16.iXLen(iXLen, <1 x i16>, <1 x i16>, iXLen) - define <2 x i16> @test_sf_vc_v_vv_se_e16mf2(<2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -405,8 +345,6 @@ entry: ret <2 x i16> %0 } -declare <2 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv2i16.iXLen.nxv2i16.iXLen(iXLen, <2 x i16>, <2 x i16>, iXLen) - define <4 x i16> 
@test_sf_vc_v_vv_se_e16m1(<4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -418,8 +356,6 @@ entry: ret <4 x i16> %0 } -declare <4 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv4i16.iXLen.nxv4i16.iXLen(iXLen, <4 x i16>, <4 x i16>, iXLen) - define <8 x i16> @test_sf_vc_v_vv_se_e16m2(<8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -431,8 +367,6 @@ entry: ret <8 x i16> %0 } -declare <8 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv8i16.iXLen.nxv8i16.iXLen(iXLen, <8 x i16>, <8 x i16>, iXLen) - define <16 x i16> @test_sf_vc_v_vv_se_e16m4(<16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -444,8 +378,6 @@ entry: ret <16 x i16> %0 } -declare <16 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv16i16.iXLen.nxv16i16.iXLen(iXLen, <16 x i16>, <16 x i16>, iXLen) - define <32 x i16> @test_sf_vc_v_vv_se_e16m8(<32 x i16> %vs2, <32 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -457,8 +389,6 @@ entry: ret <32 x i16> %0 } -declare <32 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv32i16.iXLen.nxv32i16.iXLen(iXLen, <32 x i16>, <32 x i16>, iXLen) - define <1 x i32> @test_sf_vc_v_vv_se_e32mf2(<1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -470,8 +400,6 @@ entry: ret <1 x i32> %0 } -declare <1 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv1i32.iXLen.nxv1i32.iXLen(iXLen, <1 x i32>, <1 x i32>, iXLen) - define <2 x i32> @test_sf_vc_v_vv_se_e32m1(<2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -483,8 +411,6 @@ entry: ret <2 x i32> %0 } -declare <2 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv2i32.iXLen.nxv2i32.iXLen(iXLen, <2 x i32>, <2 x i32>, iXLen) - define <4 x i32> @test_sf_vc_v_vv_se_e32m2(<4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_v_vv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -496,8 +422,6 @@ entry: ret <4 x i32> %0 } -declare <4 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv4i32.iXLen.nxv4i32.iXLen(iXLen, <4 x i32>, <4 x i32>, iXLen) - define <8 x i32> @test_sf_vc_v_vv_se_e32m4(<8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -509,8 +433,6 @@ entry: ret <8 x i32> %0 } -declare <8 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv8i32.iXLen.nxv8i32.iXLen(iXLen, <8 x i32>, <8 x i32>, iXLen) - define <16 x i32> @test_sf_vc_v_vv_se_e32m8(<16 x i32> %vs2, <16 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -522,8 +444,6 @@ entry: ret <16 x i32> %0 } -declare <16 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv16i32.iXLen.nxv16i32.iXLen(iXLen, <16 x i32>, <16 x i32>, iXLen) - define <1 x i64> @test_sf_vc_v_vv_se_e64m1(<1 x i64> %vs2, <1 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -535,8 +455,6 @@ entry: ret <1 x i64> %0 } -declare <1 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv1i64.iXLen.nxv1i64.iXLen(iXLen, <1 x i64>, <1 x i64>, iXLen) - define <2 x i64> @test_sf_vc_v_vv_se_e64m2(<2 x i64> %vs2, <2 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -548,8 +466,6 @@ entry: ret <2 x i64> %0 } -declare <2 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv2i64.iXLen.nxv2i64.iXLen(iXLen, <2 x i64>, <2 x i64>, iXLen) - define <4 x i64> @test_sf_vc_v_vv_se_e64m4(<4 x i64> %vs2, <4 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -561,8 +477,6 @@ entry: ret <4 x i64> %0 } -declare <4 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv4i64.iXLen.nxv4i64.iXLen(iXLen, <4 x i64>, <4 x i64>, iXLen) - define <8 x i64> @test_sf_vc_v_vv_se_e64m8(<8 x i64> %vs2, <8 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -574,8 +488,6 @@ entry: ret <8 x i64> %0 } -declare <8 x 
i64> @llvm.riscv.sf.vc.v.vv.se.nxv8i64.iXLen.nxv8i64.iXLen(iXLen, <8 x i64>, <8 x i64>, iXLen) - define <1 x i8> @test_sf_vc_v_vv_e8mf8(<1 x i8> %vs2, <1 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e8mf8: ; CHECK: # %bb.0: # %entry @@ -587,8 +499,6 @@ entry: ret <1 x i8> %0 } -declare <1 x i8> @llvm.riscv.sf.vc.v.vv.nxv1i8.iXLen.nxv1i8.iXLen(iXLen, <1 x i8>, <1 x i8>, iXLen) - define <2 x i8> @test_sf_vc_v_vv_e8mf4(<2 x i8> %vs2, <2 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e8mf4: ; CHECK: # %bb.0: # %entry @@ -600,8 +510,6 @@ entry: ret <2 x i8> %0 } -declare <2 x i8> @llvm.riscv.sf.vc.v.vv.nxv2i8.iXLen.nxv2i8.iXLen(iXLen, <2 x i8>, <2 x i8>, iXLen) - define <4 x i8> @test_sf_vc_v_vv_e8mf2(<4 x i8> %vs2, <4 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e8mf2: ; CHECK: # %bb.0: # %entry @@ -613,8 +521,6 @@ entry: ret <4 x i8> %0 } -declare <4 x i8> @llvm.riscv.sf.vc.v.vv.nxv4i8.iXLen.nxv4i8.iXLen(iXLen, <4 x i8>, <4 x i8>, iXLen) - define <8 x i8> @test_sf_vc_v_vv_e8m1(<8 x i8> %vs2, <8 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e8m1: ; CHECK: # %bb.0: # %entry @@ -626,8 +532,6 @@ entry: ret <8 x i8> %0 } -declare <8 x i8> @llvm.riscv.sf.vc.v.vv.nxv8i8.iXLen.nxv8i8.iXLen(iXLen, <8 x i8>, <8 x i8>, iXLen) - define <16 x i8> @test_sf_vc_v_vv_e8m2(<16 x i8> %vs2, <16 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e8m2: ; CHECK: # %bb.0: # %entry @@ -639,8 +543,6 @@ entry: ret <16 x i8> %0 } -declare <16 x i8> @llvm.riscv.sf.vc.v.vv.nxv16i8.iXLen.nxv16i8.iXLen(iXLen, <16 x i8>, <16 x i8>, iXLen) - define <32 x i8> @test_sf_vc_v_vv_e8m4(<32 x i8> %vs2, <32 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e8m4: ; CHECK: # %bb.0: # %entry @@ -652,8 +554,6 @@ entry: ret <32 x i8> %0 } -declare <32 x i8> @llvm.riscv.sf.vc.v.vv.nxv32i8.iXLen.nxv32i8.iXLen(iXLen, <32 x i8>, <32 x i8>, iXLen) - define <64 x i8> @test_sf_vc_v_vv_e8m8(<64 x i8> %vs2, <64 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e8m8: 
; CHECK: # %bb.0: # %entry @@ -665,8 +565,6 @@ entry: ret <64 x i8> %0 } -declare <64 x i8> @llvm.riscv.sf.vc.v.vv.nxv64i8.iXLen.nxv64i8.iXLen(iXLen, <64 x i8>, <64 x i8>, iXLen) - define <1 x i16> @test_sf_vc_v_vv_e16mf4(<1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e16mf4: ; CHECK: # %bb.0: # %entry @@ -678,8 +576,6 @@ entry: ret <1 x i16> %0 } -declare <1 x i16> @llvm.riscv.sf.vc.v.vv.nxv1i16.iXLen.nxv1i16.iXLen(iXLen, <1 x i16>, <1 x i16>, iXLen) - define <2 x i16> @test_sf_vc_v_vv_e16mf2(<2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e16mf2: ; CHECK: # %bb.0: # %entry @@ -691,8 +587,6 @@ entry: ret <2 x i16> %0 } -declare <2 x i16> @llvm.riscv.sf.vc.v.vv.nxv2i16.iXLen.nxv2i16.iXLen(iXLen, <2 x i16>, <2 x i16>, iXLen) - define <4 x i16> @test_sf_vc_v_vv_e16m1(<4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e16m1: ; CHECK: # %bb.0: # %entry @@ -704,8 +598,6 @@ entry: ret <4 x i16> %0 } -declare <4 x i16> @llvm.riscv.sf.vc.v.vv.nxv4i16.iXLen.nxv4i16.iXLen(iXLen, <4 x i16>, <4 x i16>, iXLen) - define <8 x i16> @test_sf_vc_v_vv_e16m2(<8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e16m2: ; CHECK: # %bb.0: # %entry @@ -717,8 +609,6 @@ entry: ret <8 x i16> %0 } -declare <8 x i16> @llvm.riscv.sf.vc.v.vv.nxv8i16.iXLen.nxv8i16.iXLen(iXLen, <8 x i16>, <8 x i16>, iXLen) - define <16 x i16> @test_sf_vc_v_vv_e16m4(<16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e16m4: ; CHECK: # %bb.0: # %entry @@ -730,8 +620,6 @@ entry: ret <16 x i16> %0 } -declare <16 x i16> @llvm.riscv.sf.vc.v.vv.nxv16i16.iXLen.nxv16i16.iXLen(iXLen, <16 x i16>, <16 x i16>, iXLen) - define <32 x i16> @test_sf_vc_v_vv_e16m8(<32 x i16> %vs2, <32 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e16m8: ; CHECK: # %bb.0: # %entry @@ -743,8 +631,6 @@ entry: ret <32 x i16> %0 } -declare <32 x i16> @llvm.riscv.sf.vc.v.vv.nxv32i16.iXLen.nxv32i16.iXLen(iXLen, <32 x 
i16>, <32 x i16>, iXLen) - define <1 x i32> @test_sf_vc_v_vv_e32mf2(<1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e32mf2: ; CHECK: # %bb.0: # %entry @@ -756,8 +642,6 @@ entry: ret <1 x i32> %0 } -declare <1 x i32> @llvm.riscv.sf.vc.v.vv.nxv1i32.iXLen.nxv1i32.iXLen(iXLen, <1 x i32>, <1 x i32>, iXLen) - define <2 x i32> @test_sf_vc_v_vv_e32m1(<2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e32m1: ; CHECK: # %bb.0: # %entry @@ -769,8 +653,6 @@ entry: ret <2 x i32> %0 } -declare <2 x i32> @llvm.riscv.sf.vc.v.vv.nxv2i32.iXLen.nxv2i32.iXLen(iXLen, <2 x i32>, <2 x i32>, iXLen) - define <4 x i32> @test_sf_vc_v_vv_e32m2(<4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e32m2: ; CHECK: # %bb.0: # %entry @@ -782,8 +664,6 @@ entry: ret <4 x i32> %0 } -declare <4 x i32> @llvm.riscv.sf.vc.v.vv.nxv4i32.iXLen.nxv4i32.iXLen(iXLen, <4 x i32>, <4 x i32>, iXLen) - define <8 x i32> @test_sf_vc_v_vv_e32m4(<8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e32m4: ; CHECK: # %bb.0: # %entry @@ -795,8 +675,6 @@ entry: ret <8 x i32> %0 } -declare <8 x i32> @llvm.riscv.sf.vc.v.vv.nxv8i32.iXLen.nxv8i32.iXLen(iXLen, <8 x i32>, <8 x i32>, iXLen) - define <16 x i32> @test_sf_vc_v_vv_e32m8(<16 x i32> %vs2, <16 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e32m8: ; CHECK: # %bb.0: # %entry @@ -808,8 +686,6 @@ entry: ret <16 x i32> %0 } -declare <16 x i32> @llvm.riscv.sf.vc.v.vv.nxv16i32.iXLen.nxv16i32.iXLen(iXLen, <16 x i32>, <16 x i32>, iXLen) - define <1 x i64> @test_sf_vc_v_vv_e64m1(<1 x i64> %vs2, <1 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e64m1: ; CHECK: # %bb.0: # %entry @@ -821,8 +697,6 @@ entry: ret <1 x i64> %0 } -declare <1 x i64> @llvm.riscv.sf.vc.v.vv.nxv1i64.iXLen.nxv1i64.iXLen(iXLen, <1 x i64>, <1 x i64>, iXLen) - define <2 x i64> @test_sf_vc_v_vv_e64m2(<2 x i64> %vs2, <2 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e64m2: ; CHECK: # 
%bb.0: # %entry @@ -834,8 +708,6 @@ entry: ret <2 x i64> %0 } -declare <2 x i64> @llvm.riscv.sf.vc.v.vv.nxv2i64.iXLen.nxv2i64.iXLen(iXLen, <2 x i64>, <2 x i64>, iXLen) - define <4 x i64> @test_sf_vc_v_vv_e64m4(<4 x i64> %vs2, <4 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e64m4: ; CHECK: # %bb.0: # %entry @@ -847,8 +719,6 @@ entry: ret <4 x i64> %0 } -declare <4 x i64> @llvm.riscv.sf.vc.v.vv.nxv4i64.iXLen.nxv4i64.iXLen(iXLen, <4 x i64>, <4 x i64>, iXLen) - define <8 x i64> @test_sf_vc_v_vv_e64m8(<8 x i64> %vs2, <8 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e64m8: ; CHECK: # %bb.0: # %entry @@ -860,8 +730,6 @@ entry: ret <8 x i64> %0 } -declare <8 x i64> @llvm.riscv.sf.vc.v.vv.nxv8i64.iXLen.nxv8i64.iXLen(iXLen, <8 x i64>, <8 x i64>, iXLen) - define void @test_sf_vc_xv_se_e8mf8(<1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -873,8 +741,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1i8.i8.iXLen(iXLen, iXLen, <1 x i8>, i8, iXLen) - define void @test_sf_vc_xv_se_e8mf4(<2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -886,8 +752,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2i8.i8.iXLen(iXLen, iXLen, <2 x i8>, i8, iXLen) - define void @test_sf_vc_xv_se_e8mf2(<4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -899,8 +763,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4i8.i8.iXLen(iXLen, iXLen, <4 x i8>, i8, iXLen) - define void @test_sf_vc_xv_se_e8m1(<8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -912,8 +774,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8i8.i8.iXLen(iXLen, iXLen, <8 x i8>, i8, iXLen) - define void @test_sf_vc_xv_se_e8m2(<16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_xv_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -925,8 +785,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16i8.i8.iXLen(iXLen, iXLen, <16 x i8>, i8, iXLen) - define void @test_sf_vc_xv_se_e8m4(<32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -938,8 +796,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv32i8.i8.iXLen(iXLen, iXLen, <32 x i8>, i8, iXLen) - define void @test_sf_vc_xv_se_e8m8(<64 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -951,8 +807,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv64i8.i8.iXLen(iXLen, iXLen, <64 x i8>, i8, iXLen) - define void @test_sf_vc_xv_se_e16mf4(<1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -964,8 +818,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1i16.i16.iXLen(iXLen, iXLen, <1 x i16>, i16, iXLen) - define void @test_sf_vc_xv_se_e16mf2(<2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -977,8 +829,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2i16.i16.iXLen(iXLen, iXLen, <2 x i16>, i16, iXLen) - define void @test_sf_vc_xv_se_e16m1(<4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -990,8 +840,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4i16.i16.iXLen(iXLen, iXLen, <4 x i16>, i16, iXLen) - define void @test_sf_vc_xv_se_e16m2(<8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -1003,8 +851,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8i16.i16.iXLen(iXLen, iXLen, <8 x i16>, i16, iXLen) - define void @test_sf_vc_xv_se_e16m4(<16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; 
CHECK-LABEL: test_sf_vc_xv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -1016,8 +862,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16i16.i16.iXLen(iXLen, iXLen, <16 x i16>, i16, iXLen) - define void @test_sf_vc_xv_se_e16m8(<32 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -1029,8 +873,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv32i16.i16.iXLen(iXLen, iXLen, <32 x i16>, i16, iXLen) - define void @test_sf_vc_xv_se_e32mf2(<1 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1042,8 +884,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1i32.i32.iXLen(iXLen, iXLen, <1 x i32>, i32, iXLen) - define void @test_sf_vc_xv_se_e32m1(<2 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1055,8 +895,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2i32.i32.iXLen(iXLen, iXLen, <2 x i32>, i32, iXLen) - define void @test_sf_vc_xv_se_e32m2(<4 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1068,8 +906,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4i32.i32.iXLen(iXLen, iXLen, <4 x i32>, i32, iXLen) - define void @test_sf_vc_xv_se_e32m4(<8 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1081,8 +917,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8i32.i32.iXLen(iXLen, iXLen, <8 x i32>, i32, iXLen) - define void @test_sf_vc_xv_se_e32m8(<16 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -1094,8 +928,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16i32.i32.iXLen(iXLen, iXLen, <16 x i32>, i32, iXLen) - define <1 x i8> @test_sf_vc_v_xv_se_e8mf8(<1 x i8> 
%vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1107,8 +939,6 @@ entry: ret <1 x i8> %0 } -declare <1 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv1i8.iXLen.i8.iXLen(iXLen, <1 x i8>, i8, iXLen) - define <2 x i8> @test_sf_vc_v_xv_se_e8mf4(<2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1120,8 +950,6 @@ entry: ret <2 x i8> %0 } -declare <2 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv2i8.iXLen.i8.iXLen(iXLen, <2 x i8>, i8, iXLen) - define <4 x i8> @test_sf_vc_v_xv_se_e8mf2(<4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1133,8 +961,6 @@ entry: ret <4 x i8> %0 } -declare <4 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv4i8.iXLen.i8.iXLen(iXLen, <4 x i8>, i8, iXLen) - define <8 x i8> @test_sf_vc_v_xv_se_e8m1(<8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -1146,8 +972,6 @@ entry: ret <8 x i8> %0 } -declare <8 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv8i8.iXLen.i8.iXLen(iXLen, <8 x i8>, i8, iXLen) - define <16 x i8> @test_sf_vc_v_xv_se_e8m2(<16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -1159,8 +983,6 @@ entry: ret <16 x i8> %0 } -declare <16 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv16i8.iXLen.i8.iXLen(iXLen, <16 x i8>, i8, iXLen) - define <32 x i8> @test_sf_vc_v_xv_se_e8m4(<32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -1172,8 +994,6 @@ entry: ret <32 x i8> %0 } -declare <32 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv32i8.iXLen.i8.iXLen(iXLen, <32 x i8>, i8, iXLen) - define <64 x i8> @test_sf_vc_v_xv_se_e8m8(<64 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -1185,8 +1005,6 @@ entry: ret <64 x i8> %0 } -declare <64 x i8> 
@llvm.riscv.sf.vc.v.xv.se.nxv64i8.iXLen.i8.iXLen(iXLen, <64 x i8>, i8, iXLen) - define <1 x i16> @test_sf_vc_v_xv_se_e16mf4(<1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1198,8 +1016,6 @@ entry: ret <1 x i16> %0 } -declare <1 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv1i16.iXLen.i16.iXLen(iXLen, <1 x i16>, i16, iXLen) - define <2 x i16> @test_sf_vc_v_xv_se_e16mf2(<2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1211,8 +1027,6 @@ entry: ret <2 x i16> %0 } -declare <2 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv2i16.iXLen.i16.iXLen(iXLen, <2 x i16>, i16, iXLen) - define <4 x i16> @test_sf_vc_v_xv_se_e16m1(<4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -1224,8 +1038,6 @@ entry: ret <4 x i16> %0 } -declare <4 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv4i16.iXLen.i16.iXLen(iXLen, <4 x i16>, i16, iXLen) - define <8 x i16> @test_sf_vc_v_xv_se_e16m2(<8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -1237,8 +1049,6 @@ entry: ret <8 x i16> %0 } -declare <8 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv8i16.iXLen.i16.iXLen(iXLen, <8 x i16>, i16, iXLen) - define <16 x i16> @test_sf_vc_v_xv_se_e16m4(<16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -1250,8 +1060,6 @@ entry: ret <16 x i16> %0 } -declare <16 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv16i16.iXLen.i16.iXLen(iXLen, <16 x i16>, i16, iXLen) - define <32 x i16> @test_sf_vc_v_xv_se_e16m8(<32 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -1263,8 +1071,6 @@ entry: ret <32 x i16> %0 } -declare <32 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv32i16.iXLen.i16.iXLen(iXLen, <32 x i16>, i16, iXLen) - define <1 x i32> @test_sf_vc_v_xv_se_e32mf2(<1 x i32> %vs2, 
i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1276,8 +1082,6 @@ entry: ret <1 x i32> %0 } -declare <1 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv1i32.i32.i32.iXLen(iXLen, <1 x i32>, i32, iXLen) - define <2 x i32> @test_sf_vc_v_xv_se_e32m1(<2 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1289,8 +1093,6 @@ entry: ret <2 x i32> %0 } -declare <2 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv2i32.i32.i32.iXLen(iXLen, <2 x i32>, i32, iXLen) - define <4 x i32> @test_sf_vc_v_xv_se_e32m2(<4 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1302,8 +1104,6 @@ entry: ret <4 x i32> %0 } -declare <4 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv4i32.i32.i32.iXLen(iXLen, <4 x i32>, i32, iXLen) - define <8 x i32> @test_sf_vc_v_xv_se_e32m4(<8 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1315,8 +1115,6 @@ entry: ret <8 x i32> %0 } -declare <8 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv8i32.i32.i32.iXLen(iXLen, <8 x i32>, i32, iXLen) - define <16 x i32> @test_sf_vc_v_xv_se_e32m8(<16 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -1328,8 +1126,6 @@ entry: ret <16 x i32> %0 } -declare <16 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv16i32.i32.i32.iXLen(iXLen, <16 x i32>, i32, iXLen) - define <1 x i8> @test_sf_vc_v_xv_e8mf8(<1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1341,8 +1137,6 @@ entry: ret <1 x i8> %0 } -declare <1 x i8> @llvm.riscv.sf.vc.v.xv.nxv1i8.iXLen.i8.iXLen(iXLen, <1 x i8>, i8, iXLen) - define <2 x i8> @test_sf_vc_v_xv_e8mf4(<2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1354,8 +1148,6 @@ entry: ret <2 x i8> %0 } -declare <2 x i8> 
@llvm.riscv.sf.vc.v.xv.nxv2i8.iXLen.i8.iXLen(iXLen, <2 x i8>, i8, iXLen) - define <4 x i8> @test_sf_vc_v_xv_e8mf2(<4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1367,8 +1159,6 @@ entry: ret <4 x i8> %0 } -declare <4 x i8> @llvm.riscv.sf.vc.v.xv.nxv4i8.iXLen.i8.iXLen(iXLen, <4 x i8>, i8, iXLen) - define <8 x i8> @test_sf_vc_v_xv_e8m1(<8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e8m1: ; CHECK: # %bb.0: # %entry @@ -1380,8 +1170,6 @@ entry: ret <8 x i8> %0 } -declare <8 x i8> @llvm.riscv.sf.vc.v.xv.nxv8i8.iXLen.i8.iXLen(iXLen, <8 x i8>, i8, iXLen) - define <16 x i8> @test_sf_vc_v_xv_e8m2(<16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e8m2: ; CHECK: # %bb.0: # %entry @@ -1393,8 +1181,6 @@ entry: ret <16 x i8> %0 } -declare <16 x i8> @llvm.riscv.sf.vc.v.xv.nxv16i8.iXLen.i8.iXLen(iXLen, <16 x i8>, i8, iXLen) - define <32 x i8> @test_sf_vc_v_xv_e8m4(<32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e8m4: ; CHECK: # %bb.0: # %entry @@ -1406,8 +1192,6 @@ entry: ret <32 x i8> %0 } -declare <32 x i8> @llvm.riscv.sf.vc.v.xv.nxv32i8.iXLen.i8.iXLen(iXLen, <32 x i8>, i8, iXLen) - define <64 x i8> @test_sf_vc_v_xv_e8m8(<64 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e8m8: ; CHECK: # %bb.0: # %entry @@ -1419,8 +1203,6 @@ entry: ret <64 x i8> %0 } -declare <64 x i8> @llvm.riscv.sf.vc.v.xv.nxv64i8.iXLen.i8.iXLen(iXLen, <64 x i8>, i8, iXLen) - define <1 x i16> @test_sf_vc_v_xv_e16mf4(<1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1432,8 +1214,6 @@ entry: ret <1 x i16> %0 } -declare <1 x i16> @llvm.riscv.sf.vc.v.xv.nxv1i16.iXLen.i16.iXLen(iXLen, <1 x i16>, i16, iXLen) - define <2 x i16> @test_sf_vc_v_xv_e16mf2(<2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1445,8 
+1225,6 @@ entry: ret <2 x i16> %0 } -declare <2 x i16> @llvm.riscv.sf.vc.v.xv.nxv2i16.iXLen.i16.iXLen(iXLen, <2 x i16>, i16, iXLen) - define <4 x i16> @test_sf_vc_v_xv_e16m1(<4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e16m1: ; CHECK: # %bb.0: # %entry @@ -1458,8 +1236,6 @@ entry: ret <4 x i16> %0 } -declare <4 x i16> @llvm.riscv.sf.vc.v.xv.nxv4i16.iXLen.i16.iXLen(iXLen, <4 x i16>, i16, iXLen) - define <8 x i16> @test_sf_vc_v_xv_e16m2(<8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e16m2: ; CHECK: # %bb.0: # %entry @@ -1471,8 +1247,6 @@ entry: ret <8 x i16> %0 } -declare <8 x i16> @llvm.riscv.sf.vc.v.xv.nxv8i16.iXLen.i16.iXLen(iXLen, <8 x i16>, i16, iXLen) - define <16 x i16> @test_sf_vc_v_xv_e16m4(<16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e16m4: ; CHECK: # %bb.0: # %entry @@ -1484,8 +1258,6 @@ entry: ret <16 x i16> %0 } -declare <16 x i16> @llvm.riscv.sf.vc.v.xv.nxv16i16.iXLen.i16.iXLen(iXLen, <16 x i16>, i16, iXLen) - define <32 x i16> @test_sf_vc_v_xv_e16m8(<32 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e16m8: ; CHECK: # %bb.0: # %entry @@ -1497,8 +1269,6 @@ entry: ret <32 x i16> %0 } -declare <32 x i16> @llvm.riscv.sf.vc.v.xv.nxv32i16.iXLen.i16.iXLen(iXLen, <32 x i16>, i16, iXLen) - define <1 x i32> @test_sf_vc_v_xv_e32mf2(<1 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1510,8 +1280,6 @@ entry: ret <1 x i32> %0 } -declare <1 x i32> @llvm.riscv.sf.vc.v.xv.nxv1i32.i32.i32.iXLen(iXLen, <1 x i32>, i32, iXLen) - define <2 x i32> @test_sf_vc_v_xv_e32m1(<2 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e32m1: ; CHECK: # %bb.0: # %entry @@ -1523,8 +1291,6 @@ entry: ret <2 x i32> %0 } -declare <2 x i32> @llvm.riscv.sf.vc.v.xv.nxv2i32.i32.i32.iXLen(iXLen, <2 x i32>, i32, iXLen) - define <4 x i32> @test_sf_vc_v_xv_e32m2(<4 x i32> %vs2, i32 
signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e32m2: ; CHECK: # %bb.0: # %entry @@ -1536,8 +1302,6 @@ entry: ret <4 x i32> %0 } -declare <4 x i32> @llvm.riscv.sf.vc.v.xv.nxv4i32.i32.i32.iXLen(iXLen, <4 x i32>, i32, iXLen) - define <8 x i32> @test_sf_vc_v_xv_e32m4(<8 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e32m4: ; CHECK: # %bb.0: # %entry @@ -1549,8 +1313,6 @@ entry: ret <8 x i32> %0 } -declare <8 x i32> @llvm.riscv.sf.vc.v.xv.nxv8i32.i32.i32.iXLen(iXLen, <8 x i32>, i32, iXLen) - define <16 x i32> @test_sf_vc_v_xv_e32m8(<16 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e32m8: ; CHECK: # %bb.0: # %entry @@ -1562,8 +1324,6 @@ entry: ret <16 x i32> %0 } -declare <16 x i32> @llvm.riscv.sf.vc.v.xv.nxv16i32.i32.i32.iXLen(iXLen, <16 x i32>, i32, iXLen) - define void @test_sf_vc_iv_se_e8mf8(<1 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1575,8 +1335,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1i8.iXLen.iXLen(iXLen, iXLen, <1 x i8>, iXLen, iXLen) - define void @test_sf_vc_iv_se_e8mf4(<2 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1588,8 +1346,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2i8.iXLen.iXLen(iXLen, iXLen, <2 x i8>, iXLen, iXLen) - define void @test_sf_vc_iv_se_e8mf2(<4 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1601,8 +1357,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4i8.iXLen.iXLen(iXLen, iXLen, <4 x i8>, iXLen, iXLen) - define void @test_sf_vc_iv_se_e8m1(<8 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -1614,8 +1368,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8i8.iXLen.iXLen(iXLen, iXLen, <8 x i8>, iXLen, iXLen) - define void @test_sf_vc_iv_se_e8m2(<16 x i8> %vs2, iXLen %vl) { ; 
CHECK-LABEL: test_sf_vc_iv_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -1627,8 +1379,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16i8.iXLen.iXLen(iXLen, iXLen, <16 x i8>, iXLen, iXLen) - define void @test_sf_vc_iv_se_e8m4(<32 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -1640,8 +1390,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv32i8.iXLen.iXLen(iXLen, iXLen, <32 x i8>, iXLen, iXLen) - define void @test_sf_vc_iv_se_e8m8(<64 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -1653,8 +1401,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv64i8.iXLen.iXLen(iXLen, iXLen, <64 x i8>, iXLen, iXLen) - define void @test_sf_vc_iv_se_e16mf4(<1 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1666,8 +1412,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1i16.iXLen.iXLen(iXLen, iXLen, <1 x i16>, iXLen, iXLen) - define void @test_sf_vc_iv_se_e16mf2(<2 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1679,8 +1423,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2i16.iXLen.iXLen(iXLen, iXLen, <2 x i16>, iXLen, iXLen) - define void @test_sf_vc_iv_se_e16m1(<4 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -1692,8 +1434,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4i16.iXLen.iXLen(iXLen, iXLen, <4 x i16>, iXLen, iXLen) - define void @test_sf_vc_iv_se_e16m2(<8 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -1705,8 +1445,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8i16.iXLen.iXLen(iXLen, iXLen, <8 x i16>, iXLen, iXLen) - define void @test_sf_vc_iv_se_e16m4(<16 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ 
-1718,8 +1456,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16i16.iXLen.iXLen(iXLen, iXLen, <16 x i16>, iXLen, iXLen) - define void @test_sf_vc_iv_se_e16m8(<32 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -1731,8 +1467,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv32i16.iXLen.iXLen(iXLen, iXLen, <32 x i16>, iXLen, iXLen) - define void @test_sf_vc_iv_se_e32mf2(<1 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1744,8 +1478,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1i32.iXLen.iXLen(iXLen, iXLen, <1 x i32>, iXLen, iXLen) - define void @test_sf_vc_iv_se_e32m1(<2 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1757,8 +1489,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2i32.iXLen.iXLen(iXLen, iXLen, <2 x i32>, iXLen, iXLen) - define void @test_sf_vc_iv_se_e32m2(<4 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1770,8 +1500,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4i32.iXLen.iXLen(iXLen, iXLen, <4 x i32>, iXLen, iXLen) - define void @test_sf_vc_iv_se_e32m4(<8 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1783,8 +1511,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8i32.iXLen.iXLen(iXLen, iXLen, <8 x i32>, iXLen, iXLen) - define void @test_sf_vc_iv_se_e32m8(<16 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -1796,8 +1522,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16i32.iXLen.iXLen(iXLen, iXLen, <16 x i32>, iXLen, iXLen) - define void @test_sf_vc_iv_se_e64m1(<1 x i64> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -1809,8 +1533,6 @@ entry: ret void } -declare void 
@llvm.riscv.sf.vc.iv.se.iXLen.nxv1i64.iXLen.iXLen(iXLen, iXLen, <1 x i64>, iXLen, iXLen) - define void @test_sf_vc_iv_se_e64m2(<2 x i64> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -1822,8 +1544,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2i64.iXLen.iXLen(iXLen, iXLen, <2 x i64>, iXLen, iXLen) - define void @test_sf_vc_iv_se_e64m4(<4 x i64> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -1835,8 +1555,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4i64.iXLen.iXLen(iXLen, iXLen, <4 x i64>, iXLen, iXLen) - define void @test_sf_vc_iv_se_e64m8(<8 x i64> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -1848,8 +1566,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8i64.iXLen.iXLen(iXLen, iXLen, <8 x i64>, iXLen, iXLen) - define <1 x i8> @test_sf_vc_v_iv_se_e8mf8(<1 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1861,8 +1577,6 @@ entry: ret <1 x i8> %0 } -declare <1 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv1i8.iXLen.iXLen.iXLen(iXLen, <1 x i8>, iXLen, iXLen) - define <2 x i8> @test_sf_vc_v_iv_se_e8mf4(<2 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1874,8 +1588,6 @@ entry: ret <2 x i8> %0 } -declare <2 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv2i8.iXLen.iXLen.iXLen(iXLen, <2 x i8>, iXLen, iXLen) - define <4 x i8> @test_sf_vc_v_iv_se_e8mf2(<4 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1887,8 +1599,6 @@ entry: ret <4 x i8> %0 } -declare <4 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv4i8.iXLen.iXLen.iXLen(iXLen, <4 x i8>, iXLen, iXLen) - define <8 x i8> @test_sf_vc_v_iv_se_e8m1(<8 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -1900,8 +1610,6 @@ entry: ret <8 x i8> %0 } -declare <8 x i8> 
@llvm.riscv.sf.vc.v.iv.se.nxv8i8.iXLen.iXLen.iXLen(iXLen, <8 x i8>, iXLen, iXLen) - define <16 x i8> @test_sf_vc_v_iv_se_e8m2(<16 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -1913,8 +1621,6 @@ entry: ret <16 x i8> %0 } -declare <16 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv16i8.iXLen.iXLen.iXLen(iXLen, <16 x i8>, iXLen, iXLen) - define <32 x i8> @test_sf_vc_v_iv_se_e8m4(<32 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -1926,8 +1632,6 @@ entry: ret <32 x i8> %0 } -declare <32 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv32i8.iXLen.iXLen.iXLen(iXLen, <32 x i8>, iXLen, iXLen) - define <64 x i8> @test_sf_vc_v_iv_se_e8m8(<64 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -1939,8 +1643,6 @@ entry: ret <64 x i8> %0 } -declare <64 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv64i8.iXLen.iXLen.iXLen(iXLen, <64 x i8>, iXLen, iXLen) - define <1 x i16> @test_sf_vc_v_iv_se_e16mf4(<1 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1952,8 +1654,6 @@ entry: ret <1 x i16> %0 } -declare <1 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv1i16.iXLen.iXLen.iXLen(iXLen, <1 x i16>, iXLen, iXLen) - define <2 x i16> @test_sf_vc_v_iv_se_e16mf2(<2 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1965,8 +1665,6 @@ entry: ret <2 x i16> %0 } -declare <2 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv2i16.iXLen.iXLen.iXLen(iXLen, <2 x i16>, iXLen, iXLen) - define <4 x i16> @test_sf_vc_v_iv_se_e16m1(<4 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -1978,8 +1676,6 @@ entry: ret <4 x i16> %0 } -declare <4 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv4i16.iXLen.iXLen.iXLen(iXLen, <4 x i16>, iXLen, iXLen) - define <8 x i16> @test_sf_vc_v_iv_se_e16m2(<8 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -1991,8 +1687,6 
@@ entry: ret <8 x i16> %0 } -declare <8 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv8i16.iXLen.iXLen.iXLen(iXLen, <8 x i16>, iXLen, iXLen) - define <16 x i16> @test_sf_vc_v_iv_se_e16m4(<16 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -2004,8 +1698,6 @@ entry: ret <16 x i16> %0 } -declare <16 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv16i16.iXLen.iXLen.iXLen(iXLen, <16 x i16>, iXLen, iXLen) - define <32 x i16> @test_sf_vc_v_iv_se_e16m8(<32 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -2017,8 +1709,6 @@ entry: ret <32 x i16> %0 } -declare <32 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv32i16.iXLen.iXLen.iXLen(iXLen, <32 x i16>, iXLen, iXLen) - define <1 x i32> @test_sf_vc_v_iv_se_e32mf2(<1 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2030,8 +1720,6 @@ entry: ret <1 x i32> %0 } -declare <1 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv1i32.iXLen.iXLen.iXLen(iXLen, <1 x i32>, iXLen, iXLen) - define <2 x i32> @test_sf_vc_v_iv_se_e32m1(<2 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2043,8 +1731,6 @@ entry: ret <2 x i32> %0 } -declare <2 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv2i32.iXLen.iXLen.iXLen(iXLen, <2 x i32>, iXLen, iXLen) - define <4 x i32> @test_sf_vc_v_iv_se_e32m2(<4 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2056,8 +1742,6 @@ entry: ret <4 x i32> %0 } -declare <4 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv4i32.iXLen.iXLen.iXLen(iXLen, <4 x i32>, iXLen, iXLen) - define <8 x i32> @test_sf_vc_v_iv_se_e32m4(<8 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2069,8 +1753,6 @@ entry: ret <8 x i32> %0 } -declare <8 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv8i32.iXLen.iXLen.iXLen(iXLen, <8 x i32>, iXLen, iXLen) - define <16 x i32> @test_sf_vc_v_iv_se_e32m8(<16 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_v_iv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2082,8 +1764,6 @@ entry: ret <16 x i32> %0 } -declare <16 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv16i32.iXLen.iXLen.iXLen(iXLen, <16 x i32>, iXLen, iXLen) - define <1 x i64> @test_sf_vc_v_iv_se_e64m1(<1 x i64> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -2095,8 +1775,6 @@ entry: ret <1 x i64> %0 } -declare <1 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv1i64.iXLen.iXLen.iXLen(iXLen, <1 x i64>, iXLen, iXLen) - define <2 x i64> @test_sf_vc_v_iv_se_e64m2(<2 x i64> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -2108,8 +1786,6 @@ entry: ret <2 x i64> %0 } -declare <2 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv2i64.iXLen.iXLen.iXLen(iXLen, <2 x i64>, iXLen, iXLen) - define <4 x i64> @test_sf_vc_v_iv_se_e64m4(<4 x i64> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -2121,8 +1797,6 @@ entry: ret <4 x i64> %0 } -declare <4 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv4i64.iXLen.iXLen.iXLen(iXLen, <4 x i64>, iXLen, iXLen) - define <8 x i64> @test_sf_vc_v_iv_se_e64m8(<8 x i64> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -2134,8 +1808,6 @@ entry: ret <8 x i64> %0 } -declare <8 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv8i64.iXLen.iXLen.iXLen(iXLen, <8 x i64>, iXLen, iXLen) - define <1 x i8> @test_sf_vc_v_iv_e8mf8(<1 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e8mf8: ; CHECK: # %bb.0: # %entry @@ -2147,8 +1819,6 @@ entry: ret <1 x i8> %0 } -declare <1 x i8> @llvm.riscv.sf.vc.v.iv.nxv1i8.iXLen.iXLen.iXLen(iXLen, <1 x i8>, iXLen, iXLen) - define <2 x i8> @test_sf_vc_v_iv_e8mf4(<2 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e8mf4: ; CHECK: # %bb.0: # %entry @@ -2160,8 +1830,6 @@ entry: ret <2 x i8> %0 } -declare <2 x i8> @llvm.riscv.sf.vc.v.iv.nxv2i8.iXLen.iXLen.iXLen(iXLen, <2 x i8>, iXLen, iXLen) - define <4 x i8> @test_sf_vc_v_iv_e8mf2(<4 x i8> %vs2, iXLen 
%vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e8mf2: ; CHECK: # %bb.0: # %entry @@ -2173,8 +1841,6 @@ entry: ret <4 x i8> %0 } -declare <4 x i8> @llvm.riscv.sf.vc.v.iv.nxv4i8.iXLen.iXLen.iXLen(iXLen, <4 x i8>, iXLen, iXLen) - define <8 x i8> @test_sf_vc_v_iv_e8m1(<8 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e8m1: ; CHECK: # %bb.0: # %entry @@ -2186,8 +1852,6 @@ entry: ret <8 x i8> %0 } -declare <8 x i8> @llvm.riscv.sf.vc.v.iv.nxv8i8.iXLen.iXLen.iXLen(iXLen, <8 x i8>, iXLen, iXLen) - define <16 x i8> @test_sf_vc_v_iv_e8m2(<16 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e8m2: ; CHECK: # %bb.0: # %entry @@ -2199,8 +1863,6 @@ entry: ret <16 x i8> %0 } -declare <16 x i8> @llvm.riscv.sf.vc.v.iv.nxv16i8.iXLen.iXLen.iXLen(iXLen, <16 x i8>, iXLen, iXLen) - define <32 x i8> @test_sf_vc_v_iv_e8m4(<32 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e8m4: ; CHECK: # %bb.0: # %entry @@ -2212,8 +1874,6 @@ entry: ret <32 x i8> %0 } -declare <32 x i8> @llvm.riscv.sf.vc.v.iv.nxv32i8.iXLen.iXLen.iXLen(iXLen, <32 x i8>, iXLen, iXLen) - define <64 x i8> @test_sf_vc_v_iv_e8m8(<64 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e8m8: ; CHECK: # %bb.0: # %entry @@ -2225,8 +1885,6 @@ entry: ret <64 x i8> %0 } -declare <64 x i8> @llvm.riscv.sf.vc.v.iv.nxv64i8.iXLen.iXLen.iXLen(iXLen, <64 x i8>, iXLen, iXLen) - define <1 x i16> @test_sf_vc_v_iv_e16mf4(<1 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e16mf4: ; CHECK: # %bb.0: # %entry @@ -2238,8 +1896,6 @@ entry: ret <1 x i16> %0 } -declare <1 x i16> @llvm.riscv.sf.vc.v.iv.nxv1i16.iXLen.iXLen.iXLen(iXLen, <1 x i16>, iXLen, iXLen) - define <2 x i16> @test_sf_vc_v_iv_e16mf2(<2 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e16mf2: ; CHECK: # %bb.0: # %entry @@ -2251,8 +1907,6 @@ entry: ret <2 x i16> %0 } -declare <2 x i16> @llvm.riscv.sf.vc.v.iv.nxv2i16.iXLen.iXLen.iXLen(iXLen, <2 x i16>, iXLen, iXLen) - define <4 x i16> @test_sf_vc_v_iv_e16m1(<4 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_v_iv_e16m1: ; CHECK: # %bb.0: # %entry @@ -2264,8 +1918,6 @@ entry: ret <4 x i16> %0 } -declare <4 x i16> @llvm.riscv.sf.vc.v.iv.nxv4i16.iXLen.iXLen.iXLen(iXLen, <4 x i16>, iXLen, iXLen) - define <8 x i16> @test_sf_vc_v_iv_e16m2(<8 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e16m2: ; CHECK: # %bb.0: # %entry @@ -2277,8 +1929,6 @@ entry: ret <8 x i16> %0 } -declare <8 x i16> @llvm.riscv.sf.vc.v.iv.nxv8i16.iXLen.iXLen.iXLen(iXLen, <8 x i16>, iXLen, iXLen) - define <16 x i16> @test_sf_vc_v_iv_e16m4(<16 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e16m4: ; CHECK: # %bb.0: # %entry @@ -2290,8 +1940,6 @@ entry: ret <16 x i16> %0 } -declare <16 x i16> @llvm.riscv.sf.vc.v.iv.nxv16i16.iXLen.iXLen.iXLen(iXLen, <16 x i16>, iXLen, iXLen) - define <32 x i16> @test_sf_vc_v_iv_e16m8(<32 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e16m8: ; CHECK: # %bb.0: # %entry @@ -2303,8 +1951,6 @@ entry: ret <32 x i16> %0 } -declare <32 x i16> @llvm.riscv.sf.vc.v.iv.nxv32i16.iXLen.iXLen.iXLen(iXLen, <32 x i16>, iXLen, iXLen) - define <1 x i32> @test_sf_vc_v_iv_e32mf2(<1 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2316,8 +1962,6 @@ entry: ret <1 x i32> %0 } -declare <1 x i32> @llvm.riscv.sf.vc.v.iv.nxv1i32.iXLen.iXLen.iXLen(iXLen, <1 x i32>, iXLen, iXLen) - define <2 x i32> @test_sf_vc_v_iv_e32m1(<2 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e32m1: ; CHECK: # %bb.0: # %entry @@ -2329,8 +1973,6 @@ entry: ret <2 x i32> %0 } -declare <2 x i32> @llvm.riscv.sf.vc.v.iv.nxv2i32.iXLen.iXLen.iXLen(iXLen, <2 x i32>, iXLen, iXLen) - define <4 x i32> @test_sf_vc_v_iv_e32m2(<4 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e32m2: ; CHECK: # %bb.0: # %entry @@ -2342,8 +1984,6 @@ entry: ret <4 x i32> %0 } -declare <4 x i32> @llvm.riscv.sf.vc.v.iv.nxv4i32.iXLen.iXLen.iXLen(iXLen, <4 x i32>, iXLen, iXLen) - define <8 x i32> @test_sf_vc_v_iv_e32m4(<8 x i32> %vs2, iXLen %vl) { ; 
CHECK-LABEL: test_sf_vc_v_iv_e32m4: ; CHECK: # %bb.0: # %entry @@ -2355,8 +1995,6 @@ entry: ret <8 x i32> %0 } -declare <8 x i32> @llvm.riscv.sf.vc.v.iv.nxv8i32.iXLen.iXLen.iXLen(iXLen, <8 x i32>, iXLen, iXLen) - define <16 x i32> @test_sf_vc_v_iv_e32m8(<16 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e32m8: ; CHECK: # %bb.0: # %entry @@ -2368,8 +2006,6 @@ entry: ret <16 x i32> %0 } -declare <16 x i32> @llvm.riscv.sf.vc.v.iv.nxv16i32.iXLen.iXLen.iXLen(iXLen, <16 x i32>, iXLen, iXLen) - define <1 x i64> @test_sf_vc_v_iv_e64m1(<1 x i64> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e64m1: ; CHECK: # %bb.0: # %entry @@ -2381,8 +2017,6 @@ entry: ret <1 x i64> %0 } -declare <1 x i64> @llvm.riscv.sf.vc.v.iv.nxv1i64.iXLen.iXLen.iXLen(iXLen, <1 x i64>, iXLen, iXLen) - define <2 x i64> @test_sf_vc_v_iv_e64m2(<2 x i64> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e64m2: ; CHECK: # %bb.0: # %entry @@ -2394,8 +2028,6 @@ entry: ret <2 x i64> %0 } -declare <2 x i64> @llvm.riscv.sf.vc.v.iv.nxv2i64.iXLen.iXLen.iXLen(iXLen, <2 x i64>, iXLen, iXLen) - define <4 x i64> @test_sf_vc_v_iv_e64m4(<4 x i64> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e64m4: ; CHECK: # %bb.0: # %entry @@ -2407,8 +2039,6 @@ entry: ret <4 x i64> %0 } -declare <4 x i64> @llvm.riscv.sf.vc.v.iv.nxv4i64.iXLen.iXLen.iXLen(iXLen, <4 x i64>, iXLen, iXLen) - define <8 x i64> @test_sf_vc_v_iv_e64m8(<8 x i64> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e64m8: ; CHECK: # %bb.0: # %entry @@ -2420,8 +2050,6 @@ entry: ret <8 x i64> %0 } -declare <8 x i64> @llvm.riscv.sf.vc.v.iv.nxv8i64.iXLen.iXLen.iXLen(iXLen, <8 x i64>, iXLen, iXLen) - define void @test_sf_vc_fvv_se_e16mf4(<1 x half> %vs2, <1 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -2433,8 +2061,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1f16.nxv1i16.iXLen(iXLen, iXLen, <1 x half>, <1 x i16>, iXLen) - define <1 x half> @test_sf_vc_v_fvv_se_e16mf4(<1 x half> 
%vs2, <1 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -2446,8 +2072,6 @@ entry: ret <1 x half> %0 } -declare <1 x half> @llvm.riscv.sf.vc.v.vv.se.nxv1f16.iXLen.nxv1f16.iXLen(iXLen, <1 x half>, <1 x i16>, iXLen) - define void @test_sf_vc_fvv_se_e16mf2(<2 x half> %vs2, <2 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -2459,8 +2083,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2f16.nxv2i16.iXLen(iXLen, iXLen, <2 x half>, <2 x i16>, iXLen) - define <2 x half> @test_sf_vc_v_fvv_se_e16mf2(<2 x half> %vs2, <2 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -2472,8 +2094,6 @@ entry: ret <2 x half> %0 } -declare <2 x half> @llvm.riscv.sf.vc.v.vv.se.nxv2f16.iXLen.nxv2f16.iXLen(iXLen, <2 x half>, <2 x i16>, iXLen) - define void @test_sf_vc_fvv_se_e16m1(<4 x half> %vs2, <4 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -2485,8 +2105,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4f16.nxv4i16.iXLen(iXLen, iXLen, <4 x half>, <4 x i16>, iXLen) - define <4 x half> @test_sf_vc_v_fvv_se_e16m1(<4 x half> %vs2, <4 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -2498,8 +2116,6 @@ entry: ret <4 x half> %0 } -declare <4 x half> @llvm.riscv.sf.vc.v.vv.se.nxv4f16.iXLen.nxv4f16.iXLen(iXLen, <4 x half>, <4 x i16>, iXLen) - define void @test_sf_vc_fvv_se_e16m2(<8 x half> %vs2, <8 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -2511,8 +2127,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8f16.nxv8i16.iXLen(iXLen, iXLen, <8 x half>, <8 x i16>, iXLen) - define <8 x half> @test_sf_vc_v_fvv_se_e16m2(<8 x half> %vs2, <8 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -2524,8 +2138,6 
@@ entry: ret <8 x half> %0 } -declare <8 x half> @llvm.riscv.sf.vc.v.vv.se.nxv8f16.iXLen.nxv8f16.iXLen(iXLen, <8 x half>, <8 x i16>, iXLen) - define void @test_sf_vc_fvv_se_e16m4(<16 x half> %vs2, <16 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -2537,8 +2149,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16f16.nxv16i16.iXLen(iXLen, iXLen, <16 x half>, <16 x i16>, iXLen) - define <16 x half> @test_sf_vc_v_fvv_se_e16m4(<16 x half> %vs2, <16 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -2550,8 +2160,6 @@ entry: ret <16 x half> %0 } -declare <16 x half> @llvm.riscv.sf.vc.v.vv.se.nxv16f16.iXLen.nxv16f16.iXLen(iXLen, <16 x half>, <16 x i16>, iXLen) - define void @test_sf_vc_fvv_se_e16m8(<32 x half> %vs2, <32 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -2563,8 +2171,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv32f16.nxv32i16.iXLen(iXLen, iXLen, <32 x half>, <32 x i16>, iXLen) - define <32 x half> @test_sf_vc_v_fvv_se_e16m8(<32 x half> %vs2, <32 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -2576,8 +2182,6 @@ entry: ret <32 x half> %0 } -declare <32 x half> @llvm.riscv.sf.vc.v.vv.se.nxv32f16.iXLen.nxv32f16.iXLen(iXLen, <32 x half>, <32 x i16>, iXLen) - define void @test_sf_vc_fvv_se_e32mf2(<1 x float> %vs2, <1 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2589,8 +2193,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1f32.nxv1i32.iXLen(iXLen, iXLen, <1 x float>, <1 x i32>, iXLen) - define <1 x float> @test_sf_vc_v_fvv_se_e32mf2(<1 x float> %vs2, <1 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2602,8 +2204,6 @@ entry: ret <1 x float> %0 } -declare <1 x float> 
@llvm.riscv.sf.vc.v.vv.se.nxv1f32.iXLen.nxv1f32.iXLen(iXLen, <1 x float>, <1 x i32>, iXLen) - define void @test_sf_vc_fvv_se_e32m1(<2 x float> %vs2, <2 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2615,8 +2215,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2f32.nxv2i32.iXLen(iXLen, iXLen, <2 x float>, <2 x i32>, iXLen) - define <2 x float> @test_sf_vc_v_fvv_se_e32m1(<2 x float> %vs2, <2 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2628,8 +2226,6 @@ entry: ret <2 x float> %0 } -declare <2 x float> @llvm.riscv.sf.vc.v.vv.se.nxv2f32.iXLen.nxv2f32.iXLen(iXLen, <2 x float>, <2 x i32>, iXLen) - define void @test_sf_vc_fvv_se_e32m2(<4 x float> %vs2, <4 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2641,8 +2237,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4f32.nxv4i32.iXLen(iXLen, iXLen, <4 x float>, <4 x i32>, iXLen) - define <4 x float> @test_sf_vc_v_fvv_se_e32m2(<4 x float> %vs2, <4 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2654,8 +2248,6 @@ entry: ret <4 x float> %0 } -declare <4 x float> @llvm.riscv.sf.vc.v.vv.se.nxv4f32.iXLen.nxv4f32.iXLen(iXLen, <4 x float>, <4 x i32>, iXLen) - define void @test_sf_vc_fvv_se_e32m4(<8 x float> %vs2, <8 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2667,8 +2259,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8f32.nxv8i32.iXLen(iXLen, iXLen, <8 x float>, <8 x i32>, iXLen) - define <8 x float> @test_sf_vc_v_fvv_se_e32m4(<8 x float> %vs2, <8 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2680,8 +2270,6 @@ entry: ret <8 x float> %0 } -declare <8 x float> @llvm.riscv.sf.vc.v.vv.se.nxv8f32.iXLen.nxv8f32.iXLen(iXLen, <8 x float>, <8 x i32>, iXLen) - define void 
@test_sf_vc_fvv_se_e32m8(<16 x float> %vs2, <16 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2693,8 +2281,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16f32.nxv16i32.iXLen(iXLen, iXLen, <16 x float>, <16 x i32>, iXLen) - define <16 x float> @test_sf_vc_v_fvv_se_e32m8(<16 x float> %vs2, <16 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2706,8 +2292,6 @@ entry: ret <16 x float> %0 } -declare <16 x float> @llvm.riscv.sf.vc.v.vv.se.nxv16f32.iXLen.nxv16f32.iXLen(iXLen, <16 x float>, <16 x i32>, iXLen) - define void @test_sf_vc_fvv_se_e64m1(<1 x double> %vs2, <1 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -2719,8 +2303,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1f64.nxv1i64.iXLen(iXLen, iXLen, <1 x double>, <1 x i64>, iXLen) - define <1 x double> @test_sf_vc_v_fvv_se_e64m1(<1 x double> %vs2, <1 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -2732,8 +2314,6 @@ entry: ret <1 x double> %0 } -declare <1 x double> @llvm.riscv.sf.vc.v.vv.se.nxv1f64.iXLen.nxv1f64.iXLen(iXLen, <1 x double>, <1 x i64>, iXLen) - define void @test_sf_vc_fvv_se_e64m2(<2 x double> %vs2, <2 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -2745,8 +2325,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2f64.nxv2i64.iXLen(iXLen, iXLen, <2 x double>, <2 x i64>, iXLen) - define <2 x double> @test_sf_vc_v_fvv_se_e64m2(<2 x double> %vs2, <2 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -2758,8 +2336,6 @@ entry: ret <2 x double> %0 } -declare <2 x double> @llvm.riscv.sf.vc.v.vv.se.nxv2f64.iXLen.nxv2f64.iXLen(iXLen, <2 x double>, <2 x i64>, iXLen) - define void @test_sf_vc_fvv_se_e64m4(<4 x double> %vs2, <4 x i64> %vs1, iXLen %vl) { ; 
CHECK-LABEL: test_sf_vc_fvv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -2771,8 +2347,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4f64.nxv4i64.iXLen(iXLen, iXLen, <4 x double>, <4 x i64>, iXLen) - define <4 x double> @test_sf_vc_v_fvv_se_e64m4(<4 x double> %vs2, <4 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -2784,8 +2358,6 @@ entry: ret <4 x double> %0 } -declare <4 x double> @llvm.riscv.sf.vc.v.vv.se.nxv4f64.iXLen.nxv4f64.iXLen(iXLen, <4 x double>, <4 x i64>, iXLen) - define void @test_sf_vc_fvv_se_e64m8(<8 x double> %vs2, <8 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -2797,8 +2369,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8f64.nxv8i64.iXLen(iXLen, iXLen, <8 x double>, <8 x i64>, iXLen) - define <8 x double> @test_sf_vc_v_fvv_se_e64m8(<8 x double> %vs2, <8 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -2810,8 +2380,6 @@ entry: ret <8 x double> %0 } -declare <8 x double> @llvm.riscv.sf.vc.v.vv.se.nxv8f64.iXLen.nxv8f64.iXLen(iXLen, <8 x double>, <8 x i64>, iXLen) - define void @test_sf_vc_fvx_se_e16mf4(<1 x half> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvx_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -2823,8 +2391,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1f16.nxv1f16.i16.iXLen(iXLen, iXLen, <1 x half>, i16, iXLen) - define <1 x half> @test_sf_vc_v_fvx_se_e16mf4(<1 x half> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvx_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -2836,8 +2402,6 @@ entry: ret <1 x half> %0 } -declare <1 x half> @llvm.riscv.sf.vc.v.xv.se.nxv1f16.nxv1f16.i16.iXLen(iXLen, <1 x half>, i16, iXLen) - define void @test_sf_vc_fvx_se_e16mf2(<2 x half> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvx_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -2849,8 +2413,6 @@ entry: ret void } -declare void 
@llvm.riscv.sf.vc.xv.se.iXLen.nxv2f16.nxv2f16.i16.iXLen(iXLen, iXLen, <2 x half>, i16, iXLen) - define <2 x half> @test_sf_vc_v_fvx_se_e16mf2(<2 x half> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvx_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -2862,8 +2424,6 @@ entry: ret <2 x half> %0 } -declare <2 x half> @llvm.riscv.sf.vc.v.xv.se.nxv2f16.nxv2f16.i16.iXLen(iXLen, <2 x half>, i16, iXLen) - define void @test_sf_vc_fvx_se_e16m1(<4 x half> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvx_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -2875,8 +2435,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4f16.nxv4f16.i16.iXLen(iXLen, iXLen, <4 x half>, i16, iXLen) - define <4 x half> @test_sf_vc_v_fvx_se_e16m1(<4 x half> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvx_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -2888,8 +2446,6 @@ entry: ret <4 x half> %0 } -declare <4 x half> @llvm.riscv.sf.vc.v.xv.se.nxv4f16.nxv4f16.i16.iXLen(iXLen, <4 x half>, i16, iXLen) - define void @test_sf_vc_fvx_se_e16m2(<8 x half> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvx_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -2901,8 +2457,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8f16.nxv8f16.i16.iXLen(iXLen, iXLen, <8 x half>, i16, iXLen) - define <8 x half> @test_sf_vc_v_fvx_se_e16m2(<8 x half> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvx_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -2914,8 +2468,6 @@ entry: ret <8 x half> %0 } -declare <8 x half> @llvm.riscv.sf.vc.v.xv.se.nxv8f16.nxv8f16.i16.iXLen(iXLen, <8 x half>, i16, iXLen) - define void @test_sf_vc_fvx_se_e16m4(<16 x half> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvx_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -2927,8 +2479,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16f16.nxv16f16.i16.iXLen(iXLen, iXLen, <16 x half>, i16, iXLen) - define <16 x half> @test_sf_vc_v_fvx_se_e16m4(<16 x half> %vs2, i16 %rs1, iXLen %vl) { ; 
CHECK-LABEL: test_sf_vc_v_fvx_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -2940,8 +2490,6 @@ entry: ret <16 x half> %0 } -declare <16 x half> @llvm.riscv.sf.vc.v.xv.se.nxv16f16.nxv16f16.i16.iXLen(iXLen, <16 x half>, i16, iXLen) - define void @test_sf_vc_fvx_se_e16m8(<32 x half> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvx_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -2953,8 +2501,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv32f16.nxv32f16.i16.iXLen(iXLen, iXLen, <32 x half>, i16, iXLen) - define <32 x half> @test_sf_vc_v_fvx_se_e16m8(<32 x half> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvx_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -2966,8 +2512,6 @@ entry: ret <32 x half> %0 } -declare <32 x half> @llvm.riscv.sf.vc.v.xv.se.nxv32f16.nxv32f16.i16.iXLen(iXLen, <32 x half>, i16, iXLen) - define void @test_sf_vc_fvx_se_e32mf2(<1 x float> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvx_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2979,8 +2523,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1f32.nxv1f32.i32.iXLen(iXLen, iXLen, <1 x float>, i32, iXLen) - define <1 x float> @test_sf_vc_v_fvx_se_e32mf2(<1 x float> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvx_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2992,8 +2534,6 @@ entry: ret <1 x float> %0 } -declare <1 x float> @llvm.riscv.sf.vc.v.xv.se.nxv1f32.nxv1f32.i32.iXLen(iXLen, <1 x float>, i32, iXLen) - define void @test_sf_vc_fvx_se_e32m1(<2 x float> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvx_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -3005,8 +2545,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2f32.nxv2f32.i32.iXLen(iXLen, iXLen, <2 x float>, i32, iXLen) - define <2 x float> @test_sf_vc_v_fvx_se_e32m1(<2 x float> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvx_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -3018,8 +2556,6 @@ entry: ret <2 x float> %0 } -declare <2 x float> 
@llvm.riscv.sf.vc.v.xv.se.nxv2f32.nxv2f32.i32.iXLen(iXLen, <2 x float>, i32, iXLen) - define void @test_sf_vc_fvx_se_e32m2(<4 x float> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvx_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -3031,8 +2567,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4f32.nxv4f32.i32.iXLen(iXLen, iXLen, <4 x float>, i32, iXLen) - define <4 x float> @test_sf_vc_v_fvx_se_e32m2(<4 x float> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvx_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -3044,8 +2578,6 @@ entry: ret <4 x float> %0 } -declare <4 x float> @llvm.riscv.sf.vc.v.xv.se.nxv4f32.nxv4f32.i32.iXLen(iXLen, <4 x float>, i32, iXLen) - define void @test_sf_vc_fvx_se_e32m4(<8 x float> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvx_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -3057,8 +2589,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8f32.nxv8f32.i32.iXLen(iXLen, iXLen, <8 x float>, i32, iXLen) - define <8 x float> @test_sf_vc_v_fvx_se_e32m4(<8 x float> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvx_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -3070,8 +2600,6 @@ entry: ret <8 x float> %0 } -declare <8 x float> @llvm.riscv.sf.vc.v.xv.se.nxv8f32.nxv8f32.i32.iXLen(iXLen, <8 x float>, i32, iXLen) - define void @test_sf_vc_fvx_se_e32m8(<16 x float> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvx_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -3083,8 +2611,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16f32.nxv16f32.i32.iXLen(iXLen, iXLen, <16 x float>, i32, iXLen) - define <16 x float> @test_sf_vc_v_fvx_se_e32m8(<16 x float> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvx_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -3096,8 +2622,6 @@ entry: ret <16 x float> %0 } -declare <16 x float> @llvm.riscv.sf.vc.v.xv.se.nxv16f32.nxv16f32.i32.iXLen(iXLen, <16 x float>, i32, iXLen) - define void @test_sf_vc_fvi_se_e16mf4(<1 x half> %vs2, iXLen %vl) { ; 
CHECK-LABEL: test_sf_vc_fvi_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -3109,8 +2633,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1f16.nxv1f16.iXLen.iXLen(iXLen, iXLen, <1 x half>, iXLen, iXLen) - define <1 x half> @test_sf_vc_v_fvi_se_e16mf4(<1 x half> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvi_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -3122,8 +2644,6 @@ entry: ret <1 x half> %0 } -declare <1 x half> @llvm.riscv.sf.vc.v.iv.se.nxv1f16.nxv1f16.iXLen.iXLen(iXLen, <1 x half>, iXLen, iXLen) - define void @test_sf_vc_fvi_se_e16mf2(<2 x half> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvi_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -3135,8 +2655,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2f16.nxv2f16.iXLen.iXLen(iXLen, iXLen, <2 x half>, iXLen, iXLen) - define <2 x half> @test_sf_vc_v_fvi_se_e16mf2(<2 x half> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvi_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -3148,8 +2666,6 @@ entry: ret <2 x half> %0 } -declare <2 x half> @llvm.riscv.sf.vc.v.iv.se.nxv2f16.nxv2f16.iXLen.iXLen(iXLen, <2 x half>, iXLen, iXLen) - define void @test_sf_vc_fvi_se_e16m1(<4 x half> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvi_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -3161,8 +2677,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4f16.nxv4f16.iXLen.iXLen(iXLen, iXLen, <4 x half>, iXLen, iXLen) - define <4 x half> @test_sf_vc_v_fvi_se_e16m1(<4 x half> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvi_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -3174,8 +2688,6 @@ entry: ret <4 x half> %0 } -declare <4 x half> @llvm.riscv.sf.vc.v.iv.se.nxv4f16.nxv4f16.iXLen.iXLen(iXLen, <4 x half>, iXLen, iXLen) - define void @test_sf_vc_fvi_se_e16m2(<8 x half> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvi_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -3187,8 +2699,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8f16.nxv8f16.iXLen.iXLen(iXLen, iXLen, <8 x half>, iXLen, iXLen) - 
define <8 x half> @test_sf_vc_v_fvi_se_e16m2(<8 x half> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvi_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -3200,8 +2710,6 @@ entry: ret <8 x half> %0 } -declare <8 x half> @llvm.riscv.sf.vc.v.iv.se.nxv8f16.nxv8f16.iXLen.iXLen(iXLen, <8 x half>, iXLen, iXLen) - define void @test_sf_vc_fvi_se_e16m4(<16 x half> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvi_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -3213,8 +2721,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16f16.nxv16f16.iXLen.iXLen(iXLen, iXLen, <16 x half>, iXLen, iXLen) - define <16 x half> @test_sf_vc_v_fvi_se_e16m4(<16 x half> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvi_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -3226,8 +2732,6 @@ entry: ret <16 x half> %0 } -declare <16 x half> @llvm.riscv.sf.vc.v.iv.se.nxv16f16.nxv16f16.iXLen.iXLen(iXLen, <16 x half>, iXLen, iXLen) - define void @test_sf_vc_fvi_se_e16m8(<32 x half> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvi_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -3239,8 +2743,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv32f16.nxv32f16.iXLen.iXLen(iXLen, iXLen, <32 x half>, iXLen, iXLen) - define <32 x half> @test_sf_vc_v_fvi_se_e16m8(<32 x half> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvi_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -3252,8 +2754,6 @@ entry: ret <32 x half> %0 } -declare <32 x half> @llvm.riscv.sf.vc.v.iv.se.nxv32f16.nxv32f16.iXLen.iXLen(iXLen, <32 x half>, iXLen, iXLen) - define void @test_sf_vc_fvi_se_e32mf2(<1 x float> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvi_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -3265,8 +2765,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1f32.nxv1f32.iXLen.iXLen(iXLen, iXLen, <1 x float>, iXLen, iXLen) - define <1 x float> @test_sf_vc_v_fvi_se_e32mf2(<1 x float> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvi_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -3278,8 +2776,6 @@ entry: ret <1 x float> %0 } 
-declare <1 x float> @llvm.riscv.sf.vc.v.iv.se.nxv1f32.nxv1f32.iXLen.iXLen(iXLen, <1 x float>, iXLen, iXLen) - define void @test_sf_vc_fvi_se_e32m1(<2 x float> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvi_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -3291,8 +2787,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2f32.nxv2f32.iXLen.iXLen(iXLen, iXLen, <2 x float>, iXLen, iXLen) - define <2 x float> @test_sf_vc_v_fvi_se_e32m1(<2 x float> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvi_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -3304,8 +2798,6 @@ entry: ret <2 x float> %0 } -declare <2 x float> @llvm.riscv.sf.vc.v.iv.se.nxv2f32.nxv2f32.iXLen.iXLen(iXLen, <2 x float>, iXLen, iXLen) - define void @test_sf_vc_fvi_se_e32m2(<4 x float> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvi_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -3317,8 +2809,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4f32.nxv4f32.iXLen.iXLen(iXLen, iXLen, <4 x float>, iXLen, iXLen) - define <4 x float> @test_sf_vc_v_fvi_se_e32m2(<4 x float> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvi_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -3330,8 +2820,6 @@ entry: ret <4 x float> %0 } -declare <4 x float> @llvm.riscv.sf.vc.v.iv.se.nxv4f32.nxv4f32.iXLen.iXLen(iXLen, <4 x float>, iXLen, iXLen) - define void @test_sf_vc_fvi_se_e32m4(<8 x float> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvi_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -3343,8 +2831,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8f32.nxv8f32.iXLen.iXLen(iXLen, iXLen, <8 x float>, iXLen, iXLen) - define <8 x float> @test_sf_vc_v_fvi_se_e32m4(<8 x float> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvi_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -3356,8 +2842,6 @@ entry: ret <8 x float> %0 } -declare <8 x float> @llvm.riscv.sf.vc.v.iv.se.nxv8f32.nxv8f32.iXLen.iXLen(iXLen, <8 x float>, iXLen, iXLen) - define void @test_sf_vc_fvi_se_e32m8(<16 x float> %vs2, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_fvi_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -3369,8 +2853,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16f32.nxv16f32.iXLen.iXLen(iXLen, iXLen, <16 x float>, iXLen, iXLen) - define <16 x float> @test_sf_vc_v_fvi_se_e32m8(<16 x float> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvi_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -3382,8 +2864,6 @@ entry: ret <16 x float> %0 } -declare <16 x float> @llvm.riscv.sf.vc.v.iv.se.nxv16f32.nxv16f32.iXLen.iXLen(iXLen, <16 x float>, iXLen, iXLen) - define void @test_sf_vc_fvf_se_e16mf4(<1 x half> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvf_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -3395,8 +2875,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1f16.nxv1f16.f16.iXLen(iXLen, iXLen, <1 x half>, half, iXLen) - define <1 x half> @test_sf_vc_v_fvf_se_e16mf4(<1 x half> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvf_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -3408,8 +2886,6 @@ entry: ret <1 x half> %0 } -declare <1 x half> @llvm.riscv.sf.vc.v.fv.se.nxv1f16.nxv1f16.iXLen.f16(iXLen, <1 x half>, half, iXLen) - define void @test_sf_vc_fvf_se_e16mf2(<2 x half> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvf_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -3421,8 +2897,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2f16.nxv2f16.f16.iXLen(iXLen, iXLen, <2 x half>, half, iXLen) - define <2 x half> @test_sf_vc_v_fvf_se_e16mf2(<2 x half> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvf_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -3434,8 +2908,6 @@ entry: ret <2 x half> %0 } -declare <2 x half> @llvm.riscv.sf.vc.v.fv.se.nxv2f16.nxv2f16.iXLen.f16(iXLen, <2 x half>, half, iXLen) - define void @test_sf_vc_fvf_se_e16m1(<4 x half> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvf_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -3447,8 +2919,6 @@ entry: ret void } -declare void 
@llvm.riscv.sf.vc.fv.se.iXLen.nxv4f16.nxv4f16.f16.iXLen(iXLen, iXLen, <4 x half>, half, iXLen) - define <4 x half> @test_sf_vc_v_fvf_se_e16m1(<4 x half> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvf_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -3460,8 +2930,6 @@ entry: ret <4 x half> %0 } -declare <4 x half> @llvm.riscv.sf.vc.v.fv.se.nxv4f16.nxv4f16.iXLen.f16(iXLen, <4 x half>, half, iXLen) - define void @test_sf_vc_fvf_se_e16m2(<8 x half> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvf_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -3473,8 +2941,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8f16.nxv8f16.f16.iXLen(iXLen, iXLen, <8 x half>, half, iXLen) - define <8 x half> @test_sf_vc_v_fvf_se_e16m2(<8 x half> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvf_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -3486,8 +2952,6 @@ entry: ret <8 x half> %0 } -declare <8 x half> @llvm.riscv.sf.vc.v.fv.se.nxv8f16.nxv8f16.iXLen.f16(iXLen, <8 x half>, half, iXLen) - define void @test_sf_vc_fvf_se_e16m4(<16 x half> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvf_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -3499,8 +2963,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16f16.nxv16f16.f16.iXLen(iXLen, iXLen, <16 x half>, half, iXLen) - define <16 x half> @test_sf_vc_v_fvf_se_e16m4(<16 x half> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvf_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -3512,8 +2974,6 @@ entry: ret <16 x half> %0 } -declare <16 x half> @llvm.riscv.sf.vc.v.fv.se.nxv16f16.nxv16f16.iXLen.f16(iXLen, <16 x half>, half, iXLen) - define void @test_sf_vc_fvf_se_e16m8(<32 x half> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvf_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -3525,8 +2985,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv32f16.nxv32f16.f16.iXLen(iXLen, iXLen, <32 x half>, half, iXLen) - define <32 x half> @test_sf_vc_v_fvf_se_e16m8(<32 x half> %vs2, half 
%rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvf_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -3538,8 +2996,6 @@ entry: ret <32 x half> %0 } -declare <32 x half> @llvm.riscv.sf.vc.v.fv.se.nxv32f16.nxv32f16.iXLen.f16(iXLen, <32 x half>, half, iXLen) - define void @test_sf_vc_fvf_se_e32mf2(<1 x float> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvf_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -3551,8 +3007,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1f32.nxv1f32.f32.iXLen(iXLen, iXLen, <1 x float>, float, iXLen) - define <1 x float> @test_sf_vc_v_fvf_se_e32mf2(<1 x float> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvf_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -3564,8 +3018,6 @@ entry: ret <1 x float> %0 } -declare <1 x float> @llvm.riscv.sf.vc.v.fv.se.nxv1f32.nxv1f32.iXLen.f32(iXLen, <1 x float>, float, iXLen) - define void @test_sf_vc_fvf_se_e32m1(<2 x float> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvf_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -3577,8 +3029,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2f32.nxv2f32.f32.iXLen(iXLen, iXLen, <2 x float>, float, iXLen) - define <2 x float> @test_sf_vc_v_fvf_se_e32m1(<2 x float> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvf_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -3590,8 +3040,6 @@ entry: ret <2 x float> %0 } -declare <2 x float> @llvm.riscv.sf.vc.v.fv.se.nxv2f32.nxv2f32.iXLen.f32(iXLen, <2 x float>, float, iXLen) - define void @test_sf_vc_fvf_se_e32m2(<4 x float> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvf_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -3603,8 +3051,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4f32.nxv4f32.f32.iXLen(iXLen, iXLen, <4 x float>, float, iXLen) - define <4 x float> @test_sf_vc_v_fvf_se_e32m2(<4 x float> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvf_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -3616,8 +3062,6 @@ entry: ret <4 x float> %0 } 
-declare <4 x float> @llvm.riscv.sf.vc.v.fv.se.nxv4f32.nxv4f32.iXLen.f32(iXLen, <4 x float>, float, iXLen) - define void @test_sf_vc_fvf_se_e32m4(<8 x float> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvf_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -3629,8 +3073,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8f32.nxv8f32.f32.iXLen(iXLen, iXLen, <8 x float>, float, iXLen) - define <8 x float> @test_sf_vc_v_fvf_se_e32m4(<8 x float> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvf_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -3642,8 +3084,6 @@ entry: ret <8 x float> %0 } -declare <8 x float> @llvm.riscv.sf.vc.v.fv.se.nxv8f32.nxv8f32.iXLen.f32(iXLen, <8 x float>, float, iXLen) - define void @test_sf_vc_fvf_se_e32m8(<16 x float> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvf_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -3655,8 +3095,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16f32.nxv16f32.f32.iXLen(iXLen, iXLen, <16 x float>, float, iXLen) - define <16 x float> @test_sf_vc_v_fvf_se_e32m8(<16 x float> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvf_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -3668,4 +3106,3 @@ entry: ret <16 x float> %0 } -declare <16 x float> @llvm.riscv.sf.vc.v.fv.se.nxv16f32.nxv16f32.iXLen.f32(iXLen, <16 x float>, float, iXLen) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-xvv.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-xvv.ll index e44ff31406f4a..29b101eb754c4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-xvv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-xvv.ll @@ -15,8 +15,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i8.nxv1i8.iXLen(iXLen, <1 x i8>, <1 x i8>, <1 x i8>, iXLen) - define void @test_sf_vc_vvv_se_e8mf4(<2 x i8> %vd, <2 x i8> %vs2, <2 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -28,8 +26,6 @@ entry: ret void } -declare 
void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i8.nxv2i8.iXLen(iXLen, <2 x i8>, <2 x i8>, <2 x i8>, iXLen) - define void @test_sf_vc_vvv_se_e8mf2(<4 x i8> %vd, <4 x i8> %vs2, <4 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -41,8 +37,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i8.nxv4i8.iXLen(iXLen, <4 x i8>, <4 x i8>, <4 x i8>, iXLen) - define void @test_sf_vc_vvv_se_e8m1(<8 x i8> %vd, <8 x i8> %vs2, <8 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -54,8 +48,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i8.nxv8i8.iXLen(iXLen, <8 x i8>, <8 x i8>, <8 x i8>, iXLen) - define void @test_sf_vc_vvv_se_e8m2(<16 x i8> %vd, <16 x i8> %vs2, <16 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -67,8 +59,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16i8.nxv16i8.iXLen(iXLen, <16 x i8>, <16 x i8>, <16 x i8>, iXLen) - define void @test_sf_vc_vvv_se_e8m4(<32 x i8> %vd, <32 x i8> %vs2, <32 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -80,8 +70,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32i8.nxv32i8.iXLen(iXLen, <32 x i8>, <32 x i8>, <32 x i8>, iXLen) - define void @test_sf_vc_vvv_se_e8m8(<64 x i8> %vd, <64 x i8> %vs2, <64 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -93,8 +81,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv64i8.nxv64i8.iXLen(iXLen, <64 x i8>, <64 x i8>, <64 x i8>, iXLen) - define void @test_sf_vc_vvv_se_e16mf4(<1 x i16> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -106,8 +92,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i16.nxv1i16.iXLen(iXLen, <1 x i16>, <1 x i16>, <1 x i16>, iXLen) - define void 
@test_sf_vc_vvv_se_e16mf2(<2 x i16> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -119,8 +103,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i16.nxv2i16.iXLen(iXLen, <2 x i16>, <2 x i16>, <2 x i16>, iXLen) - define void @test_sf_vc_vvv_se_e16m1(<4 x i16> %vd, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -132,8 +114,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i16.nxv4i16.iXLen(iXLen, <4 x i16>, <4 x i16>, <4 x i16>, iXLen) - define void @test_sf_vc_vvv_se_e16m2(<8 x i16> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -145,8 +125,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i16.nxv8i16.iXLen(iXLen, <8 x i16>, <8 x i16>, <8 x i16>, iXLen) - define void @test_sf_vc_vvv_se_e16m4(<16 x i16> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -158,8 +136,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16i16.nxv16i16.iXLen(iXLen, <16 x i16>, <16 x i16>, <16 x i16>, iXLen) - define void @test_sf_vc_vvv_se_e16m8(<32 x i16> %vd, <32 x i16> %vs2, <32 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -171,8 +147,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32i16.nxv32i16.iXLen(iXLen, <32 x i16>, <32 x i16>, <32 x i16>, iXLen) - define void @test_sf_vc_vvv_se_e32mf2(<1 x i32> %vd, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -184,8 +158,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i32.nxv1i32.iXLen(iXLen, <1 x i32>, <1 x i32>, <1 x i32>, iXLen) - define void @test_sf_vc_vvv_se_e32m1(<2 x i32> %vd, <2 x i32> %vs2, <2 x i32> %vs1, 
iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -197,8 +169,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i32.nxv2i32.iXLen(iXLen, <2 x i32>, <2 x i32>, <2 x i32>, iXLen) - define void @test_sf_vc_vvv_se_e32m2(<4 x i32> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -210,8 +180,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i32.nxv4i32.iXLen(iXLen, <4 x i32>, <4 x i32>, <4 x i32>, iXLen) - define void @test_sf_vc_vvv_se_e32m4(<8 x i32> %vd, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -223,8 +191,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i32.nxv8i32.iXLen(iXLen, <8 x i32>, <8 x i32>, <8 x i32>, iXLen) - define void @test_sf_vc_vvv_se_e32m8(<16 x i32> %vd, <16 x i32> %vs2, <16 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -236,8 +202,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16i32.nxv16i32.iXLen(iXLen, <16 x i32>, <16 x i32>, <16 x i32>, iXLen) - define void @test_sf_vc_vvv_se_e64m1(<1 x i64> %vd, <1 x i64> %vs2, <1 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -249,8 +213,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i64.nxv1i64.iXLen(iXLen, <1 x i64>, <1 x i64>, <1 x i64>, iXLen) - define void @test_sf_vc_vvv_se_e64m2(<2 x i64> %vd, <2 x i64> %vs2, <2 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -262,8 +224,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i64.nxv2i64.iXLen(iXLen, <2 x i64>, <2 x i64>, <2 x i64>, iXLen) - define void @test_sf_vc_vvv_se_e64m4(<4 x i64> %vd, <4 x i64> %vs2, <4 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ 
-275,8 +235,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i64.nxv4i64.iXLen(iXLen, <4 x i64>, <4 x i64>, <4 x i64>, iXLen) - define void @test_sf_vc_vvv_se_e64m8(<8 x i64> %vd, <8 x i64> %vs2, <8 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -288,8 +246,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i64.nxv8i64.iXLen(iXLen, <8 x i64>, <8 x i64>, <8 x i64>, iXLen) - define <1 x i8> @test_sf_vc_v_vvv_se_e8mf8(<1 x i8> %vd, <1 x i8> %vs2, <1 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -301,8 +257,6 @@ entry: ret <1 x i8> %0 } -declare <1 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv1i8.iXLen.nxv1i8.iXLen(iXLen, <1 x i8>, <1 x i8>, <1 x i8>, iXLen) - define <2 x i8> @test_sf_vc_v_vvv_se_e8mf4(<2 x i8> %vd, <2 x i8> %vs2, <2 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -314,8 +268,6 @@ entry: ret <2 x i8> %0 } -declare <2 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv2i8.iXLen.nxv2i8.iXLen(iXLen, <2 x i8>, <2 x i8>, <2 x i8>, iXLen) - define <4 x i8> @test_sf_vc_v_vvv_se_e8mf2(<4 x i8> %vd, <4 x i8> %vs2, <4 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -327,8 +279,6 @@ entry: ret <4 x i8> %0 } -declare <4 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv4i8.iXLen.nxv4i8.iXLen(iXLen, <4 x i8>, <4 x i8>, <4 x i8>, iXLen) - define <8 x i8> @test_sf_vc_v_vvv_se_e8m1(<8 x i8> %vd, <8 x i8> %vs2, <8 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -340,8 +290,6 @@ entry: ret <8 x i8> %0 } -declare <8 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv8i8.iXLen.nxv8i8.iXLen(iXLen, <8 x i8>, <8 x i8>, <8 x i8>, iXLen) - define <16 x i8> @test_sf_vc_v_vvv_se_e8m2(<16 x i8> %vd, <16 x i8> %vs2, <16 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -353,8 +301,6 @@ entry: ret <16 x 
i8> %0 } -declare <16 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv16i8.iXLen.nxv16i8.iXLen(iXLen, <16 x i8>, <16 x i8>, <16 x i8>, iXLen) - define <32 x i8> @test_sf_vc_v_vvv_se_e8m4(<32 x i8> %vd, <32 x i8> %vs2, <32 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -366,8 +312,6 @@ entry: ret <32 x i8> %0 } -declare <32 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv32i8.iXLen.nxv32i8.iXLen(iXLen, <32 x i8>, <32 x i8>, <32 x i8>, iXLen) - define <64 x i8> @test_sf_vc_v_vvv_se_e8m8(<64 x i8> %vd, <64 x i8> %vs2, <64 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -379,8 +323,6 @@ entry: ret <64 x i8> %0 } -declare <64 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv64i8.iXLen.nxv64i8.iXLen(iXLen, <64 x i8>, <64 x i8>, <64 x i8>, iXLen) - define <1 x i16> @test_sf_vc_v_vvv_se_e16mf4(<1 x i16> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -392,8 +334,6 @@ entry: ret <1 x i16> %0 } -declare <1 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv1i16.iXLen.nxv1i16.iXLen(iXLen, <1 x i16>, <1 x i16>, <1 x i16>, iXLen) - define <2 x i16> @test_sf_vc_v_vvv_se_e16mf2(<2 x i16> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -405,8 +345,6 @@ entry: ret <2 x i16> %0 } -declare <2 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv2i16.iXLen.nxv2i16.iXLen(iXLen, <2 x i16>, <2 x i16>, <2 x i16>, iXLen) - define <4 x i16> @test_sf_vc_v_vvv_se_e16m1(<4 x i16> %vd, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -418,8 +356,6 @@ entry: ret <4 x i16> %0 } -declare <4 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv4i16.iXLen.nxv4i16.iXLen(iXLen, <4 x i16>, <4 x i16>, <4 x i16>, iXLen) - define <8 x i16> @test_sf_vc_v_vvv_se_e16m2(<8 x i16> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e16m2: ; CHECK: # 
%bb.0: # %entry @@ -431,8 +367,6 @@ entry: ret <8 x i16> %0 } -declare <8 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv8i16.iXLen.nxv8i16.iXLen(iXLen, <8 x i16>, <8 x i16>, <8 x i16>, iXLen) - define <16 x i16> @test_sf_vc_v_vvv_se_e16m4(<16 x i16> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -444,8 +378,6 @@ entry: ret <16 x i16> %0 } -declare <16 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv16i16.iXLen.nxv16i16.iXLen(iXLen, <16 x i16>, <16 x i16>, <16 x i16>, iXLen) - define <32 x i16> @test_sf_vc_v_vvv_se_e16m8(<32 x i16> %vd, <32 x i16> %vs2, <32 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -457,8 +389,6 @@ entry: ret <32 x i16> %0 } -declare <32 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv32i16.iXLen.nxv32i16.iXLen(iXLen, <32 x i16>, <32 x i16>, <32 x i16>, iXLen) - define <1 x i32> @test_sf_vc_v_vvv_se_e32mf2(<1 x i32> %vd, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -470,8 +400,6 @@ entry: ret <1 x i32> %0 } -declare <1 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv1i32.iXLen.nxv1i32.iXLen(iXLen, <1 x i32>, <1 x i32>, <1 x i32>, iXLen) - define <2 x i32> @test_sf_vc_v_vvv_se_e32m1(<2 x i32> %vd, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -483,8 +411,6 @@ entry: ret <2 x i32> %0 } -declare <2 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv2i32.iXLen.nxv2i32.iXLen(iXLen, <2 x i32>, <2 x i32>, <2 x i32>, iXLen) - define <4 x i32> @test_sf_vc_v_vvv_se_e32m2(<4 x i32> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -496,8 +422,6 @@ entry: ret <4 x i32> %0 } -declare <4 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv4i32.iXLen.nxv4i32.iXLen(iXLen, <4 x i32>, <4 x i32>, <4 x i32>, iXLen) - define <8 x i32> @test_sf_vc_v_vvv_se_e32m4(<8 x i32> %vd, <8 x i32> %vs2, <8 x 
i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -509,8 +433,6 @@ entry: ret <8 x i32> %0 } -declare <8 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv8i32.iXLen.nxv8i32.iXLen(iXLen, <8 x i32>, <8 x i32>, <8 x i32>, iXLen) - define <16 x i32> @test_sf_vc_v_vvv_se_e32m8(<16 x i32> %vd, <16 x i32> %vs2, <16 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -522,8 +444,6 @@ entry: ret <16 x i32> %0 } -declare <16 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv16i32.iXLen.nxv16i32.iXLen(iXLen, <16 x i32>, <16 x i32>, <16 x i32>, iXLen) - define <1 x i64> @test_sf_vc_v_vvv_se_e64m1(<1 x i64> %vd, <1 x i64> %vs2, <1 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -535,8 +455,6 @@ entry: ret <1 x i64> %0 } -declare <1 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv1i64.iXLen.nxv1i64.iXLen(iXLen, <1 x i64>, <1 x i64>, <1 x i64>, iXLen) - define <2 x i64> @test_sf_vc_v_vvv_se_e64m2(<2 x i64> %vd, <2 x i64> %vs2, <2 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -548,8 +466,6 @@ entry: ret <2 x i64> %0 } -declare <2 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv2i64.iXLen.nxv2i64.iXLen(iXLen, <2 x i64>, <2 x i64>, <2 x i64>, iXLen) - define <4 x i64> @test_sf_vc_v_vvv_se_e64m4(<4 x i64> %vd, <4 x i64> %vs2, <4 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -561,8 +477,6 @@ entry: ret <4 x i64> %0 } -declare <4 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv4i64.iXLen.nxv4i64.iXLen(iXLen, <4 x i64>, <4 x i64>, <4 x i64>, iXLen) - define <8 x i64> @test_sf_vc_v_vvv_se_e64m8(<8 x i64> %vd, <8 x i64> %vs2, <8 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -574,8 +488,6 @@ entry: ret <8 x i64> %0 } -declare <8 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv8i64.iXLen.nxv8i64.iXLen(iXLen, <8 x i64>, <8 x i64>, <8 x i64>, iXLen) - define <1 x i8> 
@test_sf_vc_v_vvv_e8mf8(<1 x i8> %vd, <1 x i8> %vs2, <1 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e8mf8: ; CHECK: # %bb.0: # %entry @@ -587,8 +499,6 @@ entry: ret <1 x i8> %0 } -declare <1 x i8> @llvm.riscv.sf.vc.v.vvv.nxv1i8.iXLen.nxv1i8.iXLen(iXLen, <1 x i8>, <1 x i8>, <1 x i8>, iXLen) - define <2 x i8> @test_sf_vc_v_vvv_e8mf4(<2 x i8> %vd, <2 x i8> %vs2, <2 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e8mf4: ; CHECK: # %bb.0: # %entry @@ -600,8 +510,6 @@ entry: ret <2 x i8> %0 } -declare <2 x i8> @llvm.riscv.sf.vc.v.vvv.nxv2i8.iXLen.nxv2i8.iXLen(iXLen, <2 x i8>, <2 x i8>, <2 x i8>, iXLen) - define <4 x i8> @test_sf_vc_v_vvv_e8mf2(<4 x i8> %vd, <4 x i8> %vs2, <4 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e8mf2: ; CHECK: # %bb.0: # %entry @@ -613,8 +521,6 @@ entry: ret <4 x i8> %0 } -declare <4 x i8> @llvm.riscv.sf.vc.v.vvv.nxv4i8.iXLen.nxv4i8.iXLen(iXLen, <4 x i8>, <4 x i8>, <4 x i8>, iXLen) - define <8 x i8> @test_sf_vc_v_vvv_e8m1(<8 x i8> %vd, <8 x i8> %vs2, <8 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e8m1: ; CHECK: # %bb.0: # %entry @@ -626,8 +532,6 @@ entry: ret <8 x i8> %0 } -declare <8 x i8> @llvm.riscv.sf.vc.v.vvv.nxv8i8.iXLen.nxv8i8.iXLen(iXLen, <8 x i8>, <8 x i8>, <8 x i8>, iXLen) - define <16 x i8> @test_sf_vc_v_vvv_e8m2(<16 x i8> %vd, <16 x i8> %vs2, <16 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e8m2: ; CHECK: # %bb.0: # %entry @@ -639,8 +543,6 @@ entry: ret <16 x i8> %0 } -declare <16 x i8> @llvm.riscv.sf.vc.v.vvv.nxv16i8.iXLen.nxv16i8.iXLen(iXLen, <16 x i8>, <16 x i8>, <16 x i8>, iXLen) - define <32 x i8> @test_sf_vc_v_vvv_e8m4(<32 x i8> %vd, <32 x i8> %vs2, <32 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e8m4: ; CHECK: # %bb.0: # %entry @@ -652,8 +554,6 @@ entry: ret <32 x i8> %0 } -declare <32 x i8> @llvm.riscv.sf.vc.v.vvv.nxv32i8.iXLen.nxv32i8.iXLen(iXLen, <32 x i8>, <32 x i8>, <32 x i8>, iXLen) - define <64 x i8> @test_sf_vc_v_vvv_e8m8(<64 x i8> %vd, <64 x i8> 
%vs2, <64 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e8m8: ; CHECK: # %bb.0: # %entry @@ -665,8 +565,6 @@ entry: ret <64 x i8> %0 } -declare <64 x i8> @llvm.riscv.sf.vc.v.vvv.nxv64i8.iXLen.nxv64i8.iXLen(iXLen, <64 x i8>, <64 x i8>, <64 x i8>, iXLen) - define <1 x i16> @test_sf_vc_v_vvv_e16mf4(<1 x i16> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e16mf4: ; CHECK: # %bb.0: # %entry @@ -678,8 +576,6 @@ entry: ret <1 x i16> %0 } -declare <1 x i16> @llvm.riscv.sf.vc.v.vvv.nxv1i16.iXLen.nxv1i16.iXLen(iXLen, <1 x i16>, <1 x i16>, <1 x i16>, iXLen) - define <2 x i16> @test_sf_vc_v_vvv_e16mf2(<2 x i16> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e16mf2: ; CHECK: # %bb.0: # %entry @@ -691,8 +587,6 @@ entry: ret <2 x i16> %0 } -declare <2 x i16> @llvm.riscv.sf.vc.v.vvv.nxv2i16.iXLen.nxv2i16.iXLen(iXLen, <2 x i16>, <2 x i16>, <2 x i16>, iXLen) - define <4 x i16> @test_sf_vc_v_vvv_e16m1(<4 x i16> %vd, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e16m1: ; CHECK: # %bb.0: # %entry @@ -704,8 +598,6 @@ entry: ret <4 x i16> %0 } -declare <4 x i16> @llvm.riscv.sf.vc.v.vvv.nxv4i16.iXLen.nxv4i16.iXLen(iXLen, <4 x i16>, <4 x i16>, <4 x i16>, iXLen) - define <8 x i16> @test_sf_vc_v_vvv_e16m2(<8 x i16> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e16m2: ; CHECK: # %bb.0: # %entry @@ -717,8 +609,6 @@ entry: ret <8 x i16> %0 } -declare <8 x i16> @llvm.riscv.sf.vc.v.vvv.nxv8i16.iXLen.nxv8i16.iXLen(iXLen, <8 x i16>, <8 x i16>, <8 x i16>, iXLen) - define <16 x i16> @test_sf_vc_v_vvv_e16m4(<16 x i16> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e16m4: ; CHECK: # %bb.0: # %entry @@ -730,8 +620,6 @@ entry: ret <16 x i16> %0 } -declare <16 x i16> @llvm.riscv.sf.vc.v.vvv.nxv16i16.iXLen.nxv16i16.iXLen(iXLen, <16 x i16>, <16 x i16>, <16 x i16>, iXLen) - define <32 x i16> @test_sf_vc_v_vvv_e16m8(<32 x i16> 
%vd, <32 x i16> %vs2, <32 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e16m8: ; CHECK: # %bb.0: # %entry @@ -743,8 +631,6 @@ entry: ret <32 x i16> %0 } -declare <32 x i16> @llvm.riscv.sf.vc.v.vvv.nxv32i16.iXLen.nxv32i16.iXLen(iXLen, <32 x i16>, <32 x i16>, <32 x i16>, iXLen) - define <1 x i32> @test_sf_vc_v_vvv_e32mf2(<1 x i32> %vd, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e32mf2: ; CHECK: # %bb.0: # %entry @@ -756,8 +642,6 @@ entry: ret <1 x i32> %0 } -declare <1 x i32> @llvm.riscv.sf.vc.v.vvv.nxv1i32.iXLen.nxv1i32.iXLen(iXLen, <1 x i32>, <1 x i32>, <1 x i32>, iXLen) - define <2 x i32> @test_sf_vc_v_vvv_e32m1(<2 x i32> %vd, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e32m1: ; CHECK: # %bb.0: # %entry @@ -769,8 +653,6 @@ entry: ret <2 x i32> %0 } -declare <2 x i32> @llvm.riscv.sf.vc.v.vvv.nxv2i32.iXLen.nxv2i32.iXLen(iXLen, <2 x i32>, <2 x i32>, <2 x i32>, iXLen) - define <4 x i32> @test_sf_vc_v_vvv_e32m2(<4 x i32> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e32m2: ; CHECK: # %bb.0: # %entry @@ -782,8 +664,6 @@ entry: ret <4 x i32> %0 } -declare <4 x i32> @llvm.riscv.sf.vc.v.vvv.nxv4i32.iXLen.nxv4i32.iXLen(iXLen, <4 x i32>, <4 x i32>, <4 x i32>, iXLen) - define <8 x i32> @test_sf_vc_v_vvv_e32m4(<8 x i32> %vd, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e32m4: ; CHECK: # %bb.0: # %entry @@ -795,8 +675,6 @@ entry: ret <8 x i32> %0 } -declare <8 x i32> @llvm.riscv.sf.vc.v.vvv.nxv8i32.iXLen.nxv8i32.iXLen(iXLen, <8 x i32>, <8 x i32>, <8 x i32>, iXLen) - define <16 x i32> @test_sf_vc_v_vvv_e32m8(<16 x i32> %vd, <16 x i32> %vs2, <16 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e32m8: ; CHECK: # %bb.0: # %entry @@ -808,8 +686,6 @@ entry: ret <16 x i32> %0 } -declare <16 x i32> @llvm.riscv.sf.vc.v.vvv.nxv16i32.iXLen.nxv16i32.iXLen(iXLen, <16 x i32>, <16 x i32>, <16 x i32>, iXLen) - define <1 x i64> 
@test_sf_vc_v_vvv_e64m1(<1 x i64> %vd, <1 x i64> %vs2, <1 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e64m1: ; CHECK: # %bb.0: # %entry @@ -821,8 +697,6 @@ entry: ret <1 x i64> %0 } -declare <1 x i64> @llvm.riscv.sf.vc.v.vvv.nxv1i64.iXLen.nxv1i64.iXLen(iXLen, <1 x i64>, <1 x i64>, <1 x i64>, iXLen) - define <2 x i64> @test_sf_vc_v_vvv_e64m2(<2 x i64> %vd, <2 x i64> %vs2, <2 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e64m2: ; CHECK: # %bb.0: # %entry @@ -834,8 +708,6 @@ entry: ret <2 x i64> %0 } -declare <2 x i64> @llvm.riscv.sf.vc.v.vvv.nxv2i64.iXLen.nxv2i64.iXLen(iXLen, <2 x i64>, <2 x i64>, <2 x i64>, iXLen) - define <4 x i64> @test_sf_vc_v_vvv_e64m4(<4 x i64> %vd, <4 x i64> %vs2, <4 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e64m4: ; CHECK: # %bb.0: # %entry @@ -847,8 +719,6 @@ entry: ret <4 x i64> %0 } -declare <4 x i64> @llvm.riscv.sf.vc.v.vvv.nxv4i64.iXLen.nxv4i64.iXLen(iXLen, <4 x i64>, <4 x i64>, <4 x i64>, iXLen) - define <8 x i64> @test_sf_vc_v_vvv_e64m8(<8 x i64> %vd, <8 x i64> %vs2, <8 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e64m8: ; CHECK: # %bb.0: # %entry @@ -860,8 +730,6 @@ entry: ret <8 x i64> %0 } -declare <8 x i64> @llvm.riscv.sf.vc.v.vvv.nxv8i64.iXLen.nxv8i64.iXLen(iXLen, <8 x i64>, <8 x i64>, <8 x i64>, iXLen) - define void @test_sf_vc_xvv_se_e8mf8(<1 x i8> %vd, <1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -873,8 +741,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1i8.i8.iXLen(iXLen, <1 x i8>, <1 x i8>, i8, iXLen) - define void @test_sf_vc_xvv_se_e8mf4(<2 x i8> %vd, <2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -886,8 +752,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2i8.i8.iXLen(iXLen, <2 x i8>, <2 x i8>, i8, iXLen) - define void @test_sf_vc_xvv_se_e8mf2(<4 x i8> %vd, <4 x i8> %vs2, i8 zeroext 
%rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -899,8 +763,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4i8.i8.iXLen(iXLen, <4 x i8>, <4 x i8>, i8, iXLen) - define void @test_sf_vc_xvv_se_e8m1(<8 x i8> %vd, <8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -912,8 +774,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8i8.i8.iXLen(iXLen, <8 x i8>, <8 x i8>, i8, iXLen) - define void @test_sf_vc_xvv_se_e8m2(<16 x i8> %vd, <16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -925,8 +785,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16i8.i8.iXLen(iXLen, <16 x i8>, <16 x i8>, i8, iXLen) - define void @test_sf_vc_xvv_se_e8m4(<32 x i8> %vd, <32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -938,8 +796,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32i8.i8.iXLen(iXLen, <32 x i8>, <32 x i8>, i8, iXLen) - define void @test_sf_vc_xvv_se_e8m8(<64 x i8> %vd, <64 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -951,8 +807,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv64i8.i8.iXLen(iXLen, <64 x i8>, <64 x i8>, i8, iXLen) - define void @test_sf_vc_xvv_se_e16mf4(<1 x i16> %vd, <1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -964,8 +818,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1i16.i16.iXLen(iXLen, <1 x i16>, <1 x i16>, i16, iXLen) - define void @test_sf_vc_xvv_se_e16mf2(<2 x i16> %vd, <2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -977,8 +829,6 @@ entry: ret void } -declare void 
@llvm.riscv.sf.vc.xvv.se.iXLen.nxv2i16.i16.iXLen(iXLen, <2 x i16>, <2 x i16>, i16, iXLen) - define void @test_sf_vc_xvv_se_e16m1(<4 x i16> %vd, <4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -990,8 +840,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4i16.i16.iXLen(iXLen, <4 x i16>, <4 x i16>, i16, iXLen) - define void @test_sf_vc_xvv_se_e16m2(<8 x i16> %vd, <8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -1003,8 +851,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8i16.i16.iXLen(iXLen, <8 x i16>, <8 x i16>, i16, iXLen) - define void @test_sf_vc_xvv_se_e16m4(<16 x i16> %vd, <16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -1016,8 +862,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16i16.i16.iXLen(iXLen, <16 x i16>, <16 x i16>, i16, iXLen) - define void @test_sf_vc_xvv_se_e16m8(<32 x i16> %vd, <32 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -1029,8 +873,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32i16.i16.iXLen(iXLen, <32 x i16>, <32 x i16>, i16, iXLen) - define void @test_sf_vc_xvv_se_e32mf2(<1 x i32> %vd, <1 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1042,8 +884,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv1i32.iXLen.iXLen(iXLen, <1 x i32>, <1 x i32>, i32, iXLen) - define void @test_sf_vc_xvv_se_e32m1(<2 x i32> %vd, <2 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1055,8 +895,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv2i32.iXLen.iXLen(iXLen, <2 x i32>, <2 x i32>, i32, iXLen) - define void 
@test_sf_vc_xvv_se_e32m2(<4 x i32> %vd, <4 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1068,8 +906,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv4i32.iXLen.iXLen(iXLen, <4 x i32>, <4 x i32>, i32, iXLen) - define void @test_sf_vc_xvv_se_e32m4(<8 x i32> %vd, <8 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1081,8 +917,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv8i32.iXLen.iXLen(iXLen, <8 x i32>, <8 x i32>, i32, iXLen) - define void @test_sf_vc_xvv_se_e32m8(<16 x i32> %vd, <16 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -1094,8 +928,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv16i32.iXLen.iXLen(iXLen, <16 x i32>, <16 x i32>, i32, iXLen) - define <1 x i8> @test_sf_vc_v_xvv_se_e8mf8(<1 x i8> %vd, <1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1107,8 +939,6 @@ entry: ret <1 x i8> %0 } -declare <1 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv1i8.iXLen.i8.iXLen(iXLen, <1 x i8>, <1 x i8>, i8, iXLen) - define <2 x i8> @test_sf_vc_v_xvv_se_e8mf4(<2 x i8> %vd, <2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1120,8 +950,6 @@ entry: ret <2 x i8> %0 } -declare <2 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv2i8.iXLen.i8.iXLen(iXLen, <2 x i8>, <2 x i8>, i8, iXLen) - define <4 x i8> @test_sf_vc_v_xvv_se_e8mf2(<4 x i8> %vd, <4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1133,8 +961,6 @@ entry: ret <4 x i8> %0 } -declare <4 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv4i8.iXLen.i8.iXLen(iXLen, <4 x i8>, <4 x i8>, i8, iXLen) - define <8 x i8> @test_sf_vc_v_xvv_se_e8m1(<8 x i8> %vd, <8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { 
; CHECK-LABEL: test_sf_vc_v_xvv_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -1146,8 +972,6 @@ entry: ret <8 x i8> %0 } -declare <8 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv8i8.iXLen.i8.iXLen(iXLen, <8 x i8>, <8 x i8>, i8, iXLen) - define <16 x i8> @test_sf_vc_v_xvv_se_e8m2(<16 x i8> %vd, <16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -1159,8 +983,6 @@ entry: ret <16 x i8> %0 } -declare <16 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv16i8.iXLen.i8.iXLen(iXLen, <16 x i8>, <16 x i8>, i8, iXLen) - define <32 x i8> @test_sf_vc_v_xvv_se_e8m4(<32 x i8> %vd, <32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -1172,8 +994,6 @@ entry: ret <32 x i8> %0 } -declare <32 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv32i8.iXLen.i8.iXLen(iXLen, <32 x i8>, <32 x i8>, i8, iXLen) - define <64 x i8> @test_sf_vc_v_xvv_se_e8m8(<64 x i8> %vd, <64 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -1185,8 +1005,6 @@ entry: ret <64 x i8> %0 } -declare <64 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv64i8.iXLen.i8.iXLen(iXLen, <64 x i8>, <64 x i8>, i8, iXLen) - define <1 x i16> @test_sf_vc_v_xvv_se_e16mf4(<1 x i16> %vd, <1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1198,8 +1016,6 @@ entry: ret <1 x i16> %0 } -declare <1 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv1i16.iXLen.i16.iXLen(iXLen, <1 x i16>, <1 x i16>, i16, iXLen) - define <2 x i16> @test_sf_vc_v_xvv_se_e16mf2(<2 x i16> %vd, <2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1211,8 +1027,6 @@ entry: ret <2 x i16> %0 } -declare <2 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv2i16.iXLen.i16.iXLen(iXLen, <2 x i16>, <2 x i16>, i16, iXLen) - define <4 x i16> @test_sf_vc_v_xvv_se_e16m1(<4 x i16> %vd, <4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; 
CHECK-LABEL: test_sf_vc_v_xvv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -1224,8 +1038,6 @@ entry: ret <4 x i16> %0 } -declare <4 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv4i16.iXLen.i16.iXLen(iXLen, <4 x i16>, <4 x i16>, i16, iXLen) - define <8 x i16> @test_sf_vc_v_xvv_se_e16m2(<8 x i16> %vd, <8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -1237,8 +1049,6 @@ entry: ret <8 x i16> %0 } -declare <8 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv8i16.iXLen.i16.iXLen(iXLen, <8 x i16>, <8 x i16>, i16, iXLen) - define <16 x i16> @test_sf_vc_v_xvv_se_e16m4(<16 x i16> %vd, <16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -1250,8 +1060,6 @@ entry: ret <16 x i16> %0 } -declare <16 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv16i16.iXLen.i16.iXLen(iXLen, <16 x i16>, <16 x i16>, i16, iXLen) - define <32 x i16> @test_sf_vc_v_xvv_se_e16m8(<32 x i16> %vd, <32 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -1263,8 +1071,6 @@ entry: ret <32 x i16> %0 } -declare <32 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv32i16.iXLen.i16.iXLen(iXLen, <32 x i16>, <32 x i16>, i16, iXLen) - define <1 x i32> @test_sf_vc_v_xvv_se_e32mf2(<1 x i32> %vd, <1 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1276,8 +1082,6 @@ entry: ret <1 x i32> %0 } -declare <1 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv1i32.iXLen.i32.iXLen(iXLen, <1 x i32>, <1 x i32>, i32, iXLen) - define <2 x i32> @test_sf_vc_v_xvv_se_e32m1(<2 x i32> %vd, <2 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1289,8 +1093,6 @@ entry: ret <2 x i32> %0 } -declare <2 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv2i32.iXLen.i32.iXLen(iXLen, <2 x i32>, <2 x i32>, i32, iXLen) - define <4 x i32> @test_sf_vc_v_xvv_se_e32m2(<4 x i32> %vd, <4 x i32> 
%vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1302,8 +1104,6 @@ entry: ret <4 x i32> %0 } -declare <4 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv4i32.iXLen.i32.iXLen(iXLen, <4 x i32>, <4 x i32>, i32, iXLen) - define <8 x i32> @test_sf_vc_v_xvv_se_e32m4(<8 x i32> %vd, <8 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1315,8 +1115,6 @@ entry: ret <8 x i32> %0 } -declare <8 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv8i32.iXLen.i32.iXLen(iXLen, <8 x i32>, <8 x i32>, i32, iXLen) - define <16 x i32> @test_sf_vc_v_xvv_se_e32m8(<16 x i32> %vd, <16 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -1328,8 +1126,6 @@ entry: ret <16 x i32> %0 } -declare <16 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv16i32.iXLen.i32.iXLen(iXLen, <16 x i32>, <16 x i32>, i32, iXLen) - define <1 x i8> @test_sf_vc_v_xvv_e8mf8(<1 x i8> %vd, <1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1341,8 +1137,6 @@ entry: ret <1 x i8> %0 } -declare <1 x i8> @llvm.riscv.sf.vc.v.xvv.nxv1i8.iXLen.i8.iXLen(iXLen, <1 x i8>, <1 x i8>, i8, iXLen) - define <2 x i8> @test_sf_vc_v_xvv_e8mf4(<2 x i8> %vd, <2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1354,8 +1148,6 @@ entry: ret <2 x i8> %0 } -declare <2 x i8> @llvm.riscv.sf.vc.v.xvv.nxv2i8.iXLen.i8.iXLen(iXLen, <2 x i8>, <2 x i8>, i8, iXLen) - define <4 x i8> @test_sf_vc_v_xvv_e8mf2(<4 x i8> %vd, <4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1367,8 +1159,6 @@ entry: ret <4 x i8> %0 } -declare <4 x i8> @llvm.riscv.sf.vc.v.xvv.nxv4i8.iXLen.i8.iXLen(iXLen, <4 x i8>, <4 x i8>, i8, iXLen) - define <8 x i8> @test_sf_vc_v_xvv_e8m1(<8 x i8> %vd, <8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; 
CHECK-LABEL: test_sf_vc_v_xvv_e8m1: ; CHECK: # %bb.0: # %entry @@ -1380,8 +1170,6 @@ entry: ret <8 x i8> %0 } -declare <8 x i8> @llvm.riscv.sf.vc.v.xvv.nxv8i8.iXLen.i8.iXLen(iXLen, <8 x i8>, <8 x i8>, i8, iXLen) - define <16 x i8> @test_sf_vc_v_xvv_e8m2(<16 x i8> %vd, <16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e8m2: ; CHECK: # %bb.0: # %entry @@ -1393,8 +1181,6 @@ entry: ret <16 x i8> %0 } -declare <16 x i8> @llvm.riscv.sf.vc.v.xvv.nxv16i8.iXLen.i8.iXLen(iXLen, <16 x i8>, <16 x i8>, i8, iXLen) - define <32 x i8> @test_sf_vc_v_xvv_e8m4(<32 x i8> %vd, <32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e8m4: ; CHECK: # %bb.0: # %entry @@ -1406,8 +1192,6 @@ entry: ret <32 x i8> %0 } -declare <32 x i8> @llvm.riscv.sf.vc.v.xvv.nxv32i8.iXLen.i8.iXLen(iXLen, <32 x i8>, <32 x i8>, i8, iXLen) - define <64 x i8> @test_sf_vc_v_xvv_e8m8(<64 x i8> %vd, <64 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e8m8: ; CHECK: # %bb.0: # %entry @@ -1419,8 +1203,6 @@ entry: ret <64 x i8> %0 } -declare <64 x i8> @llvm.riscv.sf.vc.v.xvv.nxv64i8.iXLen.i8.iXLen(iXLen, <64 x i8>, <64 x i8>, i8, iXLen) - define <1 x i16> @test_sf_vc_v_xvv_e16mf4(<1 x i16> %vd, <1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1432,8 +1214,6 @@ entry: ret <1 x i16> %0 } -declare <1 x i16> @llvm.riscv.sf.vc.v.xvv.nxv1i16.iXLen.i16.iXLen(iXLen, <1 x i16>, <1 x i16>, i16, iXLen) - define <2 x i16> @test_sf_vc_v_xvv_e16mf2(<2 x i16> %vd, <2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1445,8 +1225,6 @@ entry: ret <2 x i16> %0 } -declare <2 x i16> @llvm.riscv.sf.vc.v.xvv.nxv2i16.iXLen.i16.iXLen(iXLen, <2 x i16>, <2 x i16>, i16, iXLen) - define <4 x i16> @test_sf_vc_v_xvv_e16m1(<4 x i16> %vd, <4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e16m1: ; CHECK: # %bb.0: # 
%entry @@ -1458,8 +1236,6 @@ entry: ret <4 x i16> %0 } -declare <4 x i16> @llvm.riscv.sf.vc.v.xvv.nxv4i16.iXLen.i16.iXLen(iXLen, <4 x i16>, <4 x i16>, i16, iXLen) - define <8 x i16> @test_sf_vc_v_xvv_e16m2(<8 x i16> %vd, <8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e16m2: ; CHECK: # %bb.0: # %entry @@ -1471,8 +1247,6 @@ entry: ret <8 x i16> %0 } -declare <8 x i16> @llvm.riscv.sf.vc.v.xvv.nxv8i16.iXLen.i16.iXLen(iXLen, <8 x i16>, <8 x i16>, i16, iXLen) - define <16 x i16> @test_sf_vc_v_xvv_e16m4(<16 x i16> %vd, <16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e16m4: ; CHECK: # %bb.0: # %entry @@ -1484,8 +1258,6 @@ entry: ret <16 x i16> %0 } -declare <16 x i16> @llvm.riscv.sf.vc.v.xvv.nxv16i16.iXLen.i16.iXLen(iXLen, <16 x i16>, <16 x i16>, i16, iXLen) - define <32 x i16> @test_sf_vc_v_xvv_e16m8(<32 x i16> %vd, <32 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e16m8: ; CHECK: # %bb.0: # %entry @@ -1497,8 +1269,6 @@ entry: ret <32 x i16> %0 } -declare <32 x i16> @llvm.riscv.sf.vc.v.xvv.nxv32i16.iXLen.i16.iXLen(iXLen, <32 x i16>, <32 x i16>, i16, iXLen) - define <1 x i32> @test_sf_vc_v_xvv_e32mf2(<1 x i32> %vd, <1 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1510,8 +1280,6 @@ entry: ret <1 x i32> %0 } -declare <1 x i32> @llvm.riscv.sf.vc.v.xvv.nxv1i32.iXLen.i32.iXLen(iXLen, <1 x i32>, <1 x i32>, i32, iXLen) - define <2 x i32> @test_sf_vc_v_xvv_e32m1(<2 x i32> %vd, <2 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e32m1: ; CHECK: # %bb.0: # %entry @@ -1523,8 +1291,6 @@ entry: ret <2 x i32> %0 } -declare <2 x i32> @llvm.riscv.sf.vc.v.xvv.nxv2i32.iXLen.i32.iXLen(iXLen, <2 x i32>, <2 x i32>, i32, iXLen) - define <4 x i32> @test_sf_vc_v_xvv_e32m2(<4 x i32> %vd, <4 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e32m2: ; CHECK: # %bb.0: # %entry @@ -1536,8 
+1302,6 @@ entry: ret <4 x i32> %0 } -declare <4 x i32> @llvm.riscv.sf.vc.v.xvv.nxv4i32.iXLen.i32.iXLen(iXLen, <4 x i32>, <4 x i32>, i32, iXLen) - define <8 x i32> @test_sf_vc_v_xvv_e32m4(<8 x i32> %vd, <8 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e32m4: ; CHECK: # %bb.0: # %entry @@ -1549,8 +1313,6 @@ entry: ret <8 x i32> %0 } -declare <8 x i32> @llvm.riscv.sf.vc.v.xvv.nxv8i32.iXLen.i32.iXLen(iXLen, <8 x i32>, <8 x i32>, i32, iXLen) - define <16 x i32> @test_sf_vc_v_xvv_e32m8(<16 x i32> %vd, <16 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e32m8: ; CHECK: # %bb.0: # %entry @@ -1562,8 +1324,6 @@ entry: ret <16 x i32> %0 } -declare <16 x i32> @llvm.riscv.sf.vc.v.xvv.nxv16i32.iXLen.i32.iXLen(iXLen, <16 x i32>, <16 x i32>, i32, iXLen) - define void @test_sf_vc_ivv_se_e8mf8(<1 x i8> %vd, <1 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1575,8 +1335,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i8.iXLen.iXLen(iXLen, <1 x i8>, <1 x i8>, iXLen, iXLen) - define void @test_sf_vc_ivv_se_e8mf4(<2 x i8> %vd, <2 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1588,8 +1346,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i8.iXLen.iXLen(iXLen, <2 x i8>, <2 x i8>, iXLen, iXLen) - define void @test_sf_vc_ivv_se_e8mf2(<4 x i8> %vd, <4 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1601,8 +1357,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i8.iXLen.iXLen(iXLen, <4 x i8>, <4 x i8>, iXLen, iXLen) - define void @test_sf_vc_ivv_se_e8m1(<8 x i8> %vd, <8 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -1614,8 +1368,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i8.iXLen.iXLen(iXLen, <8 x i8>, <8 x i8>, iXLen, iXLen) - define void 
@test_sf_vc_ivv_se_e8m2(<16 x i8> %vd, <16 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -1627,8 +1379,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16i8.iXLen.iXLen(iXLen, <16 x i8>, <16 x i8>, iXLen, iXLen) - define void @test_sf_vc_ivv_se_e8m4(<32 x i8> %vd, <32 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -1640,8 +1390,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32i8.iXLen.iXLen(iXLen, <32 x i8>, <32 x i8>, iXLen, iXLen) - define void @test_sf_vc_ivv_se_e8m8(<64 x i8> %vd, <64 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -1653,8 +1401,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv64i8.iXLen.iXLen(iXLen, <64 x i8>, <64 x i8>, iXLen, iXLen) - define void @test_sf_vc_ivv_se_e16mf4(<1 x i16> %vd, <1 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1666,8 +1412,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i16.iXLen.iXLen(iXLen, <1 x i16>, <1 x i16>, iXLen, iXLen) - define void @test_sf_vc_ivv_se_e16mf2(<2 x i16> %vd, <2 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1679,8 +1423,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i16.iXLen.iXLen(iXLen, <2 x i16>, <2 x i16>, iXLen, iXLen) - define void @test_sf_vc_ivv_se_e16m1(<4 x i16> %vd, <4 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -1692,8 +1434,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i16.iXLen.iXLen(iXLen, <4 x i16>, <4 x i16>, iXLen, iXLen) - define void @test_sf_vc_ivv_se_e16m2(<8 x i16> %vd, <8 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -1705,8 +1445,6 @@ entry: ret void } -declare void 
@llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i16.iXLen.iXLen(iXLen, <8 x i16>, <8 x i16>, iXLen, iXLen) - define void @test_sf_vc_ivv_se_e16m4(<16 x i16> %vd, <16 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -1718,8 +1456,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16i16.iXLen.iXLen(iXLen, <16 x i16>, <16 x i16>, iXLen, iXLen) - define void @test_sf_vc_ivv_se_e16m8(<32 x i16> %vd, <32 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -1731,8 +1467,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32i16.iXLen.iXLen(iXLen, <32 x i16>, <32 x i16>, iXLen, iXLen) - define void @test_sf_vc_ivv_se_e32mf2(<1 x i32> %vd, <1 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1744,8 +1478,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i32.iXLen.iXLen(iXLen, <1 x i32>, <1 x i32>, iXLen, iXLen) - define void @test_sf_vc_ivv_se_e32m1(<2 x i32> %vd, <2 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1757,8 +1489,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i32.iXLen.iXLen(iXLen, <2 x i32>, <2 x i32>, iXLen, iXLen) - define void @test_sf_vc_ivv_se_e32m2(<4 x i32> %vd, <4 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1770,8 +1500,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i32.iXLen.iXLen(iXLen, <4 x i32>, <4 x i32>, iXLen, iXLen) - define void @test_sf_vc_ivv_se_e32m4(<8 x i32> %vd, <8 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1783,8 +1511,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i32.iXLen.iXLen(iXLen, <8 x i32>, <8 x i32>, iXLen, iXLen) - define void @test_sf_vc_ivv_se_e32m8(<16 x i32> %vd, <16 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_ivv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -1796,8 +1522,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16i32.iXLen.iXLen(iXLen, <16 x i32>, <16 x i32>, iXLen, iXLen) - define void @test_sf_vc_ivv_se_e64m1(<1 x i64> %vd, <1 x i64> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -1809,8 +1533,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i64.iXLen.iXLen(iXLen, <1 x i64>, <1 x i64>, iXLen, iXLen) - define void @test_sf_vc_ivv_se_e64m2(<2 x i64> %vd, <2 x i64> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -1822,8 +1544,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i64.iXLen.iXLen(iXLen, <2 x i64>, <2 x i64>, iXLen, iXLen) - define void @test_sf_vc_ivv_se_e64m4(<4 x i64> %vd, <4 x i64> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -1835,8 +1555,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i64.iXLen.iXLen(iXLen, <4 x i64>, <4 x i64>, iXLen, iXLen) - define void @test_sf_vc_ivv_se_e64m8(<8 x i64> %vd, <8 x i64> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -1848,8 +1566,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i64.iXLen.iXLen(iXLen, <8 x i64>, <8 x i64>, iXLen, iXLen) - define <1 x i8> @test_sf_vc_v_ivv_se_e8mf8(<1 x i8> %vd, <1 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1861,8 +1577,6 @@ entry: ret <1 x i8> %0 } -declare <1 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv1i8.iXLen.iXLen.iXLen(iXLen, <1 x i8>, <1 x i8>, iXLen, iXLen) - define <2 x i8> @test_sf_vc_v_ivv_se_e8mf4(<2 x i8> %vd, <2 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1874,8 +1588,6 @@ entry: ret <2 x i8> %0 } -declare <2 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv2i8.iXLen.iXLen.iXLen(iXLen, <2 x i8>, 
<2 x i8>, iXLen, iXLen) - define <4 x i8> @test_sf_vc_v_ivv_se_e8mf2(<4 x i8> %vd, <4 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1887,8 +1599,6 @@ entry: ret <4 x i8> %0 } -declare <4 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv4i8.iXLen.iXLen.iXLen(iXLen, <4 x i8>, <4 x i8>, iXLen, iXLen) - define <8 x i8> @test_sf_vc_v_ivv_se_e8m1(<8 x i8> %vd, <8 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -1900,8 +1610,6 @@ entry: ret <8 x i8> %0 } -declare <8 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv8i8.iXLen.iXLen.iXLen(iXLen, <8 x i8>, <8 x i8>, iXLen, iXLen) - define <16 x i8> @test_sf_vc_v_ivv_se_e8m2(<16 x i8> %vd, <16 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -1913,8 +1621,6 @@ entry: ret <16 x i8> %0 } -declare <16 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv16i8.iXLen.iXLen.iXLen(iXLen, <16 x i8>, <16 x i8>, iXLen, iXLen) - define <32 x i8> @test_sf_vc_v_ivv_se_e8m4(<32 x i8> %vd, <32 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -1926,8 +1632,6 @@ entry: ret <32 x i8> %0 } -declare <32 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv32i8.iXLen.iXLen.iXLen(iXLen, <32 x i8>, <32 x i8>, iXLen, iXLen) - define <64 x i8> @test_sf_vc_v_ivv_se_e8m8(<64 x i8> %vd, <64 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -1939,8 +1643,6 @@ entry: ret <64 x i8> %0 } -declare <64 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv64i8.iXLen.iXLen.iXLen(iXLen, <64 x i8>, <64 x i8>, iXLen, iXLen) - define <1 x i16> @test_sf_vc_v_ivv_se_e16mf4(<1 x i16> %vd, <1 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1952,8 +1654,6 @@ entry: ret <1 x i16> %0 } -declare <1 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv1i16.iXLen.iXLen.iXLen(iXLen, <1 x i16>, <1 x i16>, iXLen, iXLen) - define <2 x i16> @test_sf_vc_v_ivv_se_e16mf2(<2 x i16> %vd, <2 
x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1965,8 +1665,6 @@ entry: ret <2 x i16> %0 } -declare <2 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv2i16.iXLen.iXLen.iXLen(iXLen, <2 x i16>, <2 x i16>, iXLen, iXLen) - define <4 x i16> @test_sf_vc_v_ivv_se_e16m1(<4 x i16> %vd, <4 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -1978,8 +1676,6 @@ entry: ret <4 x i16> %0 } -declare <4 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv4i16.iXLen.iXLen.iXLen(iXLen, <4 x i16>, <4 x i16>, iXLen, iXLen) - define <8 x i16> @test_sf_vc_v_ivv_se_e16m2(<8 x i16> %vd, <8 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -1991,8 +1687,6 @@ entry: ret <8 x i16> %0 } -declare <8 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv8i16.iXLen.iXLen.iXLen(iXLen, <8 x i16>, <8 x i16>, iXLen, iXLen) - define <16 x i16> @test_sf_vc_v_ivv_se_e16m4(<16 x i16> %vd, <16 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -2004,8 +1698,6 @@ entry: ret <16 x i16> %0 } -declare <16 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv16i16.iXLen.iXLen.iXLen(iXLen, <16 x i16>, <16 x i16>, iXLen, iXLen) - define <32 x i16> @test_sf_vc_v_ivv_se_e16m8(<32 x i16> %vd, <32 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -2017,8 +1709,6 @@ entry: ret <32 x i16> %0 } -declare <32 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv32i16.iXLen.iXLen.iXLen(iXLen, <32 x i16>, <32 x i16>, iXLen, iXLen) - define <1 x i32> @test_sf_vc_v_ivv_se_e32mf2(<1 x i32> %vd, <1 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2030,8 +1720,6 @@ entry: ret <1 x i32> %0 } -declare <1 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv1i32.iXLen.iXLen.iXLen(iXLen, <1 x i32>, <1 x i32>, iXLen, iXLen) - define <2 x i32> @test_sf_vc_v_ivv_se_e32m1(<2 x i32> %vd, <2 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_v_ivv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2043,8 +1731,6 @@ entry: ret <2 x i32> %0 } -declare <2 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv2i32.iXLen.iXLen.iXLen(iXLen, <2 x i32>, <2 x i32>, iXLen, iXLen) - define <4 x i32> @test_sf_vc_v_ivv_se_e32m2(<4 x i32> %vd, <4 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2056,8 +1742,6 @@ entry: ret <4 x i32> %0 } -declare <4 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv4i32.iXLen.iXLen.iXLen(iXLen, <4 x i32>, <4 x i32>, iXLen, iXLen) - define <8 x i32> @test_sf_vc_v_ivv_se_e32m4(<8 x i32> %vd, <8 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2069,8 +1753,6 @@ entry: ret <8 x i32> %0 } -declare <8 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv8i32.iXLen.iXLen.iXLen(iXLen, <8 x i32>, <8 x i32>, iXLen, iXLen) - define <16 x i32> @test_sf_vc_v_ivv_se_e32m8(<16 x i32> %vd, <16 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2082,8 +1764,6 @@ entry: ret <16 x i32> %0 } -declare <16 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv16i32.iXLen.iXLen.iXLen(iXLen, <16 x i32>, <16 x i32>, iXLen, iXLen) - define <1 x i64> @test_sf_vc_v_ivv_se_e64m1(<1 x i64> %vd, <1 x i64> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -2095,8 +1775,6 @@ entry: ret <1 x i64> %0 } -declare <1 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv1i64.iXLen.iXLen.iXLen(iXLen, <1 x i64>, <1 x i64>, iXLen, iXLen) - define <2 x i64> @test_sf_vc_v_ivv_se_e64m2(<2 x i64> %vd, <2 x i64> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -2108,8 +1786,6 @@ entry: ret <2 x i64> %0 } -declare <2 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv2i64.iXLen.iXLen.iXLen(iXLen, <2 x i64>, <2 x i64>, iXLen, iXLen) - define <4 x i64> @test_sf_vc_v_ivv_se_e64m4(<4 x i64> %vd, <4 x i64> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ 
-2121,8 +1797,6 @@ entry: ret <4 x i64> %0 } -declare <4 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv4i64.iXLen.iXLen.iXLen(iXLen, <4 x i64>, <4 x i64>, iXLen, iXLen) - define <8 x i64> @test_sf_vc_v_ivv_se_e64m8(<8 x i64> %vd, <8 x i64> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -2134,8 +1808,6 @@ entry: ret <8 x i64> %0 } -declare <8 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv8i64.iXLen.iXLen.iXLen(iXLen, <8 x i64>, <8 x i64>, iXLen, iXLen) - define <1 x i8> @test_sf_vc_v_ivv_e8mf8(<1 x i8> %vd, <1 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e8mf8: ; CHECK: # %bb.0: # %entry @@ -2147,8 +1819,6 @@ entry: ret <1 x i8> %0 } -declare <1 x i8> @llvm.riscv.sf.vc.v.ivv.nxv1i8.iXLen.iXLen.iXLen(iXLen, <1 x i8>, <1 x i8>, iXLen, iXLen) - define <2 x i8> @test_sf_vc_v_ivv_e8mf4(<2 x i8> %vd, <2 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e8mf4: ; CHECK: # %bb.0: # %entry @@ -2160,8 +1830,6 @@ entry: ret <2 x i8> %0 } -declare <2 x i8> @llvm.riscv.sf.vc.v.ivv.nxv2i8.iXLen.iXLen.iXLen(iXLen, <2 x i8>, <2 x i8>, iXLen, iXLen) - define <4 x i8> @test_sf_vc_v_ivv_e8mf2(<4 x i8> %vd, <4 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e8mf2: ; CHECK: # %bb.0: # %entry @@ -2173,8 +1841,6 @@ entry: ret <4 x i8> %0 } -declare <4 x i8> @llvm.riscv.sf.vc.v.ivv.nxv4i8.iXLen.iXLen.iXLen(iXLen, <4 x i8>, <4 x i8>, iXLen, iXLen) - define <8 x i8> @test_sf_vc_v_ivv_e8m1(<8 x i8> %vd, <8 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e8m1: ; CHECK: # %bb.0: # %entry @@ -2186,8 +1852,6 @@ entry: ret <8 x i8> %0 } -declare <8 x i8> @llvm.riscv.sf.vc.v.ivv.nxv8i8.iXLen.iXLen.iXLen(iXLen, <8 x i8>, <8 x i8>, iXLen, iXLen) - define <16 x i8> @test_sf_vc_v_ivv_e8m2(<16 x i8> %vd, <16 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e8m2: ; CHECK: # %bb.0: # %entry @@ -2199,8 +1863,6 @@ entry: ret <16 x i8> %0 } -declare <16 x i8> @llvm.riscv.sf.vc.v.ivv.nxv16i8.iXLen.iXLen.iXLen(iXLen, <16 x i8>, <16 x i8>, 
iXLen, iXLen) - define <32 x i8> @test_sf_vc_v_ivv_e8m4(<32 x i8> %vd, <32 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e8m4: ; CHECK: # %bb.0: # %entry @@ -2212,8 +1874,6 @@ entry: ret <32 x i8> %0 } -declare <32 x i8> @llvm.riscv.sf.vc.v.ivv.nxv32i8.iXLen.iXLen.iXLen(iXLen, <32 x i8>, <32 x i8>, iXLen, iXLen) - define <64 x i8> @test_sf_vc_v_ivv_e8m8(<64 x i8> %vd, <64 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e8m8: ; CHECK: # %bb.0: # %entry @@ -2225,8 +1885,6 @@ entry: ret <64 x i8> %0 } -declare <64 x i8> @llvm.riscv.sf.vc.v.ivv.nxv64i8.iXLen.iXLen.iXLen(iXLen, <64 x i8>, <64 x i8>, iXLen, iXLen) - define <1 x i16> @test_sf_vc_v_ivv_e16mf4(<1 x i16> %vd, <1 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e16mf4: ; CHECK: # %bb.0: # %entry @@ -2238,8 +1896,6 @@ entry: ret <1 x i16> %0 } -declare <1 x i16> @llvm.riscv.sf.vc.v.ivv.nxv1i16.iXLen.iXLen.iXLen(iXLen, <1 x i16>, <1 x i16>, iXLen, iXLen) - define <2 x i16> @test_sf_vc_v_ivv_e16mf2(<2 x i16> %vd, <2 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e16mf2: ; CHECK: # %bb.0: # %entry @@ -2251,8 +1907,6 @@ entry: ret <2 x i16> %0 } -declare <2 x i16> @llvm.riscv.sf.vc.v.ivv.nxv2i16.iXLen.iXLen.iXLen(iXLen, <2 x i16>, <2 x i16>, iXLen, iXLen) - define <4 x i16> @test_sf_vc_v_ivv_e16m1(<4 x i16> %vd, <4 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e16m1: ; CHECK: # %bb.0: # %entry @@ -2264,8 +1918,6 @@ entry: ret <4 x i16> %0 } -declare <4 x i16> @llvm.riscv.sf.vc.v.ivv.nxv4i16.iXLen.iXLen.iXLen(iXLen, <4 x i16>, <4 x i16>, iXLen, iXLen) - define <8 x i16> @test_sf_vc_v_ivv_e16m2(<8 x i16> %vd, <8 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e16m2: ; CHECK: # %bb.0: # %entry @@ -2277,8 +1929,6 @@ entry: ret <8 x i16> %0 } -declare <8 x i16> @llvm.riscv.sf.vc.v.ivv.nxv8i16.iXLen.iXLen.iXLen(iXLen, <8 x i16>, <8 x i16>, iXLen, iXLen) - define <16 x i16> @test_sf_vc_v_ivv_e16m4(<16 x i16> %vd, <16 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_v_ivv_e16m4: ; CHECK: # %bb.0: # %entry @@ -2290,8 +1940,6 @@ entry: ret <16 x i16> %0 } -declare <16 x i16> @llvm.riscv.sf.vc.v.ivv.nxv16i16.iXLen.iXLen.iXLen(iXLen, <16 x i16>, <16 x i16>, iXLen, iXLen) - define <32 x i16> @test_sf_vc_v_ivv_e16m8(<32 x i16> %vd, <32 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e16m8: ; CHECK: # %bb.0: # %entry @@ -2303,8 +1951,6 @@ entry: ret <32 x i16> %0 } -declare <32 x i16> @llvm.riscv.sf.vc.v.ivv.nxv32i16.iXLen.iXLen.iXLen(iXLen, <32 x i16>, <32 x i16>, iXLen, iXLen) - define <1 x i32> @test_sf_vc_v_ivv_e32mf2(<1 x i32> %vd, <1 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2316,8 +1962,6 @@ entry: ret <1 x i32> %0 } -declare <1 x i32> @llvm.riscv.sf.vc.v.ivv.nxv1i32.iXLen.iXLen.iXLen(iXLen, <1 x i32>, <1 x i32>, iXLen, iXLen) - define <2 x i32> @test_sf_vc_v_ivv_e32m1(<2 x i32> %vd, <2 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e32m1: ; CHECK: # %bb.0: # %entry @@ -2329,8 +1973,6 @@ entry: ret <2 x i32> %0 } -declare <2 x i32> @llvm.riscv.sf.vc.v.ivv.nxv2i32.iXLen.iXLen.iXLen(iXLen, <2 x i32>, <2 x i32>, iXLen, iXLen) - define <4 x i32> @test_sf_vc_v_ivv_e32m2(<4 x i32> %vd, <4 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e32m2: ; CHECK: # %bb.0: # %entry @@ -2342,8 +1984,6 @@ entry: ret <4 x i32> %0 } -declare <4 x i32> @llvm.riscv.sf.vc.v.ivv.nxv4i32.iXLen.iXLen.iXLen(iXLen, <4 x i32>, <4 x i32>, iXLen, iXLen) - define <8 x i32> @test_sf_vc_v_ivv_e32m4(<8 x i32> %vd, <8 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e32m4: ; CHECK: # %bb.0: # %entry @@ -2355,8 +1995,6 @@ entry: ret <8 x i32> %0 } -declare <8 x i32> @llvm.riscv.sf.vc.v.ivv.nxv8i32.iXLen.iXLen.iXLen(iXLen, <8 x i32>, <8 x i32>, iXLen, iXLen) - define <16 x i32> @test_sf_vc_v_ivv_e32m8(<16 x i32> %vd, <16 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e32m8: ; CHECK: # %bb.0: # %entry @@ -2368,8 +2006,6 @@ entry: ret <16 x i32> %0 } 
-declare <16 x i32> @llvm.riscv.sf.vc.v.ivv.nxv16i32.iXLen.iXLen.iXLen(iXLen, <16 x i32>, <16 x i32>, iXLen, iXLen) - define <1 x i64> @test_sf_vc_v_ivv_e64m1(<1 x i64> %vd, <1 x i64> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e64m1: ; CHECK: # %bb.0: # %entry @@ -2381,8 +2017,6 @@ entry: ret <1 x i64> %0 } -declare <1 x i64> @llvm.riscv.sf.vc.v.ivv.nxv1i64.iXLen.iXLen.iXLen(iXLen, <1 x i64>, <1 x i64>, iXLen, iXLen) - define <2 x i64> @test_sf_vc_v_ivv_e64m2(<2 x i64> %vd, <2 x i64> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e64m2: ; CHECK: # %bb.0: # %entry @@ -2394,8 +2028,6 @@ entry: ret <2 x i64> %0 } -declare <2 x i64> @llvm.riscv.sf.vc.v.ivv.nxv2i64.iXLen.iXLen.iXLen(iXLen, <2 x i64>, <2 x i64>, iXLen, iXLen) - define <4 x i64> @test_sf_vc_v_ivv_e64m4(<4 x i64> %vd, <4 x i64> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e64m4: ; CHECK: # %bb.0: # %entry @@ -2407,8 +2039,6 @@ entry: ret <4 x i64> %0 } -declare <4 x i64> @llvm.riscv.sf.vc.v.ivv.nxv4i64.iXLen.iXLen.iXLen(iXLen, <4 x i64>, <4 x i64>, iXLen, iXLen) - define <8 x i64> @test_sf_vc_v_ivv_e64m8(<8 x i64> %vd, <8 x i64> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e64m8: ; CHECK: # %bb.0: # %entry @@ -2420,8 +2050,6 @@ entry: ret <8 x i64> %0 } -declare <8 x i64> @llvm.riscv.sf.vc.v.ivv.nxv8i64.iXLen.iXLen.iXLen(iXLen, <8 x i64>, <8 x i64>, iXLen, iXLen) - define void @test_sf_vc_fvvv_se_e16mf4(<1 x half> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -2433,8 +2061,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f16.nxv1i16.nxv1i16.iXLen(iXLen, <1 x half>, <1 x i16>, <1 x i16>, iXLen) - define <1 x half> @test_sf_vc_fv_fvv_se_e16mf4(<1 x half> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -2446,8 +2072,6 @@ entry: ret <1 x half> %0 } -declare <1 x half> 
@llvm.riscv.sf.vc.v.vvv.se.nxv1f16.nxv1i16.nxv1i16.iXLen(iXLen, <1 x half>, <1 x i16>, <1 x i16>, iXLen) - define void @test_sf_vc_fvvv_se_e16mf2(<2 x half> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -2459,8 +2083,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f16.nxv2i16.nxv2i16.iXLen(iXLen, <2 x half>, <2 x i16>, <2 x i16>, iXLen) - define <2 x half> @test_sf_vc_fv_fvv_se_e16mf2(<2 x half> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -2472,8 +2094,6 @@ entry: ret <2 x half> %0 } -declare <2 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv2f16.nxv2i16.nxv2i16.iXLen(iXLen, <2 x half>, <2 x i16>, <2 x i16>, iXLen) - define void @test_sf_vc_fvvv_se_e16m1(<4 x half> %vd, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -2485,8 +2105,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f16.nxv4i16.nxv4i16.iXLen(iXLen, <4 x half>, <4 x i16>, <4 x i16>, iXLen) - define <4 x half> @test_sf_vc_fv_fvv_se_e16m1(<4 x half> %vd, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -2498,8 +2116,6 @@ entry: ret <4 x half> %0 } -declare <4 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv4f16.nxv4i16.nxv4i16.iXLen(iXLen, <4 x half>, <4 x i16>, <4 x i16>, iXLen) - define void @test_sf_vc_fvvv_se_e16m2(<8 x half> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -2511,8 +2127,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f16.nxv8i16.nxv8i16.iXLen(iXLen, <8 x half>, <8 x i16>, <8 x i16>, iXLen) - define <8 x half> @test_sf_vc_fv_fvv_se_e16m2(<8 x half> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ 
-2524,8 +2138,6 @@ entry: ret <8 x half> %0 } -declare <8 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv8f16.nxv8i16.nxv8i16.iXLen(iXLen, <8 x half>, <8 x i16>, <8 x i16>, iXLen) - define void @test_sf_vc_fvvv_se_e16m4(<16 x half> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -2537,8 +2149,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16f16.nxv16i16.nxv16i16.iXLen(iXLen, <16 x half>, <16 x i16>, <16 x i16>, iXLen) - define <16 x half> @test_sf_vc_fv_fvv_se_e16m4(<16 x half> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -2550,8 +2160,6 @@ entry: ret <16 x half> %0 } -declare <16 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv16f16.nxv16i16.nxv16i16.iXLen(iXLen, <16 x half>, <16 x i16>, <16 x i16>, iXLen) - define void @test_sf_vc_fvvv_se_e16m8(<32 x half> %vd, <32 x i16> %vs2, <32 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -2563,8 +2171,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32f16.nxv32i16.nxv32i16.iXLen(iXLen, <32 x half>, <32 x i16>, <32 x i16>, iXLen) - define <32 x half> @test_sf_vc_fv_fvv_se_e16m8(<32 x half> %vd, <32 x i16> %vs2, <32 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -2576,8 +2182,6 @@ entry: ret <32 x half> %0 } -declare <32 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv32f16.nxv32i16.nxv32i16.iXLen(iXLen, <32 x half>, <32 x i16>, <32 x i16>, iXLen) - define void @test_sf_vc_fvvv_se_e32mf2(<1 x float> %vd, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2589,8 +2193,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f32.nxv1i32.nxv1i32.iXLen(iXLen, <1 x float>, <1 x i32>, <1 x i32>, iXLen) - define <1 x float> @test_sf_vc_fv_fvv_se_e32mf2(<1 x float> %vd, <1 x i32> 
%vs2, <1 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2602,8 +2204,6 @@ entry: ret <1 x float> %0 } -declare <1 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv1f32.nxv1i32.nxv1i32.iXLen(iXLen, <1 x float>, <1 x i32>, <1 x i32>, iXLen) - define void @test_sf_vc_fvvv_se_e32m1(<2 x float> %vd, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2615,8 +2215,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f32.nxv2i32.nxv2i32.iXLen(iXLen, <2 x float>, <2 x i32>, <2 x i32>, iXLen) - define <2 x float> @test_sf_vc_fv_fvv_se_e32m1(<2 x float> %vd, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2628,8 +2226,6 @@ entry: ret <2 x float> %0 } -declare <2 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv2f32.nxv2i32.nxv2i32.iXLen(iXLen, <2 x float>, <2 x i32>, <2 x i32>, iXLen) - define void @test_sf_vc_fvvv_se_e32m2(<4 x float> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2641,8 +2237,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f32.nxv4i32.nxv4i32.iXLen(iXLen, <4 x float>, <4 x i32>, <4 x i32>, iXLen) - define <4 x float> @test_sf_vc_fv_fvv_se_e32m2(<4 x float> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2654,8 +2248,6 @@ entry: ret <4 x float> %0 } -declare <4 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv4f32.nxv4i32.nxv4i32.iXLen(iXLen, <4 x float>, <4 x i32>, <4 x i32>, iXLen) - define void @test_sf_vc_fvvv_se_e32m4(<8 x float> %vd, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2667,8 +2259,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f32.nxv8i32.nxv8i32.iXLen(iXLen, <8 x float>, <8 x i32>, <8 x i32>, 
iXLen) - define <8 x float> @test_sf_vc_fv_fvv_se_e32m4(<8 x float> %vd, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2680,8 +2270,6 @@ entry: ret <8 x float> %0 } -declare <8 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv8f32.nxv8i32.nxv8i32.iXLen(iXLen, <8 x float>, <8 x i32>, <8 x i32>, iXLen) - define void @test_sf_vc_fvvv_se_e32m8(<16 x float> %vd, <16 x i32> %vs2, <16 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2693,8 +2281,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16f32.nxv16i32.nxv16i32.iXLen(iXLen, <16 x float>, <16 x i32>, <16 x i32>, iXLen) - define <16 x float> @test_sf_vc_fv_fvv_se_e32m8(<16 x float> %vd, <16 x i32> %vs2, <16 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2706,8 +2292,6 @@ entry: ret <16 x float> %0 } -declare <16 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv16f32.nxv16i32.nxv16i32.iXLen(iXLen, <16 x float>, <16 x i32>, <16 x i32>, iXLen) - define void @test_sf_vc_fvvv_se_e64m1(<1 x double> %vd, <1 x i64> %vs2, <1 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -2719,8 +2303,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f64.nxv1i64.nxv1i64.iXLen(iXLen, <1 x double>, <1 x i64>, <1 x i64>, iXLen) - define <1 x double> @test_sf_vc_fv_fvv_se_e64m1(<1 x double> %vd, <1 x i64> %vs2, <1 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -2732,8 +2314,6 @@ entry: ret <1 x double> %0 } -declare <1 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv1f64.nxv1i64.nxv1i64.iXLen(iXLen, <1 x double>, <1 x i64>, <1 x i64>, iXLen) - define void @test_sf_vc_fvvv_se_e64m2(<2 x double> %vd, <2 x i64> %vs2, <2 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -2745,8 +2325,6 @@ entry: ret void } -declare 
void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f64.nxv2i64.nxv2i64.iXLen(iXLen, <2 x double>, <2 x i64>, <2 x i64>, iXLen) - define <2 x double> @test_sf_vc_fv_fvv_se_e64m2(<2 x double> %vd, <2 x i64> %vs2, <2 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -2758,8 +2336,6 @@ entry: ret <2 x double> %0 } -declare <2 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv2f64.nxv2i64.nxv2i64.iXLen(iXLen, <2 x double>, <2 x i64>, <2 x i64>, iXLen) - define void @test_sf_vc_fvvv_se_e64m4(<4 x double> %vd, <4 x i64> %vs2, <4 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -2771,8 +2347,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f64.nxv4i64.nxv4i64.iXLen(iXLen, <4 x double>, <4 x i64>, <4 x i64>, iXLen) - define <4 x double> @test_sf_vc_fv_fvv_se_e64m4(<4 x double> %vd, <4 x i64> %vs2, <4 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -2784,8 +2358,6 @@ entry: ret <4 x double> %0 } -declare <4 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv4f64.nxv4i64.nxv4i64.iXLen(iXLen, <4 x double>, <4 x i64>, <4 x i64>, iXLen) - define void @test_sf_vc_fvvv_se_e64m8(<8 x double> %vd, <8 x i64> %vs2, <8 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -2797,8 +2369,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f64.nxv8i64.nxv8i64.iXLen(iXLen, <8 x double>, <8 x i64>, <8 x i64>, iXLen) - define <8 x double> @test_sf_vc_fv_fvv_se_e64m8(<8 x double> %vd, <8 x i64> %vs2, <8 x i64> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -2810,8 +2380,6 @@ entry: ret <8 x double> %0 } -declare <8 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv8f64.nxv8i64.nxv8i64.iXLen(iXLen, <8 x double>, <8 x i64>, <8 x i64>, iXLen) - define void @test_sf_vc_fvvx_se_e16mf4(<1 x half> %vd, <1 x i16> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_fvvx_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -2823,8 +2391,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1f16.nxv1i16.i16.iXLen(iXLen, <1 x half>, <1 x i16>, i16, iXLen) - define <1 x half> @test_sf_vc_v_fvvx_se_e16mf4(<1 x half> %vd, <1 x i16> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -2836,8 +2402,6 @@ entry: ret <1 x half> %0 } -declare <1 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv1f16.nxv1f16.nxv1i16.i16.iXLen(iXLen, <1 x half>, <1 x i16>, i16, iXLen) - define void @test_sf_vc_fvvx_se_e16mf2(<2 x half> %vd, <2 x i16> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvx_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -2849,8 +2413,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2f16.nxv2i16.i16.iXLen(iXLen, <2 x half>, <2 x i16>, i16, iXLen) - define <2 x half> @test_sf_vc_v_fvvx_se_e16mf2(<2 x half> %vd, <2 x i16> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -2862,8 +2424,6 @@ entry: ret <2 x half> %0 } -declare <2 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv2f16.nxv2f16.nxv2i16.i16.iXLen(iXLen, <2 x half>, <2 x i16>, i16, iXLen) - define void @test_sf_vc_fvvx_se_e16m1(<4 x half> %vd, <4 x i16> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvx_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -2875,8 +2435,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4f16.nxv4i16.i16.iXLen(iXLen, <4 x half>, <4 x i16>, i16, iXLen) - define <4 x half> @test_sf_vc_v_fvvx_se_e16m1(<4 x half> %vd, <4 x i16> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -2888,8 +2446,6 @@ entry: ret <4 x half> %0 } -declare <4 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv4f16.nxv4f16.nxv4i16.i16.iXLen(iXLen, <4 x half>, <4 x i16>, i16, iXLen) - define void @test_sf_vc_fvvx_se_e16m2(<8 x half> %vd, <8 x i16> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_fvvx_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -2901,8 +2457,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8f16.nxv8i16.i16.iXLen(iXLen, <8 x half>, <8 x i16>, i16, iXLen) - define <8 x half> @test_sf_vc_v_fvvx_se_e16m2(<8 x half> %vd, <8 x i16> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -2914,8 +2468,6 @@ entry: ret <8 x half> %0 } -declare <8 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv8f16.nxv8f16.nxv8i16.i16.iXLen(iXLen, <8 x half>, <8 x i16>, i16, iXLen) - define void @test_sf_vc_fvvx_se_e16m4(<16 x half> %vd, <16 x i16> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvx_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -2927,8 +2479,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16f16.nxv16i16.i16.iXLen(iXLen, <16 x half>, <16 x i16>, i16, iXLen) - define <16 x half> @test_sf_vc_v_fvvx_se_e16m4(<16 x half> %vd, <16 x i16> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -2940,8 +2490,6 @@ entry: ret <16 x half> %0 } -declare <16 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv16f16.nxv16f16.nxv16i16.i16.iXLen(iXLen, <16 x half>, <16 x i16>, i16, iXLen) - define void @test_sf_vc_fvvx_se_e16m8(<32 x half> %vd, <32 x i16> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvx_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -2953,8 +2501,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32f16.nxv32i16.i16.iXLen(iXLen, <32 x half>, <32 x i16>, i16, iXLen) - define <32 x half> @test_sf_vc_v_fvvx_se_e16m8(<32 x half> %vd, <32 x i16> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -2966,8 +2512,6 @@ entry: ret <32 x half> %0 } -declare <32 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv32f16.nxv32f16.nxv32i16.i16.iXLen(iXLen, <32 x half>, <32 x i16>, i16, iXLen) - define void @test_sf_vc_fvvx_se_e32mf2(<1 x float> %vd, <1 x i32> %vs2, i32 %rs1, iXLen %vl) 
{ ; CHECK-LABEL: test_sf_vc_fvvx_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2979,8 +2523,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1f32.nxv1i32.i32.iXLen(iXLen, <1 x float>, <1 x i32>, i32, iXLen) - define <1 x float> @test_sf_vc_v_fvvx_se_e32mf2(<1 x float> %vd, <1 x i32> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2992,8 +2534,6 @@ entry: ret <1 x float> %0 } -declare <1 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv1f32.nxv1f32.nxv1i32.i32.iXLen(iXLen, <1 x float>, <1 x i32>, i32, iXLen) - define void @test_sf_vc_fvvx_se_e32m1(<2 x float> %vd, <2 x i32> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvx_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -3005,8 +2545,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2f32.nxv2i32.i32.iXLen(iXLen, <2 x float>, <2 x i32>, i32, iXLen) - define <2 x float> @test_sf_vc_v_fvvx_se_e32m1(<2 x float> %vd, <2 x i32> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -3018,8 +2556,6 @@ entry: ret <2 x float> %0 } -declare <2 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv2f32.nxv2f32.nxv2i32.i32.iXLen(iXLen, <2 x float>, <2 x i32>, i32, iXLen) - define void @test_sf_vc_fvvx_se_e32m2(<4 x float> %vd, <4 x i32> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvx_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -3031,8 +2567,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4f32.nxv4i32.i32.iXLen(iXLen, <4 x float>, <4 x i32>, i32, iXLen) - define <4 x float> @test_sf_vc_v_fvvx_se_e32m2(<4 x float> %vd, <4 x i32> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -3044,8 +2578,6 @@ entry: ret <4 x float> %0 } -declare <4 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv4f32.nxv4f32.nxv4i32.i32.iXLen(iXLen, <4 x float>, <4 x i32>, i32, iXLen) - define void @test_sf_vc_fvvx_se_e32m4(<8 x float> %vd, <8 x i32> %vs2, i32 %rs1, 
iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvx_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -3057,8 +2589,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8f32.nxv8i32.i32.iXLen(iXLen, <8 x float>, <8 x i32>, i32, iXLen) - define <8 x float> @test_sf_vc_v_fvvx_se_e32m4(<8 x float> %vd, <8 x i32> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -3070,8 +2600,6 @@ entry: ret <8 x float> %0 } -declare <8 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv8f32.nxv8f32.nxv8i32.i32.iXLen(iXLen, <8 x float>, <8 x i32>, i32, iXLen) - define void @test_sf_vc_fvvx_se_e32m8(<16 x float> %vd, <16 x i32> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvx_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -3083,8 +2611,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16f32.nxv16i32.i32.iXLen(iXLen, <16 x float>, <16 x i32>, i32, iXLen) - define <16 x float> @test_sf_vc_v_fvvx_se_e32m8(<16 x float> %vd, <16 x i32> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -3096,8 +2622,6 @@ entry: ret <16 x float> %0 } -declare <16 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv16f32.nxv16f32.nxv16i32.i32.iXLen(iXLen, <16 x float>, <16 x i32>, i32, iXLen) - define void @test_sf_vc_fvvi_se_e16mf4(<1 x half> %vd, <1 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvi_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -3109,8 +2633,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1f16.nxv1i16.iXLen.iXLen(iXLen, <1 x half>, <1 x i16>, iXLen, iXLen) - define <1 x half> @test_sf_vc_fv_fvvi_se_e16mf4(<1 x half> %vd, <1 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -3122,8 +2644,6 @@ entry: ret <1 x half> %0 } -declare <1 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv1f16.nxv1f16.nxv1i16.iXLen.iXLen(iXLen, <1 x half>, <1 x i16>, iXLen, iXLen) - define void @test_sf_vc_fvvi_se_e16mf2(<2 x half> %vd, <2 x i16> %vs2, 
iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvi_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -3135,8 +2655,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2f16.nxv2i16.iXLen.iXLen(iXLen, <2 x half>, <2 x i16>, iXLen, iXLen) - define <2 x half> @test_sf_vc_fv_fvvi_se_e16mf2(<2 x half> %vd, <2 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -3148,8 +2666,6 @@ entry: ret <2 x half> %0 } -declare <2 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv2f16.nxv2f16.nxv2i16.iXLen.iXLen(iXLen, <2 x half>, <2 x i16>, iXLen, iXLen) - define void @test_sf_vc_fvvi_se_e16m1(<4 x half> %vd, <4 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvi_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -3161,8 +2677,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4f16.nxv4i16.iXLen.iXLen(iXLen, <4 x half>, <4 x i16>, iXLen, iXLen) - define <4 x half> @test_sf_vc_fv_fvvi_se_e16m1(<4 x half> %vd, <4 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -3174,8 +2688,6 @@ entry: ret <4 x half> %0 } -declare <4 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv4f16.nxv4f16.nxv4i16.iXLen.iXLen(iXLen, <4 x half>, <4 x i16>, iXLen, iXLen) - define void @test_sf_vc_fvvi_se_e16m2(<8 x half> %vd, <8 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvi_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -3187,8 +2699,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8f16.nxv8i16.iXLen.iXLen(iXLen, <8 x half>, <8 x i16>, iXLen, iXLen) - define <8 x half> @test_sf_vc_fv_fvvi_se_e16m2(<8 x half> %vd, <8 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -3200,8 +2710,6 @@ entry: ret <8 x half> %0 } -declare <8 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv8f16.nxv8f16.nxv8i16.iXLen.iXLen(iXLen, <8 x half>, <8 x i16>, iXLen, iXLen) - define void @test_sf_vc_fvvi_se_e16m4(<16 x half> %vd, <16 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_fvvi_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -3213,8 +2721,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16f16.nxv16i16.iXLen.iXLen(iXLen, <16 x half>, <16 x i16>, iXLen, iXLen) - define <16 x half> @test_sf_vc_fv_fvvi_se_e16m4(<16 x half> %vd, <16 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -3226,8 +2732,6 @@ entry: ret <16 x half> %0 } -declare <16 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv16f16.nxv16f16.nxv16i16.iXLen.iXLen(iXLen, <16 x half>, <16 x i16>, iXLen, iXLen) - define void @test_sf_vc_fvvi_se_e16m8(<32 x half> %vd, <32 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvi_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -3239,8 +2743,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32f16.nxv32i16.iXLen.iXLen(iXLen, <32 x half>, <32 x i16>, iXLen, iXLen) - define <32 x half> @test_sf_vc_fv_fvvi_se_e16m8(<32 x half> %vd, <32 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -3252,8 +2754,6 @@ entry: ret <32 x half> %0 } -declare <32 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv32f16.nxv32f16.nxv32i16.iXLen.iXLen(iXLen, <32 x half>, <32 x i16>, iXLen, iXLen) - define void @test_sf_vc_fvvi_se_e32mf2(<1 x float> %vd, <1 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvi_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -3265,8 +2765,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1f32.nxv1i32.iXLen.iXLen(iXLen, <1 x float>, <1 x i32>, iXLen, iXLen) - define <1 x float> @test_sf_vc_fv_fvvi_se_e32mf2(<1 x float> %vd, <1 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -3278,8 +2776,6 @@ entry: ret <1 x float> %0 } -declare <1 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv1f32.nxv1f32.nxv1i32.iXLen.iXLen(iXLen, <1 x float>, <1 x i32>, iXLen, iXLen) - define void @test_sf_vc_fvvi_se_e32m1(<2 x float> %vd, <2 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_fvvi_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -3291,8 +2787,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2f32.nxv2i32.iXLen.iXLen(iXLen, <2 x float>, <2 x i32>, iXLen, iXLen) - define <2 x float> @test_sf_vc_fv_fvvi_se_e32m1(<2 x float> %vd, <2 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -3304,8 +2798,6 @@ entry: ret <2 x float> %0 } -declare <2 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv2f32.nxv2f32.nxv2i32.iXLen.iXLen(iXLen, <2 x float>, <2 x i32>, iXLen, iXLen) - define void @test_sf_vc_fvvi_se_e32m2(<4 x float> %vd, <4 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvi_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -3317,8 +2809,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4f32.nxv4i32.iXLen.iXLen(iXLen, <4 x float>, <4 x i32>, iXLen, iXLen) - define <4 x float> @test_sf_vc_fv_fvvi_se_e32m2(<4 x float> %vd, <4 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -3330,8 +2820,6 @@ entry: ret <4 x float> %0 } -declare <4 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv4f32.nxv4f32.nxv4i32.iXLen.iXLen(iXLen, <4 x float>, <4 x i32>, iXLen, iXLen) - define void @test_sf_vc_fvvi_se_e32m4(<8 x float> %vd, <8 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvi_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -3343,8 +2831,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8f32.nxv8i32.iXLen.iXLen(iXLen, <8 x float>, <8 x i32>, iXLen, iXLen) - define <8 x float> @test_sf_vc_fv_fvvi_se_e32m4(<8 x float> %vd, <8 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -3356,8 +2842,6 @@ entry: ret <8 x float> %0 } -declare <8 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv8f32.nxv8f32.nxv8i32.iXLen.iXLen(iXLen, <8 x float>, <8 x i32>, iXLen, iXLen) - define void @test_sf_vc_fvvi_se_e32m8(<16 x float> %vd, <16 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_fvvi_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -3369,8 +2853,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16f32.nxv16i32.iXLen.iXLen(iXLen, <16 x float>, <16 x i32>, iXLen, iXLen) - define <16 x float> @test_sf_vc_fv_fvvi_se_e32m8(<16 x float> %vd, <16 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -3382,8 +2864,6 @@ entry: ret <16 x float> %0 } -declare <16 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv16f32.nxv16f32.nxv16i32.iXLen.iXLen(iXLen, <16 x float>, <16 x i32>, iXLen, iXLen) - define void @test_sf_vc_fvvf_se_e16mf4(<1 x half> %vd, <1 x i16> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvf_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -3395,8 +2875,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1f16.nxv1i16.f16.iXLen(iXLen, <1 x half>, <1 x i16>, half, iXLen) - define <1 x half> @test_sf_vc_fv_fvvf_se_e16mf4(<1 x half> %vd, <1 x i16> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -3408,8 +2886,6 @@ entry: ret <1 x half> %0 } -declare <1 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv1f16.nxv1f16.nxv1i16.f16.iXLen(iXLen, <1 x half>, <1 x i16>, half %rs1, iXLen) - define void @test_sf_vc_fvvf_se_e16mf2(<2 x half> %vd, <2 x i16> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvf_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -3421,8 +2897,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2f16.nxv2i16.f16.iXLen(iXLen, <2 x half>, <2 x i16>, half, iXLen) - define <2 x half> @test_sf_vc_fv_fvvf_se_e16mf2(<2 x half> %vd, <2 x i16> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -3434,8 +2908,6 @@ entry: ret <2 x half> %0 } -declare <2 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv2f16.nxv2f16.nxv2i16.f16.iXLen(iXLen, <2 x half>, <2 x i16>, half %rs1, iXLen) - define void @test_sf_vc_fvvf_se_e16m1(<4 x half> %vd, <4 x i16> %vs2, half 
%rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvf_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -3447,8 +2919,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4f16.nxv4i16.f16.iXLen(iXLen, <4 x half>, <4 x i16>, half, iXLen) - define <4 x half> @test_sf_vc_fv_fvvf_se_e16m1(<4 x half> %vd, <4 x i16> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -3460,8 +2930,6 @@ entry: ret <4 x half> %0 } -declare <4 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv4f16.nxv4f16.nxv4i16.f16.iXLen(iXLen, <4 x half>, <4 x i16>, half %rs1, iXLen) - define void @test_sf_vc_fvvf_se_e16m2(<8 x half> %vd, <8 x i16> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvf_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -3473,8 +2941,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8f16.nxv8i16.f16.iXLen(iXLen, <8 x half>, <8 x i16>, half, iXLen) - define <8 x half> @test_sf_vc_fv_fvvf_se_e16m2(<8 x half> %vd, <8 x i16> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -3486,8 +2952,6 @@ entry: ret <8 x half> %0 } -declare <8 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv8f16.nxv8f16.nxv8i16.f16.iXLen(iXLen, <8 x half>, <8 x i16>, half %rs1, iXLen) - define void @test_sf_vc_fvvf_se_e16m4(<16 x half> %vd, <16 x i16> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvf_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -3499,8 +2963,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16f16.nxv16i16.f16.iXLen(iXLen, <16 x half>, <16 x i16>, half, iXLen) - define <16 x half> @test_sf_vc_fv_fvvf_se_e16m4(<16 x half> %vd, <16 x i16> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -3512,8 +2974,6 @@ entry: ret <16 x half> %0 } -declare <16 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv16f16.nxv16f16.nxv16i16.f16.iXLen(iXLen, <16 x half>, <16 x i16>, half %rs1, iXLen) - define void @test_sf_vc_fvvf_se_e16m8(<32 
x half> %vd, <32 x i16> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvf_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -3525,8 +2985,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv32f16.nxv32i16.f16.iXLen(iXLen, <32 x half>, <32 x i16>, half, iXLen) - define <32 x half> @test_sf_vc_fv_fvvf_se_e16m8(<32 x half> %vd, <32 x i16> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -3538,8 +2996,6 @@ entry: ret <32 x half> %0 } -declare <32 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv32f16.nxv32f16.nxv32i16.f16.iXLen(iXLen, <32 x half>, <32 x i16>, half %rs1, iXLen) - define void @test_sf_vc_fvvf_se_e32mf2(<1 x float> %vd, <1 x i32> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvf_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -3551,8 +3007,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1f32.nxv1i32.f32.iXLen(iXLen, <1 x float>, <1 x i32>, float, iXLen) - define <1 x float> @test_sf_vc_fv_fvvf_se_e32mf2(<1 x float> %vd, <1 x i32> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -3564,8 +3018,6 @@ entry: ret <1 x float> %0 } -declare <1 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv1f32.nxv1f32.nxv1i32.f32.iXLen(iXLen, <1 x float>, <1 x i32>, float %rs1, iXLen) - define void @test_sf_vc_fvvf_se_e32m1(<2 x float> %vd, <2 x i32> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvf_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -3577,8 +3029,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2f32.nxv2i32.f32.iXLen(iXLen, <2 x float>, <2 x i32>, float, iXLen) - define <2 x float> @test_sf_vc_fv_fvvf_se_e32m1(<2 x float> %vd, <2 x i32> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -3590,8 +3040,6 @@ entry: ret <2 x float> %0 } -declare <2 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv2f32.nxv2f32.nxv2i32.f32.iXLen(iXLen, <2 x float>, <2 x i32>, 
float %rs1, iXLen) - define void @test_sf_vc_fvvf_se_e32m2(<4 x float> %vd, <4 x i32> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvf_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -3603,8 +3051,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4f32.nxv4i32.f32.iXLen(iXLen, <4 x float>, <4 x i32>, float, iXLen) - define <4 x float> @test_sf_vc_fv_fvvf_se_e32m2(<4 x float> %vd, <4 x i32> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -3616,8 +3062,6 @@ entry: ret <4 x float> %0 } -declare <4 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv4f32.nxv4f32.nxv4i32.f32.iXLen(iXLen, <4 x float>, <4 x i32>, float %rs1, iXLen) - define void @test_sf_vc_fvvf_se_e32m4(<8 x float> %vd, <8 x i32> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvf_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -3629,8 +3073,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8f32.nxv8i32.f32.iXLen(iXLen, <8 x float>, <8 x i32>, float, iXLen) - define <8 x float> @test_sf_vc_fv_fvvf_se_e32m4(<8 x float> %vd, <8 x i32> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -3642,8 +3084,6 @@ entry: ret <8 x float> %0 } -declare <8 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv8f32.nxv8f32.nxv8i32.f32.iXLen(iXLen, <8 x float>, <8 x i32>, float %rs1, iXLen) - define void @test_sf_vc_fvvf_se_e32m8(<16 x float> %vd, <16 x i32> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvf_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -3655,8 +3095,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16f32.nxv16i32.f32.iXLen(iXLen, <16 x float>, <16 x i32>, float, iXLen) - define <16 x float> @test_sf_vc_fv_fvvf_se_e32m8(<16 x float> %vd, <16 x i32> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -3668,4 +3106,3 @@ entry: ret <16 x float> %0 } -declare <16 x float> 
@llvm.riscv.sf.vc.v.fvv.se.nxv16f32.nxv16f32.nxv16i32.f32.iXLen(iXLen, <16 x float>, <16 x i32>, float %rs1, iXLen) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-xvw.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-xvw.ll index ea6b936843c2f..09f770dfcfed8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-xvw.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-xvw.ll @@ -15,8 +15,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1i16.nxv1i8.nxv1i8.iXLen(iXLen, <1 x i16>, <1 x i8>, <1 x i8>, iXLen) - define void @test_sf_vc_vvw_se_e8mf4(<2 x i16> %vd, <2 x i8> %vs2, <2 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -28,8 +26,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2i16.nxv2i8.nxv2i8.iXLen(iXLen, <2 x i16>, <2 x i8>, <2 x i8>, iXLen) - define void @test_sf_vc_vvw_se_e8mf2(<4 x i16> %vd, <4 x i8> %vs2, <4 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -41,8 +37,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4i16.nxv4i8.nxv4i8.iXLen(iXLen, <4 x i16>, <4 x i8>, <4 x i8>, iXLen) - define void @test_sf_vc_vvw_se_e8m1(<8 x i16> %vd, <8 x i8> %vs2, <8 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -54,8 +48,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8i16.nxv8i8.nxv8i8.iXLen(iXLen, <8 x i16>, <8 x i8>, <8 x i8>, iXLen) - define void @test_sf_vc_vvw_se_e8m2(<16 x i16> %vd, <16 x i8> %vs2, <16 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -67,8 +59,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16i16.nxv16i8.nxv16i8.iXLen(iXLen, <16 x i16>, <16 x i8>, <16 x i8>, iXLen) - define void @test_sf_vc_vvw_se_e8m4(<32 x i16> %vd, <32 x i8> %vs2, <32 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e8m4: ; CHECK: # %bb.0: # 
%entry @@ -80,8 +70,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv32i16.nxv32i8.nxv32i8.iXLen(iXLen, <32 x i16>, <32 x i8>, <32 x i8>, iXLen) - define void @test_sf_vc_vvw_se_e16mf4(<1 x i32> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -93,8 +81,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1i32.nxv1i16.nxv1i16.iXLen(iXLen, <1 x i32>, <1 x i16>, <1 x i16>, iXLen) - define void @test_sf_vc_vvw_se_e16mf2(<2 x i32> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -106,8 +92,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2i32.nxv2i16.nxv2i16.iXLen(iXLen, <2 x i32>, <2 x i16>, <2 x i16>, iXLen) - define void @test_sf_vc_vvw_se_e16m1(<4 x i32> %vd, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -119,8 +103,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4i32.nxv4i16.nxv4i16.iXLen(iXLen, <4 x i32>, <4 x i16>, <4 x i16>, iXLen) - define void @test_sf_vc_vvw_se_e16m2(<8 x i32> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -132,8 +114,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8i32.nxv8i16.nxv8i16.iXLen(iXLen, <8 x i32>, <8 x i16>, <8 x i16>, iXLen) - define void @test_sf_vc_vvw_se_e16m4(<16 x i32> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -145,8 +125,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16i32.nxv16i16.nxv16i16.iXLen(iXLen, <16 x i32>, <16 x i16>, <16 x i16>, iXLen) - define void @test_sf_vc_vvw_se_e32mf2(<1 x i64> %vd, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -158,8 +136,6 @@ 
entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1i64.nxv1i32.nxv1i32.iXLen(iXLen, <1 x i64>, <1 x i32>, <1 x i32>, iXLen) - define void @test_sf_vc_vvw_se_e32m1(<2 x i64> %vd, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -171,8 +147,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2i64.nxv2i32.nxv2i32.iXLen(iXLen, <2 x i64>, <2 x i32>, <2 x i32>, iXLen) - define void @test_sf_vc_vvw_se_e32m2(<4 x i64> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -184,8 +158,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4i64.nxv4i32.nxv4i32.iXLen(iXLen, <4 x i64>, <4 x i32>, <4 x i32>, iXLen) - define void @test_sf_vc_vvw_se_e32m4(<8 x i64> %vd, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -197,8 +169,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8i64.nxv8i32.nxv8i32.iXLen(iXLen, <8 x i64>, <8 x i32>, <8 x i32>, iXLen) - define <1 x i16> @test_sf_vc_v_vvw_se_e8mf8(<1 x i16> %vd, <1 x i8> %vs2, <1 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -210,8 +180,6 @@ entry: ret <1 x i16> %0 } -declare <1 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv1i16.iXLen.nxv1i8.nxv1i8.iXLen(iXLen, <1 x i16>, <1 x i8>, <1 x i8>, iXLen) - define <2 x i16> @test_sf_vc_v_vvw_se_e8mf4(<2 x i16> %vd, <2 x i8> %vs2, <2 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -223,8 +191,6 @@ entry: ret <2 x i16> %0 } -declare <2 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv2i16.iXLen.nxv2i8.nxv2i8.iXLen(iXLen, <2 x i16>, <2 x i8>, <2 x i8>, iXLen) - define <4 x i16> @test_sf_vc_v_vvw_se_e8mf2(<4 x i16> %vd, <4 x i8> %vs2, <4 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -236,8 +202,6 @@ 
entry: ret <4 x i16> %0 } -declare <4 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv4i16.iXLen.nxv4i8.nxv4i8.iXLen(iXLen, <4 x i16>, <4 x i8>, <4 x i8>, iXLen) - define <8 x i16> @test_sf_vc_v_vvw_se_e8m1(<8 x i16> %vd, <8 x i8> %vs2, <8 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -249,8 +213,6 @@ entry: ret <8 x i16> %0 } -declare <8 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv8i16.iXLen.nxv8i8.nxv8i8.iXLen(iXLen, <8 x i16>, <8 x i8>, <8 x i8>, iXLen) - define <16 x i16> @test_sf_vc_v_vvw_se_e8m2(<16 x i16> %vd, <16 x i8> %vs2, <16 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -262,8 +224,6 @@ entry: ret <16 x i16> %0 } -declare <16 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv16i16.iXLen.nxv16i8.nxv16i8.iXLen(iXLen, <16 x i16>, <16 x i8>, <16 x i8>, iXLen) - define <32 x i16> @test_sf_vc_v_vvw_se_e8m4(<32 x i16> %vd, <32 x i8> %vs2, <32 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -275,8 +235,6 @@ entry: ret <32 x i16> %0 } -declare <32 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv32i16.iXLen.nxv32i8.nxv32i8.iXLen(iXLen, <32 x i16>, <32 x i8>, <32 x i8>, iXLen) - define <1 x i32> @test_sf_vc_v_vvw_se_e16mf4(<1 x i32> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -288,8 +246,6 @@ entry: ret <1 x i32> %0 } -declare <1 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv1i32.iXLen.nxv1i16.nxv1i16.iXLen(iXLen, <1 x i32>, <1 x i16>, <1 x i16>, iXLen) - define <2 x i32> @test_sf_vc_v_vvw_se_e16mf2(<2 x i32> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -301,8 +257,6 @@ entry: ret <2 x i32> %0 } -declare <2 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv2i32.iXLen.nxv2i16.nxv2i16.iXLen(iXLen, <2 x i32>, <2 x i16>, <2 x i16>, iXLen) - define <4 x i32> @test_sf_vc_v_vvw_se_e16m1(<4 x i32> %vd, <4 x i16> %vs2, <4 x i16> %vs1, 
iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -314,8 +268,6 @@ entry: ret <4 x i32> %0 } -declare <4 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv4i32.iXLen.nxv4i16.nxv4i16.iXLen(iXLen, <4 x i32>, <4 x i16>, <4 x i16>, iXLen) - define <8 x i32> @test_sf_vc_v_vvw_se_e16m2(<8 x i32> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -327,8 +279,6 @@ entry: ret <8 x i32> %0 } -declare <8 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv8i32.iXLen.nxv8i16.nxv8i16.iXLen(iXLen, <8 x i32>, <8 x i16>, <8 x i16>, iXLen) - define <16 x i32> @test_sf_vc_v_vvw_se_e16m4(<16 x i32> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -340,8 +290,6 @@ entry: ret <16 x i32> %0 } -declare <16 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv16i32.iXLen.nxv16i16.nxv16i16.iXLen(iXLen, <16 x i32>, <16 x i16>, <16 x i16>, iXLen) - define <1 x i64> @test_sf_vc_v_vvw_se_e32mf2(<1 x i64> %vd, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -353,8 +301,6 @@ entry: ret <1 x i64> %0 } -declare <1 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv1i64.iXLen.nxv1i32.nxv1i32.iXLen(iXLen, <1 x i64>, <1 x i32>, <1 x i32>, iXLen) - define <2 x i64> @test_sf_vc_v_vvw_se_e32m1(<2 x i64> %vd, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -366,8 +312,6 @@ entry: ret <2 x i64> %0 } -declare <2 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv2i64.iXLen.nxv2i32.nxv2i32.iXLen(iXLen, <2 x i64>, <2 x i32>, <2 x i32>, iXLen) - define <4 x i64> @test_sf_vc_v_vvw_se_e32m2(<4 x i64> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -379,8 +323,6 @@ entry: ret <4 x i64> %0 } -declare <4 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv4i64.iXLen.nxv4i32.nxv4i32.iXLen(iXLen, <4 x i64>, <4 x 
i32>, <4 x i32>, iXLen) - define <8 x i64> @test_sf_vc_v_vvw_se_e32m4(<8 x i64> %vd, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -392,8 +334,6 @@ entry: ret <8 x i64> %0 } -declare <8 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv8i64.iXLen.nxv8i32.nxv8i32.iXLen(iXLen, <8 x i64>, <8 x i32>, <8 x i32>, iXLen) - define <1 x i16> @test_sf_vc_v_vvw_e8mf8(<1 x i16> %vd, <1 x i8> %vs2, <1 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e8mf8: ; CHECK: # %bb.0: # %entry @@ -405,8 +345,6 @@ entry: ret <1 x i16> %0 } -declare <1 x i16> @llvm.riscv.sf.vc.v.vvw.nxv1i16.iXLen.nxv1i8.nxv1i8.iXLen(iXLen, <1 x i16>, <1 x i8>, <1 x i8>, iXLen) - define <2 x i16> @test_sf_vc_v_vvw_e8mf4(<2 x i16> %vd, <2 x i8> %vs2, <2 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e8mf4: ; CHECK: # %bb.0: # %entry @@ -418,8 +356,6 @@ entry: ret <2 x i16> %0 } -declare <2 x i16> @llvm.riscv.sf.vc.v.vvw.nxv2i16.iXLen.nxv2i8.nxv2i8.iXLen(iXLen, <2 x i16>, <2 x i8>, <2 x i8>, iXLen) - define <4 x i16> @test_sf_vc_v_vvw_e8mf2(<4 x i16> %vd, <4 x i8> %vs2, <4 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e8mf2: ; CHECK: # %bb.0: # %entry @@ -431,8 +367,6 @@ entry: ret <4 x i16> %0 } -declare <4 x i16> @llvm.riscv.sf.vc.v.vvw.nxv4i16.iXLen.nxv4i8.nxv4i8.iXLen(iXLen, <4 x i16>, <4 x i8>, <4 x i8>, iXLen) - define <8 x i16> @test_sf_vc_v_vvw_e8m1(<8 x i16> %vd, <8 x i8> %vs2, <8 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e8m1: ; CHECK: # %bb.0: # %entry @@ -444,8 +378,6 @@ entry: ret <8 x i16> %0 } -declare <8 x i16> @llvm.riscv.sf.vc.v.vvw.nxv8i16.iXLen.nxv8i8.nxv8i8.iXLen(iXLen, <8 x i16>, <8 x i8>, <8 x i8>, iXLen) - define <16 x i16> @test_sf_vc_v_vvw_e8m2(<16 x i16> %vd, <16 x i8> %vs2, <16 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e8m2: ; CHECK: # %bb.0: # %entry @@ -457,8 +389,6 @@ entry: ret <16 x i16> %0 } -declare <16 x i16> 
@llvm.riscv.sf.vc.v.vvw.nxv16i16.iXLen.nxv16i8.nxv16i8.iXLen(iXLen, <16 x i16>, <16 x i8>, <16 x i8>, iXLen) - define <32 x i16> @test_sf_vc_v_vvw_e8m4(<32 x i16> %vd, <32 x i8> %vs2, <32 x i8> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e8m4: ; CHECK: # %bb.0: # %entry @@ -470,8 +400,6 @@ entry: ret <32 x i16> %0 } -declare <32 x i16> @llvm.riscv.sf.vc.v.vvw.nxv32i16.iXLen.nxv32i8.nxv32i8.iXLen(iXLen, <32 x i16>, <32 x i8>, <32 x i8>, iXLen) - define <1 x i32> @test_sf_vc_v_vvw_e16mf4(<1 x i32> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e16mf4: ; CHECK: # %bb.0: # %entry @@ -483,8 +411,6 @@ entry: ret <1 x i32> %0 } -declare <1 x i32> @llvm.riscv.sf.vc.v.vvw.nxv1i32.iXLen.nxv1i16.nxv1i16.iXLen(iXLen, <1 x i32>, <1 x i16>, <1 x i16>, iXLen) - define <2 x i32> @test_sf_vc_v_vvw_e16mf2(<2 x i32> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e16mf2: ; CHECK: # %bb.0: # %entry @@ -496,8 +422,6 @@ entry: ret <2 x i32> %0 } -declare <2 x i32> @llvm.riscv.sf.vc.v.vvw.nxv2i32.iXLen.nxv2i16.nxv2i16.iXLen(iXLen, <2 x i32>, <2 x i16>, <2 x i16>, iXLen) - define <4 x i32> @test_sf_vc_v_vvw_e16m1(<4 x i32> %vd, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e16m1: ; CHECK: # %bb.0: # %entry @@ -509,8 +433,6 @@ entry: ret <4 x i32> %0 } -declare <4 x i32> @llvm.riscv.sf.vc.v.vvw.nxv4i32.iXLen.nxv4i16.nxv4i16.iXLen(iXLen, <4 x i32>, <4 x i16>, <4 x i16>, iXLen) - define <8 x i32> @test_sf_vc_v_vvw_e16m2(<8 x i32> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e16m2: ; CHECK: # %bb.0: # %entry @@ -522,8 +444,6 @@ entry: ret <8 x i32> %0 } -declare <8 x i32> @llvm.riscv.sf.vc.v.vvw.nxv8i32.iXLen.nxv8i16.nxv8i16.iXLen(iXLen, <8 x i32>, <8 x i16>, <8 x i16>, iXLen) - define <16 x i32> @test_sf_vc_v_vvw_e16m4(<16 x i32> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e16m4: ; CHECK: # %bb.0: # %entry @@ 
-535,8 +455,6 @@ entry: ret <16 x i32> %0 } -declare <16 x i32> @llvm.riscv.sf.vc.v.vvw.nxv16i32.iXLen.nxv16i16.nxv16i16.iXLen(iXLen, <16 x i32>, <16 x i16>, <16 x i16>, iXLen) - define <1 x i64> @test_sf_vc_v_vvw_e32mf2(<1 x i64> %vd, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e32mf2: ; CHECK: # %bb.0: # %entry @@ -548,8 +466,6 @@ entry: ret <1 x i64> %0 } -declare <1 x i64> @llvm.riscv.sf.vc.v.vvw.nxv1i64.iXLen.nxv1i32.nxv1i32.iXLen(iXLen, <1 x i64>, <1 x i32>, <1 x i32>, iXLen) - define <2 x i64> @test_sf_vc_v_vvw_e32m1(<2 x i64> %vd, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e32m1: ; CHECK: # %bb.0: # %entry @@ -561,8 +477,6 @@ entry: ret <2 x i64> %0 } -declare <2 x i64> @llvm.riscv.sf.vc.v.vvw.nxv2i64.iXLen.nxv2i32.nxv2i32.iXLen(iXLen, <2 x i64>, <2 x i32>, <2 x i32>, iXLen) - define <4 x i64> @test_sf_vc_v_vvw_e32m2(<4 x i64> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e32m2: ; CHECK: # %bb.0: # %entry @@ -574,8 +488,6 @@ entry: ret <4 x i64> %0 } -declare <4 x i64> @llvm.riscv.sf.vc.v.vvw.nxv4i64.iXLen.nxv4i32.nxv4i32.iXLen(iXLen, <4 x i64>, <4 x i32>, <4 x i32>, iXLen) - define <8 x i64> @test_sf_vc_v_vvw_e32m4(<8 x i64> %vd, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e32m4: ; CHECK: # %bb.0: # %entry @@ -587,8 +499,6 @@ entry: ret <8 x i64> %0 } -declare <8 x i64> @llvm.riscv.sf.vc.v.vvw.nxv8i64.iXLen.nxv8i32.nxv8i32.iXLen(iXLen, <8 x i64>, <8 x i32>, <8 x i32>, iXLen) - define void @test_sf_vc_xvw_se_e8mf8(<1 x i16> %vd, <1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -600,8 +510,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1i16.nxv1i8.i8.iXLen(iXLen, <1 x i16>, <1 x i8>, i8, iXLen) - define void @test_sf_vc_xvw_se_e8mf4(<2 x i16> %vd, <2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e8mf4: ; 
CHECK: # %bb.0: # %entry @@ -613,8 +521,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2i16.nxv2i8.i8.iXLen(iXLen, <2 x i16>, <2 x i8>, i8, iXLen) - define void @test_sf_vc_xvw_se_e8mf2(<4 x i16> %vd, <4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -626,8 +532,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4i16.nxv4i8.i8.iXLen(iXLen, <4 x i16>, <4 x i8>, i8, iXLen) - define void @test_sf_vc_xvw_se_e8m1(<8 x i16> %vd, <8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -639,8 +543,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8i16.nxv8i8.i8.iXLen(iXLen, <8 x i16>, <8 x i8>, i8, iXLen) - define void @test_sf_vc_xvw_se_e8m2(<16 x i16> %vd, <16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -652,8 +554,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16i16.nxv16i8.i8.iXLen(iXLen, <16 x i16>, <16 x i8>, i8, iXLen) - define void @test_sf_vc_xvw_se_e8m4(<32 x i16> %vd, <32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -665,8 +565,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv32i16.nxv32i8.i8.iXLen(iXLen, <32 x i16>, <32 x i8>, i8, iXLen) - define void @test_sf_vc_xvw_se_e16mf4(<1 x i32> %vd, <1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -678,8 +576,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1i32.nxv1i16.i16.iXLen(iXLen, <1 x i32>, <1 x i16>, i16, iXLen) - define void @test_sf_vc_xvw_se_e16mf2(<2 x i32> %vd, <2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -691,8 +587,6 @@ entry: ret void } -declare void 
@llvm.riscv.sf.vc.xvw.se.iXLen.nxv2i32.nxv2i16.i16.iXLen(iXLen, <2 x i32>, <2 x i16>, i16, iXLen) - define void @test_sf_vc_xvw_se_e16m1(<4 x i32> %vd, <4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -704,8 +598,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4i32.nxv4i16.i16.iXLen(iXLen, <4 x i32>, <4 x i16>, i16, iXLen) - define void @test_sf_vc_xvw_se_e16m2(<8 x i32> %vd, <8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -717,8 +609,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8i32.nxv8i16.i16.iXLen(iXLen, <8 x i32>, <8 x i16>, i16, iXLen) - define void @test_sf_vc_xvw_se_e16m4(<16 x i32> %vd, <16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -730,8 +620,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16i32.nxv16i16.i16.iXLen(iXLen, <16 x i32>, <16 x i16>, i16, iXLen) - define void @test_sf_vc_xvw_se_e32mf2(<1 x i64> %vd, <1 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -743,8 +631,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1i64.nxv1i32.i32.iXLen(iXLen, <1 x i64>, <1 x i32>, i32, iXLen) - define void @test_sf_vc_xvw_se_e32m1(<2 x i64> %vd, <2 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -756,8 +642,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2i64.nxv2i32.i32.iXLen(iXLen, <2 x i64>, <2 x i32>, i32, iXLen) - define void @test_sf_vc_xvw_se_e32m2(<4 x i64> %vd, <4 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -769,8 +653,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4i64.nxv4i32.i32.iXLen(iXLen, <4 x i64>, <4 x 
i32>, i32, iXLen) - define void @test_sf_vc_xvw_se_e32m4(<8 x i64> %vd, <8 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -782,8 +664,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8i64.nxv8i32.i32.iXLen(iXLen, <8 x i64>, <8 x i32>, i32, iXLen) - define <1 x i16> @test_sf_vc_v_xvw_se_e8mf8(<1 x i16> %vd, <1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -795,8 +675,6 @@ entry: ret <1 x i16> %0 } -declare <1 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv1i16.iXLen.nxv1i8.i8.iXLen(iXLen, <1 x i16>, <1 x i8>, i8, iXLen) - define <2 x i16> @test_sf_vc_v_xvw_se_e8mf4(<2 x i16> %vd, <2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -808,8 +686,6 @@ entry: ret <2 x i16> %0 } -declare <2 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv2i16.iXLen.nxv2i8.i8.iXLen(iXLen, <2 x i16>, <2 x i8>, i8, iXLen) - define <4 x i16> @test_sf_vc_v_xvw_se_e8mf2(<4 x i16> %vd, <4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -821,8 +697,6 @@ entry: ret <4 x i16> %0 } -declare <4 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv4i16.iXLen.nxv4i8.i8.iXLen(iXLen, <4 x i16>, <4 x i8>, i8, iXLen) - define <8 x i16> @test_sf_vc_v_xvw_se_e8m1(<8 x i16> %vd, <8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -834,8 +708,6 @@ entry: ret <8 x i16> %0 } -declare <8 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv8i16.iXLen.nxv8i8.i8.iXLen(iXLen, <8 x i16>, <8 x i8>, i8, iXLen) - define <16 x i16> @test_sf_vc_v_xvw_se_e8m2(<16 x i16> %vd, <16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -847,8 +719,6 @@ entry: ret <16 x i16> %0 } -declare <16 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv16i16.iXLen.nxv16i8.i8.iXLen(iXLen, <16 x i16>, 
<16 x i8>, i8, iXLen) - define <32 x i16> @test_sf_vc_v_xvw_se_e8m4(<32 x i16> %vd, <32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -860,8 +730,6 @@ entry: ret <32 x i16> %0 } -declare <32 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv32i16.iXLen.nxv32i8.i8.iXLen(iXLen, <32 x i16>, <32 x i8>, i8, iXLen) - define <1 x i32> @test_sf_vc_v_xvw_se_e16mf4(<1 x i32> %vd, <1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -873,8 +741,6 @@ entry: ret <1 x i32> %0 } -declare <1 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv1i32.iXLen.nxv1i16.i16.iXLen(iXLen, <1 x i32>, <1 x i16>, i16, iXLen) - define <2 x i32> @test_sf_vc_v_xvw_se_e16mf2(<2 x i32> %vd, <2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -886,8 +752,6 @@ entry: ret <2 x i32> %0 } -declare <2 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv2i32.iXLen.nxv2i16.i16.iXLen(iXLen, <2 x i32>, <2 x i16>, i16, iXLen) - define <4 x i32> @test_sf_vc_v_xvw_se_e16m1(<4 x i32> %vd, <4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -899,8 +763,6 @@ entry: ret <4 x i32> %0 } -declare <4 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv4i32.iXLen.nxv4i16.i16.iXLen(iXLen, <4 x i32>, <4 x i16>, i16, iXLen) - define <8 x i32> @test_sf_vc_v_xvw_se_e16m2(<8 x i32> %vd, <8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -912,8 +774,6 @@ entry: ret <8 x i32> %0 } -declare <8 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv8i32.iXLen.nxv8i16.i16.iXLen(iXLen, <8 x i32>, <8 x i16>, i16, iXLen) - define <16 x i32> @test_sf_vc_v_xvw_se_e16m4(<16 x i32> %vd, <16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -925,8 +785,6 @@ entry: ret <16 x i32> %0 } -declare <16 x i32> 
@llvm.riscv.sf.vc.v.xvw.se.nxv16i32.iXLen.nxv16i16.i16.iXLen(iXLen, <16 x i32>, <16 x i16>, i16, iXLen) - define <1 x i64> @test_sf_vc_v_xvw_se_e32mf2(<1 x i64> %vd, <1 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -938,8 +796,6 @@ entry: ret <1 x i64> %0 } -declare <1 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv1i64.i32.nxv1i32.iXLen.iXLen(iXLen, <1 x i64>, <1 x i32>, i32, iXLen) - define <2 x i64> @test_sf_vc_v_xvw_se_e32m1(<2 x i64> %vd, <2 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -951,8 +807,6 @@ entry: ret <2 x i64> %0 } -declare <2 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv2i64.i32.nxv2i32.iXLen.iXLen(iXLen, <2 x i64>, <2 x i32>, i32, iXLen) - define <4 x i64> @test_sf_vc_v_xvw_se_e32m2(<4 x i64> %vd, <4 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -964,8 +818,6 @@ entry: ret <4 x i64> %0 } -declare <4 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv4i64.i32.nxv4i32.iXLen.iXLen(iXLen, <4 x i64>, <4 x i32>, i32, iXLen) - define <8 x i64> @test_sf_vc_v_xvw_se_e32m4(<8 x i64> %vd, <8 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -977,8 +829,6 @@ entry: ret <8 x i64> %0 } -declare <8 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv8i64.i32.nxv8i32.iXLen.iXLen(iXLen, <8 x i64>, <8 x i32>, i32, iXLen) - define <1 x i16> @test_sf_vc_v_xvw_e8mf8(<1 x i16> %vd, <1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e8mf8: ; CHECK: # %bb.0: # %entry @@ -990,8 +840,6 @@ entry: ret <1 x i16> %0 } -declare <1 x i16> @llvm.riscv.sf.vc.v.xvw.nxv1i16.iXLen.nxv1i8.i8.iXLen(iXLen, <1 x i16>, <1 x i8>, i8, iXLen) - define <2 x i16> @test_sf_vc_v_xvw_e8mf4(<2 x i16> %vd, <2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1003,8 +851,6 @@ entry: 
ret <2 x i16> %0 } -declare <2 x i16> @llvm.riscv.sf.vc.v.xvw.nxv2i16.iXLen.nxv2i8.i8.iXLen(iXLen, <2 x i16>, <2 x i8>, i8, iXLen) - define <4 x i16> @test_sf_vc_v_xvw_e8mf2(<4 x i16> %vd, <4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1016,8 +862,6 @@ entry: ret <4 x i16> %0 } -declare <4 x i16> @llvm.riscv.sf.vc.v.xvw.nxv4i16.iXLen.nxv4i8.i8.iXLen(iXLen, <4 x i16>, <4 x i8>, i8, iXLen) - define <8 x i16> @test_sf_vc_v_xvw_e8m1(<8 x i16> %vd, <8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e8m1: ; CHECK: # %bb.0: # %entry @@ -1029,8 +873,6 @@ entry: ret <8 x i16> %0 } -declare <8 x i16> @llvm.riscv.sf.vc.v.xvw.nxv8i16.iXLen.nxv8i8.i8.iXLen(iXLen, <8 x i16>, <8 x i8>, i8, iXLen) - define <16 x i16> @test_sf_vc_v_xvw_e8m2(<16 x i16> %vd, <16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e8m2: ; CHECK: # %bb.0: # %entry @@ -1042,8 +884,6 @@ entry: ret <16 x i16> %0 } -declare <16 x i16> @llvm.riscv.sf.vc.v.xvw.nxv16i16.iXLen.nxv16i8.i8.iXLen(iXLen, <16 x i16>, <16 x i8>, i8, iXLen) - define <32 x i16> @test_sf_vc_v_xvw_e8m4(<32 x i16> %vd, <32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e8m4: ; CHECK: # %bb.0: # %entry @@ -1055,8 +895,6 @@ entry: ret <32 x i16> %0 } -declare <32 x i16> @llvm.riscv.sf.vc.v.xvw.nxv32i16.iXLen.nxv32i8.i8.iXLen(iXLen, <32 x i16>, <32 x i8>, i8, iXLen) - define <1 x i32> @test_sf_vc_v_xvw_e16mf4(<1 x i32> %vd, <1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1068,8 +906,6 @@ entry: ret <1 x i32> %0 } -declare <1 x i32> @llvm.riscv.sf.vc.v.xvw.nxv1i32.iXLen.nxv1i16.i16.iXLen(iXLen, <1 x i32>, <1 x i16>, i16, iXLen) - define <2 x i32> @test_sf_vc_v_xvw_e16mf2(<2 x i32> %vd, <2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1081,8 +917,6 @@ entry: ret <2 
x i32> %0 } -declare <2 x i32> @llvm.riscv.sf.vc.v.xvw.nxv2i32.iXLen.nxv2i16.i16.iXLen(iXLen, <2 x i32>, <2 x i16>, i16, iXLen) - define <4 x i32> @test_sf_vc_v_xvw_e16m1(<4 x i32> %vd, <4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e16m1: ; CHECK: # %bb.0: # %entry @@ -1094,8 +928,6 @@ entry: ret <4 x i32> %0 } -declare <4 x i32> @llvm.riscv.sf.vc.v.xvw.nxv4i32.iXLen.nxv4i16.i16.iXLen(iXLen, <4 x i32>, <4 x i16>, i16, iXLen) - define <8 x i32> @test_sf_vc_v_xvw_e16m2(<8 x i32> %vd, <8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e16m2: ; CHECK: # %bb.0: # %entry @@ -1107,8 +939,6 @@ entry: ret <8 x i32> %0 } -declare <8 x i32> @llvm.riscv.sf.vc.v.xvw.nxv8i32.iXLen.nxv8i16.i16.iXLen(iXLen, <8 x i32>, <8 x i16>, i16, iXLen) - define <16 x i32> @test_sf_vc_v_xvw_e16m4(<16 x i32> %vd, <16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e16m4: ; CHECK: # %bb.0: # %entry @@ -1120,8 +950,6 @@ entry: ret <16 x i32> %0 } -declare <16 x i32> @llvm.riscv.sf.vc.v.xvw.nxv16i32.iXLen.nxv16i16.i16.iXLen(iXLen, <16 x i32>, <16 x i16>, i16, iXLen) - define <1 x i64> @test_sf_vc_v_xvw_e32mf2(<1 x i64> %vd, <1 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1133,8 +961,6 @@ entry: ret <1 x i64> %0 } -declare <1 x i64> @llvm.riscv.sf.vc.v.xvw.nxv1i64.iXLen.nxv1i32.i32.iXLen(iXLen, <1 x i64>, <1 x i32>, i32, iXLen) - define <2 x i64> @test_sf_vc_v_xvw_e32m1(<2 x i64> %vd, <2 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e32m1: ; CHECK: # %bb.0: # %entry @@ -1146,8 +972,6 @@ entry: ret <2 x i64> %0 } -declare <2 x i64> @llvm.riscv.sf.vc.v.xvw.nxv2i64.iXLen.nxv2i32.i32.iXLen(iXLen, <2 x i64>, <2 x i32>, i32, iXLen) - define <4 x i64> @test_sf_vc_v_xvw_e32m2(<4 x i64> %vd, <4 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e32m2: ; CHECK: # %bb.0: # %entry @@ -1159,8 +983,6 
@@ entry: ret <4 x i64> %0 } -declare <4 x i64> @llvm.riscv.sf.vc.v.xvw.nxv4i64.iXLen.nxv4i32.i32.iXLen(iXLen, <4 x i64>, <4 x i32>, i32, iXLen) - define <8 x i64> @test_sf_vc_v_xvw_e32m4(<8 x i64> %vd, <8 x i32> %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e32m4: ; CHECK: # %bb.0: # %entry @@ -1172,8 +994,6 @@ entry: ret <8 x i64> %0 } -declare <8 x i64> @llvm.riscv.sf.vc.v.xvw.nxv8i64.iXLen.nxv8i32.i32.iXLen(iXLen, <8 x i64>, <8 x i32>, i32, iXLen) - define void @test_sf_vc_ivw_se_e8mf8(<1 x i16> %vd, <1 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1185,8 +1005,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1i16.nxv1i8.iXLen.iXLen(iXLen, <1 x i16>, <1 x i8>, iXLen, iXLen) - define void @test_sf_vc_ivw_se_e8mf4(<2 x i16> %vd, <2 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1198,8 +1016,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2i16.nxv2i8.iXLen.iXLen(iXLen, <2 x i16>, <2 x i8>, iXLen, iXLen) - define void @test_sf_vc_ivw_se_e8mf2(<4 x i16> %vd, <4 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1211,8 +1027,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4i16.nxv4i8.iXLen.iXLen(iXLen, <4 x i16>, <4 x i8>, iXLen, iXLen) - define void @test_sf_vc_ivw_se_e8m1(<8 x i16> %vd, <8 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -1224,8 +1038,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8i16.nxv8i8.iXLen.iXLen(iXLen, <8 x i16>, <8 x i8>, iXLen, iXLen) - define void @test_sf_vc_ivw_se_e8m2(<16 x i16> %vd, <16 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -1237,8 +1049,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16i16.nxv16i8.iXLen.iXLen(iXLen, <16 x i16>, <16 x i8>, iXLen, iXLen) - 
define void @test_sf_vc_ivw_se_e8m4(<32 x i16> %vd, <32 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -1250,8 +1060,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv32i16.nxv32i8.iXLen.iXLen(iXLen, <32 x i16>, <32 x i8>, iXLen, iXLen) - define void @test_sf_vc_ivw_se_e16mf4(<1 x i32> %vd, <1 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1263,8 +1071,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1i32.nxv1i16.iXLen.iXLen(iXLen, <1 x i32>, <1 x i16>, iXLen, iXLen) - define void @test_sf_vc_ivw_se_e16mf2(<2 x i32> %vd, <2 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1276,8 +1082,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2i32.nxv2i16.iXLen.iXLen(iXLen, <2 x i32>, <2 x i16>, iXLen, iXLen) - define void @test_sf_vc_ivw_se_e16m1(<4 x i32> %vd, <4 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -1289,8 +1093,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4i32.nxv4i16.iXLen.iXLen(iXLen, <4 x i32>, <4 x i16>, iXLen, iXLen) - define void @test_sf_vc_ivw_se_e16m2(<8 x i32> %vd, <8 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -1302,8 +1104,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8i32.nxv8i16.iXLen.iXLen(iXLen, <8 x i32>, <8 x i16>, iXLen, iXLen) - define void @test_sf_vc_ivw_se_e16m4(<16 x i32> %vd, <16 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -1315,8 +1115,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16i32.nxv16i16.iXLen.iXLen(iXLen, <16 x i32>, <16 x i16>, iXLen, iXLen) - define void @test_sf_vc_ivw_se_e32mf2(<1 x i64> %vd, <1 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ 
-1328,8 +1126,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1i64.nxv1i32.iXLen.iXLen(iXLen, <1 x i64>, <1 x i32>, iXLen, iXLen) - define void @test_sf_vc_ivw_se_e32m1(<2 x i64> %vd, <2 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1341,8 +1137,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2i64.nxv2i32.iXLen.iXLen(iXLen, <2 x i64>, <2 x i32>, iXLen, iXLen) - define void @test_sf_vc_ivw_se_e32m2(<4 x i64> %vd, <4 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1354,8 +1148,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4i64.nxv4i32.iXLen.iXLen(iXLen, <4 x i64>, <4 x i32>, iXLen, iXLen) - define void @test_sf_vc_ivw_se_e32m4(<8 x i64> %vd, <8 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1367,8 +1159,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8i64.nxv8i32.iXLen.iXLen(iXLen, <8 x i64>, <8 x i32>, iXLen, iXLen) - define <1 x i16> @test_sf_vc_v_ivw_se_e8mf8(<1 x i16> %vd, <1 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1380,8 +1170,6 @@ entry: ret <1 x i16> %0 } -declare <1 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv1i16.iXLen.nxv1i8.iXLen.iXLen(iXLen, <1 x i16>, <1 x i8>, iXLen, iXLen) - define <2 x i16> @test_sf_vc_v_ivw_se_e8mf4(<2 x i16> %vd, <2 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1393,8 +1181,6 @@ entry: ret <2 x i16> %0 } -declare <2 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv2i16.iXLen.nxv2i8.iXLen.iXLen(iXLen, <2 x i16>, <2 x i8>, iXLen, iXLen) - define <4 x i16> @test_sf_vc_v_ivw_se_e8mf2(<4 x i16> %vd, <4 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1406,8 +1192,6 @@ entry: ret <4 x i16> %0 } -declare <4 x i16> 
@llvm.riscv.sf.vc.v.ivw.se.nxv4i16.iXLen.nxv4i8.iXLen.iXLen(iXLen, <4 x i16>, <4 x i8>, iXLen, iXLen) - define <8 x i16> @test_sf_vc_v_ivw_se_e8m1(<8 x i16> %vd, <8 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -1419,8 +1203,6 @@ entry: ret <8 x i16> %0 } -declare <8 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv8i16.iXLen.nxv8i8.iXLen.iXLen(iXLen, <8 x i16>, <8 x i8>, iXLen, iXLen) - define <16 x i16> @test_sf_vc_v_ivw_se_e8m2(<16 x i16> %vd, <16 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -1432,8 +1214,6 @@ entry: ret <16 x i16> %0 } -declare <16 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv16i16.iXLen.nxv16i8.iXLen.iXLen(iXLen, <16 x i16>, <16 x i8>, iXLen, iXLen) - define <32 x i16> @test_sf_vc_v_ivw_se_e8m4(<32 x i16> %vd, <32 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -1445,8 +1225,6 @@ entry: ret <32 x i16> %0 } -declare <32 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv32i16.iXLen.nxv32i8.iXLen.iXLen(iXLen, <32 x i16>, <32 x i8>, iXLen, iXLen) - define <1 x i32> @test_sf_vc_v_ivw_se_e16mf4(<1 x i32> %vd, <1 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1458,8 +1236,6 @@ entry: ret <1 x i32> %0 } -declare <1 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv1i32.iXLen.nxv1i16.iXLen.iXLen(iXLen, <1 x i32>, <1 x i16>, iXLen, iXLen) - define <2 x i32> @test_sf_vc_v_ivw_se_e16mf2(<2 x i32> %vd, <2 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1471,8 +1247,6 @@ entry: ret <2 x i32> %0 } -declare <2 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv2i32.iXLen.nxv2i16.iXLen.iXLen(iXLen, <2 x i32>, <2 x i16>, iXLen, iXLen) - define <4 x i32> @test_sf_vc_v_ivw_se_e16m1(<4 x i32> %vd, <4 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -1484,8 +1258,6 @@ entry: ret <4 x i32> %0 } -declare <4 x i32> 
@llvm.riscv.sf.vc.v.ivw.se.nxv4i32.iXLen.nxv4i16.iXLen.iXLen(iXLen, <4 x i32>, <4 x i16>, iXLen, iXLen) - define <8 x i32> @test_sf_vc_v_ivw_se_e16m2(<8 x i32> %vd, <8 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -1497,8 +1269,6 @@ entry: ret <8 x i32> %0 } -declare <8 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv8i32.iXLen.nxv8i16.iXLen.iXLen(iXLen, <8 x i32>, <8 x i16>, iXLen, iXLen) - define <16 x i32> @test_sf_vc_v_ivw_se_e16m4(<16 x i32> %vd, <16 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -1510,8 +1280,6 @@ entry: ret <16 x i32> %0 } -declare <16 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv16i32.iXLen.nxv16i16.iXLen.iXLen(iXLen, <16 x i32>, <16 x i16>, iXLen, iXLen) - define <1 x i64> @test_sf_vc_v_ivw_se_e32mf2(<1 x i64> %vd, <1 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1523,8 +1291,6 @@ entry: ret <1 x i64> %0 } -declare <1 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv1i64.iXLen.nxv1i32.iXLen.iXLen(iXLen, <1 x i64>, <1 x i32>, iXLen, iXLen) - define <2 x i64> @test_sf_vc_v_ivw_se_e32m1(<2 x i64> %vd, <2 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1536,8 +1302,6 @@ entry: ret <2 x i64> %0 } -declare <2 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv2i64.iXLen.nxv2i32.iXLen.iXLen(iXLen, <2 x i64>, <2 x i32>, iXLen, iXLen) - define <4 x i64> @test_sf_vc_v_ivw_se_e32m2(<4 x i64> %vd, <4 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1549,8 +1313,6 @@ entry: ret <4 x i64> %0 } -declare <4 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv4i64.iXLen.nxv4i32.iXLen.iXLen(iXLen, <4 x i64>, <4 x i32>, iXLen, iXLen) - define <8 x i64> @test_sf_vc_v_ivw_se_e32m4(<8 x i64> %vd, <8 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1562,8 +1324,6 @@ entry: ret <8 x i64> %0 } -declare <8 x i64> 
@llvm.riscv.sf.vc.v.ivw.se.nxv8i64.iXLen.nxv8i32.iXLen.iXLen(iXLen, <8 x i64>, <8 x i32>, iXLen, iXLen) - define <1 x i16> @test_sf_vc_v_ivw_e8mf8(<1 x i16> %vd, <1 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1575,8 +1335,6 @@ entry: ret <1 x i16> %0 } -declare <1 x i16> @llvm.riscv.sf.vc.v.ivw.nxv1i16.iXLen.nxv1i8.iXLen.iXLen(iXLen, <1 x i16>, <1 x i8>, iXLen, iXLen) - define <2 x i16> @test_sf_vc_v_ivw_e8mf4(<2 x i16> %vd, <2 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1588,8 +1346,6 @@ entry: ret <2 x i16> %0 } -declare <2 x i16> @llvm.riscv.sf.vc.v.ivw.nxv2i16.iXLen.nxv2i8.iXLen.iXLen(iXLen, <2 x i16>, <2 x i8>, iXLen, iXLen) - define <4 x i16> @test_sf_vc_v_ivw_e8mf2(<4 x i16> %vd, <4 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1601,8 +1357,6 @@ entry: ret <4 x i16> %0 } -declare <4 x i16> @llvm.riscv.sf.vc.v.ivw.nxv4i16.iXLen.nxv4i8.iXLen.iXLen(iXLen, <4 x i16>, <4 x i8>, iXLen, iXLen) - define <8 x i16> @test_sf_vc_v_ivw_e8m1(<8 x i16> %vd, <8 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e8m1: ; CHECK: # %bb.0: # %entry @@ -1614,8 +1368,6 @@ entry: ret <8 x i16> %0 } -declare <8 x i16> @llvm.riscv.sf.vc.v.ivw.nxv8i16.iXLen.nxv8i8.iXLen.iXLen(iXLen, <8 x i16>, <8 x i8>, iXLen, iXLen) - define <16 x i16> @test_sf_vc_v_ivw_e8m2(<16 x i16> %vd, <16 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e8m2: ; CHECK: # %bb.0: # %entry @@ -1627,8 +1379,6 @@ entry: ret <16 x i16> %0 } -declare <16 x i16> @llvm.riscv.sf.vc.v.ivw.nxv16i16.iXLen.nxv16i8.iXLen.iXLen(iXLen, <16 x i16>, <16 x i8>, iXLen, iXLen) - define <32 x i16> @test_sf_vc_v_ivw_e8m4(<32 x i16> %vd, <32 x i8> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e8m4: ; CHECK: # %bb.0: # %entry @@ -1640,8 +1390,6 @@ entry: ret <32 x i16> %0 } -declare <32 x i16> @llvm.riscv.sf.vc.v.ivw.nxv32i16.iXLen.nxv32i8.iXLen.iXLen(iXLen, <32 x i16>, 
<32 x i8>, iXLen, iXLen) - define <1 x i32> @test_sf_vc_v_ivw_e16mf4(<1 x i32> %vd, <1 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1653,8 +1401,6 @@ entry: ret <1 x i32> %0 } -declare <1 x i32> @llvm.riscv.sf.vc.v.ivw.nxv1i32.iXLen.nxv1i16.iXLen.iXLen(iXLen, <1 x i32>, <1 x i16>, iXLen, iXLen) - define <2 x i32> @test_sf_vc_v_ivw_e16mf2(<2 x i32> %vd, <2 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1666,8 +1412,6 @@ entry: ret <2 x i32> %0 } -declare <2 x i32> @llvm.riscv.sf.vc.v.ivw.nxv2i32.iXLen.nxv2i16.iXLen.iXLen(iXLen, <2 x i32>, <2 x i16>, iXLen, iXLen) - define <4 x i32> @test_sf_vc_v_ivw_e16m1(<4 x i32> %vd, <4 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e16m1: ; CHECK: # %bb.0: # %entry @@ -1679,8 +1423,6 @@ entry: ret <4 x i32> %0 } -declare <4 x i32> @llvm.riscv.sf.vc.v.ivw.nxv4i32.iXLen.nxv4i16.iXLen.iXLen(iXLen, <4 x i32>, <4 x i16>, iXLen, iXLen) - define <8 x i32> @test_sf_vc_v_ivw_e16m2(<8 x i32> %vd, <8 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e16m2: ; CHECK: # %bb.0: # %entry @@ -1692,8 +1434,6 @@ entry: ret <8 x i32> %0 } -declare <8 x i32> @llvm.riscv.sf.vc.v.ivw.nxv8i32.iXLen.nxv8i16.iXLen.iXLen(iXLen, <8 x i32>, <8 x i16>, iXLen, iXLen) - define <16 x i32> @test_sf_vc_v_ivw_e16m4(<16 x i32> %vd, <16 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e16m4: ; CHECK: # %bb.0: # %entry @@ -1705,8 +1445,6 @@ entry: ret <16 x i32> %0 } -declare <16 x i32> @llvm.riscv.sf.vc.v.ivw.nxv16i32.iXLen.nxv16i16.iXLen.iXLen(iXLen, <16 x i32>, <16 x i16>, iXLen, iXLen) - define <1 x i64> @test_sf_vc_v_ivw_e32mf2(<1 x i64> %vd, <1 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1718,8 +1456,6 @@ entry: ret <1 x i64> %0 } -declare <1 x i64> @llvm.riscv.sf.vc.v.ivw.nxv1i64.iXLen.nxv1i32.iXLen.iXLen(iXLen, <1 x i64>, <1 x i32>, iXLen, iXLen) - define <2 x i64> 
@test_sf_vc_v_ivw_e32m1(<2 x i64> %vd, <2 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e32m1: ; CHECK: # %bb.0: # %entry @@ -1731,8 +1467,6 @@ entry: ret <2 x i64> %0 } -declare <2 x i64> @llvm.riscv.sf.vc.v.ivw.nxv2i64.iXLen.nxv2i32.iXLen.iXLen(iXLen, <2 x i64>, <2 x i32>, iXLen, iXLen) - define <4 x i64> @test_sf_vc_v_ivw_e32m2(<4 x i64> %vd, <4 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e32m2: ; CHECK: # %bb.0: # %entry @@ -1744,8 +1478,6 @@ entry: ret <4 x i64> %0 } -declare <4 x i64> @llvm.riscv.sf.vc.v.ivw.nxv4i64.iXLen.nxv4i32.iXLen.iXLen(iXLen, <4 x i64>, <4 x i32>, iXLen, iXLen) - define <8 x i64> @test_sf_vc_v_ivw_e32m4(<8 x i64> %vd, <8 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e32m4: ; CHECK: # %bb.0: # %entry @@ -1757,8 +1489,6 @@ entry: ret <8 x i64> %0 } -declare <8 x i64> @llvm.riscv.sf.vc.v.ivw.nxv8i64.iXLen.nxv8i32.iXLen.iXLen(iXLen, <8 x i64>, <8 x i32>, iXLen, iXLen) - define void @test_sf_vc_fwvv_se_e32mf2(<1 x float> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1770,8 +1500,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1f32.nxv1i16.nxv1i16.iXLen(iXLen, <1 x float>, <1 x i16>, <1 x i16>, iXLen) - define <1 x float> @test_sf_vc_fw_fwvvv_se_e32mf2(<1 x float> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1783,8 +1511,6 @@ entry: ret <1 x float> %0 } -declare <1 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv1f32.nxv1i16.nxv1i16.iXLen(iXLen, <1 x float>, <1 x i16>, <1 x i16>, iXLen) - define void @test_sf_vc_fwvv_se_e32m1(<2 x float> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1796,8 +1522,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2f32.nxv2i16.nxv2i16.iXLen(iXLen, <2 x float>, <2 x i16>, <2 x i16>, iXLen) - define <2 x float> 
@test_sf_vc_fw_fwvvv_se_e32m1(<2 x float> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1809,8 +1533,6 @@ entry: ret <2 x float> %0 } -declare <2 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv2f32.nxv2i16.nxv2i16.iXLen(iXLen, <2 x float>, <2 x i16>, <2 x i16>, iXLen) - define void @test_sf_vc_fwvv_se_e32m2(<4 x float> %vd, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1822,8 +1544,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4f32.nxv4i16.nxv4i16.iXLen(iXLen, <4 x float>, <4 x i16>, <4 x i16>, iXLen) - define <4 x float> @test_sf_vc_fw_fwvvv_se_e32m2(<4 x float> %vd, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1835,8 +1555,6 @@ entry: ret <4 x float> %0 } -declare <4 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv4f32.nxv4i16.nxv4i16.iXLen(iXLen, <4 x float>, <4 x i16>, <4 x i16>, iXLen) - define void @test_sf_vc_fwvv_se_e32m4(<8 x float> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1848,8 +1566,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8f32.nxv8i16.nxv8i16.iXLen(iXLen, <8 x float>, <8 x i16>, <8 x i16>, iXLen) - define <8 x float> @test_sf_vc_fw_fwvvv_se_e32m4(<8 x float> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1861,8 +1577,6 @@ entry: ret <8 x float> %0 } -declare <8 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv8f32.nxv8i16.nxv8i16.iXLen(iXLen, <8 x float>, <8 x i16>, <8 x i16>, iXLen) - define void @test_sf_vc_fwvv_se_e32m8(<16 x float> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -1874,8 +1588,6 @@ entry: ret void } -declare void 
@llvm.riscv.sf.vc.vvw.se.iXLen.nxv16f32.nxv16i16.nxv16i16.iXLen(iXLen, <16 x float>, <16 x i16>, <16 x i16>, iXLen) - define <16 x float> @test_sf_vc_fw_fwvvv_se_e32m8(<16 x float> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -1887,8 +1599,6 @@ entry: ret <16 x float> %0 } -declare <16 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv16f32.nxv16i16.nxv16i16.iXLen(iXLen, <16 x float>, <16 x i16>, <16 x i16>, iXLen) - define void @test_sf_vc_fwvv_se_e64m1(<1 x double> %vd, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -1900,8 +1610,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1f64.nxv1i32.nxv1i32.iXLen(iXLen, <1 x double>, <1 x i32>, <1 x i32>, iXLen) - define <1 x double> @test_sf_vc_fw_fwvvv_se_e64m1(<1 x double> %vd, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -1913,8 +1621,6 @@ entry: ret <1 x double> %0 } -declare <1 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv1f64.nxv1i32.nxv1i32.iXLen(iXLen, <1 x double>, <1 x i32>, <1 x i32>, iXLen) - define void @test_sf_vc_fwvv_se_e64m2(<2 x double> %vd, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -1926,8 +1632,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2f64.nxv2i32.nxv2i32.iXLen(iXLen, <2 x double>, <2 x i32>, <2 x i32>, iXLen) - define <2 x double> @test_sf_vc_fw_fwvvv_se_e64m2(<2 x double> %vd, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -1939,8 +1643,6 @@ entry: ret <2 x double> %0 } -declare <2 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv2f64.nxv2i32.nxv2i32.iXLen(iXLen, <2 x double>, <2 x i32>, <2 x i32>, iXLen) - define void @test_sf_vc_fwvv_se_e64m4(<4 x double> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) { 
; CHECK-LABEL: test_sf_vc_fwvv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -1952,8 +1654,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4f64.nxv4i32.nxv4i32.iXLen(iXLen, <4 x double>, <4 x i32>, <4 x i32>, iXLen) - define <4 x double> @test_sf_vc_fw_fwvvv_se_e64m4(<4 x double> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -1965,8 +1665,6 @@ entry: ret <4 x double> %0 } -declare <4 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv4f64.nxv4i32.nxv4i32.iXLen(iXLen, <4 x double>, <4 x i32>, <4 x i32>, iXLen) - define void @test_sf_vc_fwvv_se_e64m8(<8 x double> %vd, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -1978,8 +1676,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8f64.nxv8i32.nxv8i32.iXLen(iXLen, <8 x double>, <8 x i32>, <8 x i32>, iXLen) - define <8 x double> @test_sf_vc_fw_fwvvv_se_e64m8(<8 x double> %vd, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -1991,8 +1687,6 @@ entry: ret <8 x double> %0 } -declare <8 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv8f64.nxv8i32.nxv8i32.iXLen(iXLen, <8 x double>, <8 x i32>, <8 x i32>, iXLen) - define void @test_sf_vc_fwvx_se_e32mf2(<1 x float> %vd, <1 x i16> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvx_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2004,8 +1698,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1f32.nxv1i16.i16.iXLen(iXLen, <1 x float>, <1 x i16>, i16, iXLen) - define <1 x float> @test_sf_vc_w_fwvx_se_e32mf2(<1 x float> %vd, <1 x i16> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_w_fwvx_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2017,8 +1709,6 @@ entry: ret <1 x float> %0 } -declare <1 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv1f32.nxv1f16.nxv1i16.i16.iXLen(iXLen, <1 x float>, <1 x i16>, i16, iXLen) - define void 
@test_sf_vc_fwvx_se_e32m1(<2 x float> %vd, <2 x i16> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvx_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2030,8 +1720,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2f32.nxv2i16.i16.iXLen(iXLen, <2 x float>, <2 x i16>, i16, iXLen) - define <2 x float> @test_sf_vc_w_fwvx_se_e32m1(<2 x float> %vd, <2 x i16> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_w_fwvx_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2043,8 +1731,6 @@ entry: ret <2 x float> %0 } -declare <2 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv2f32.nxv2f16.nxv2i16.i16.iXLen(iXLen, <2 x float>, <2 x i16>, i16, iXLen) - define void @test_sf_vc_fwvx_se_e32m2(<4 x float> %vd, <4 x i16> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvx_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2056,8 +1742,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4f32.nxv4i16.i16.iXLen(iXLen, <4 x float>, <4 x i16>, i16, iXLen) - define <4 x float> @test_sf_vc_w_fwvx_se_e32m2(<4 x float> %vd, <4 x i16> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_w_fwvx_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2069,8 +1753,6 @@ entry: ret <4 x float> %0 } -declare <4 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv4f32.nxv4f16.nxv4i16.i16.iXLen(iXLen, <4 x float>, <4 x i16>, i16, iXLen) - define void @test_sf_vc_fwvx_se_e32m4(<8 x float> %vd, <8 x i16> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvx_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2082,8 +1764,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8f32.nxv8i16.i16.iXLen(iXLen, <8 x float>, <8 x i16>, i16, iXLen) - define <8 x float> @test_sf_vc_w_fwvx_se_e32m4(<8 x float> %vd, <8 x i16> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_w_fwvx_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2095,8 +1775,6 @@ entry: ret <8 x float> %0 } -declare <8 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv8f32.nxv8f16.nxv8i16.i16.iXLen(iXLen, <8 x float>, <8 x i16>, i16, iXLen) - define 
void @test_sf_vc_fwvx_se_e32m8(<16 x float> %vd, <16 x i16> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvx_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2108,8 +1786,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16f32.nxv16i16.i16.iXLen(iXLen, <16 x float>, <16 x i16>, i16, iXLen) - define <16 x float> @test_sf_vc_w_fwvx_se_e32m8(<16 x float> %vd, <16 x i16> %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_w_fwvx_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2121,8 +1797,6 @@ entry: ret <16 x float> %0 } -declare <16 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv16f32.nxv16f16.nxv16i16.i16.iXLen(iXLen, <16 x float>, <16 x i16>, i16, iXLen) - define void @test_sf_vc_fwvx_se_e64m1(<1 x double> %vd, <1 x i32> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvx_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -2134,8 +1808,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1f64.nxv1i32.i32.iXLen(iXLen, <1 x double>, <1 x i32>, i32, iXLen) - define <1 x double> @test_sf_vc_w_fwvx_se_e64m1(<1 x double> %vd, <1 x i32> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_w_fwvx_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -2147,8 +1819,6 @@ entry: ret <1 x double> %0 } -declare <1 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv1f64.nxv1f32.nxv1i32.i32.iXLen(iXLen, <1 x double>, <1 x i32>, i32, iXLen) - define void @test_sf_vc_fwvx_se_e64m2(<2 x double> %vd, <2 x i32> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvx_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -2160,8 +1830,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2f64.nxv2i32.i32.iXLen(iXLen, <2 x double>, <2 x i32>, i32, iXLen) - define <2 x double> @test_sf_vc_w_fwvx_se_e64m2(<2 x double> %vd, <2 x i32> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_w_fwvx_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -2173,8 +1841,6 @@ entry: ret <2 x double> %0 } -declare <2 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv2f64.nxv2f32.nxv2i32.i32.iXLen(iXLen, <2 x 
double>, <2 x i32>, i32, iXLen) - define void @test_sf_vc_fwvx_se_e64m4(<4 x double> %vd, <4 x i32> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvx_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -2186,8 +1852,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4f64.nxv4i32.i32.iXLen(iXLen, <4 x double>, <4 x i32>, i32, iXLen) - define <4 x double> @test_sf_vc_w_fwvx_se_e64m4(<4 x double> %vd, <4 x i32> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_w_fwvx_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -2199,8 +1863,6 @@ entry: ret <4 x double> %0 } -declare <4 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv4f64.nxv4f32.nxv4i32.i32.iXLen(iXLen, <4 x double>, <4 x i32>, i32, iXLen) - define void @test_sf_vc_fwvx_se_e64m8(<8 x double> %vd, <8 x i32> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvx_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -2212,8 +1874,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8f64.nxv8i32.i32.iXLen(iXLen, <8 x double>, <8 x i32>, i32, iXLen) - define <8 x double> @test_sf_vc_w_fwvx_se_e64m8(<8 x double> %vd, <8 x i32> %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_w_fwvx_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -2225,8 +1885,6 @@ entry: ret <8 x double> %0 } -declare <8 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv8f64.nxv8f32.nxv8i32.i32.iXLen(iXLen, <8 x double>, <8 x i32>, i32, iXLen) - define void @test_sf_vc_fwvi_se_e32mf2(<1 x float> %vd, <1 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvi_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2238,8 +1896,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1f32.nxv1i16.iXLen.iXLen(iXLen, <1 x float>, <1 x i16>, iXLen, iXLen) - define <1 x float> @test_sf_vc_fw_fwvi_se_e32mf2(<1 x float> %vd, <1 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2251,8 +1907,6 @@ entry: ret <1 x float> %0 } -declare <1 x float> 
@llvm.riscv.sf.vc.v.ivw.se.nxv1f32.nxv1f16.nxv1i16.iXLen.iXLen(iXLen, <1 x float>, <1 x i16>, iXLen, iXLen) - define void @test_sf_vc_fwvi_se_e32m1(<2 x float> %vd, <2 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvi_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2264,8 +1918,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2f32.nxv2i16.iXLen.iXLen(iXLen, <2 x float>, <2 x i16>, iXLen, iXLen) - define <2 x float> @test_sf_vc_fw_fwvi_se_e32m1(<2 x float> %vd, <2 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2277,8 +1929,6 @@ entry: ret <2 x float> %0 } -declare <2 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv2f32.nxv2f16.nxv2i16.iXLen.iXLen(iXLen, <2 x float>, <2 x i16>, iXLen, iXLen) - define void @test_sf_vc_fwvi_se_e32m2(<4 x float> %vd, <4 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvi_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2290,8 +1940,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4f32.nxv4i16.iXLen.iXLen(iXLen, <4 x float>, <4 x i16>, iXLen, iXLen) - define <4 x float> @test_sf_vc_fw_fwvi_se_e32m2(<4 x float> %vd, <4 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2303,8 +1951,6 @@ entry: ret <4 x float> %0 } -declare <4 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv4f32.nxv4f16.nxv4i16.iXLen.iXLen(iXLen, <4 x float>, <4 x i16>, iXLen, iXLen) - define void @test_sf_vc_fwvi_se_e32m4(<8 x float> %vd, <8 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvi_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2316,8 +1962,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8f32.nxv8i16.iXLen.iXLen(iXLen, <8 x float>, <8 x i16>, iXLen, iXLen) - define <8 x float> @test_sf_vc_fw_fwvi_se_e32m4(<8 x float> %vd, <8 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2329,8 +1973,6 @@ entry: ret <8 x float> %0 } -declare <8 x float> 
@llvm.riscv.sf.vc.v.ivw.se.nxv8f32.nxv8f16.nxv8i16.iXLen.iXLen(iXLen, <8 x float>, <8 x i16>, iXLen, iXLen) - define void @test_sf_vc_fwvi_se_e32m8(<16 x float> %vd, <16 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvi_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2342,8 +1984,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16f32.nxv16i16.iXLen.iXLen(iXLen, <16 x float>, <16 x i16>, iXLen, iXLen) - define <16 x float> @test_sf_vc_fw_fwvi_se_e32m8(<16 x float> %vd, <16 x i16> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2355,8 +1995,6 @@ entry: ret <16 x float> %0 } -declare <16 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv16f32.nxv16f16.nxv16i16.iXLen.iXLen(iXLen, <16 x float>, <16 x i16>, iXLen, iXLen) - define void @test_sf_vc_fwvi_se_e64m1(<1 x double> %vd, <1 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvi_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -2368,8 +2006,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1f64.nxv1i32.iXLen.iXLen(iXLen, <1 x double>, <1 x i32>, iXLen, iXLen) - define <1 x double> @test_sf_vc_fw_fwvi_se_e64m1(<1 x double> %vd, <1 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -2381,8 +2017,6 @@ entry: ret <1 x double> %0 } -declare <1 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv1f64.nxv1f32.nxv1i32.iXLen.iXLen(iXLen, <1 x double>, <1 x i32>, iXLen, iXLen) - define void @test_sf_vc_fwvi_se_e64m2(<2 x double> %vd, <2 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvi_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -2394,8 +2028,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2f64.nxv2i32.iXLen.iXLen(iXLen, <2 x double>, <2 x i32>, iXLen, iXLen) - define <2 x double> @test_sf_vc_fw_fwvi_se_e64m2(<2 x double> %vd, <2 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -2407,8 +2039,6 @@ entry: ret <2 x double> %0 } -declare <2 x 
double> @llvm.riscv.sf.vc.v.ivw.se.nxv2f64.nxv2f32.nxv2i32.iXLen.iXLen(iXLen, <2 x double>, <2 x i32>, iXLen, iXLen) - define void @test_sf_vc_fwvi_se_e64m4(<4 x double> %vd, <4 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvi_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -2420,8 +2050,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4f64.nxv4i32.iXLen.iXLen(iXLen, <4 x double>, <4 x i32>, iXLen, iXLen) - define <4 x double> @test_sf_vc_fw_fwvi_se_e64m4(<4 x double> %vd, <4 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -2433,8 +2061,6 @@ entry: ret <4 x double> %0 } -declare <4 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv4f64.nxv4f32.nxv4i32.iXLen.iXLen(iXLen, <4 x double>, <4 x i32>, iXLen, iXLen) - define void @test_sf_vc_fwvi_se_e64m8(<8 x double> %vd, <8 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvi_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -2446,8 +2072,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8f64.nxv8i32.iXLen.iXLen(iXLen, <8 x double>, <8 x i32>, iXLen, iXLen) - define <8 x double> @test_sf_vc_fw_fwvi_se_e64m8(<8 x double> %vd, <8 x i32> %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -2459,8 +2083,6 @@ entry: ret <8 x double> %0 } -declare <8 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv8f64.nxv8f32.nxv8i32.iXLen.iXLen(iXLen, <8 x double>, <8 x i32>, iXLen, iXLen) - define void @test_sf_vc_fwvf_se_e32mf2(<1 x float> %vd, <1 x i16> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvf_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2472,8 +2094,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1f32.nxv1i16.f16.iXLen(iXLen, <1 x float>, <1 x i16>, half, iXLen) - define <1 x float> @test_sf_vc_fw_fwvf_se_e32mf2(<1 x float> %vd, <1 x i16> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2485,8 +2105,6 @@ entry: ret <1 x float> %0 } 
-declare <1 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv1f32.nxv1f16.nxv1i16.f16.iXLen(iXLen, <1 x float>, <1 x i16>, half, iXLen) - define void @test_sf_vc_fwvf_se_e32m1(<2 x float> %vd, <2 x i16> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvf_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2498,8 +2116,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2f32.nxv2i16.f16.iXLen(iXLen, <2 x float>, <2 x i16>, half, iXLen) - define <2 x float> @test_sf_vc_fw_fwvf_se_e32m1(<2 x float> %vd, <2 x i16> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2511,8 +2127,6 @@ entry: ret <2 x float> %0 } -declare <2 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv2f32.nxv2f16.nxv2i16.f16.iXLen(iXLen, <2 x float>, <2 x i16>, half, iXLen) - define void @test_sf_vc_fwvf_se_e32m2(<4 x float> %vd, <4 x i16> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvf_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2524,8 +2138,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4f32.nxv4i16.f16.iXLen(iXLen, <4 x float>, <4 x i16>, half, iXLen) - define <4 x float> @test_sf_vc_fw_fwvf_se_e32m2(<4 x float> %vd, <4 x i16> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2537,8 +2149,6 @@ entry: ret <4 x float> %0 } -declare <4 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv4f32.nxv4f16.nxv4i16.f16.iXLen(iXLen, <4 x float>, <4 x i16>, half, iXLen) - define void @test_sf_vc_fwvf_se_e32m4(<8 x float> %vd, <8 x i16> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvf_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2550,8 +2160,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8f32.nxv8i16.f16.iXLen(iXLen, <8 x float>, <8 x i16>, half, iXLen) - define <8 x float> @test_sf_vc_fw_fwvf_se_e32m4(<8 x float> %vd, <8 x i16> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2563,8 +2171,6 @@ 
entry: ret <8 x float> %0 } -declare <8 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv8f32.nxv8f16.nxv8i16.f16.iXLen(iXLen, <8 x float>, <8 x i16>, half, iXLen) - define void @test_sf_vc_fwvf_se_e32m8(<16 x float> %vd, <16 x i16> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvf_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2576,8 +2182,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv16f32.nxv16i16.f16.iXLen(iXLen, <16 x float>, <16 x i16>, half, iXLen) - define <16 x float> @test_sf_vc_fw_fwvf_se_e32m8(<16 x float> %vd, <16 x i16> %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2589,8 +2193,6 @@ entry: ret <16 x float> %0 } -declare <16 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv16f32.nxv16f16.nxv16i16.f16.iXLen(iXLen, <16 x float>, <16 x i16>, half, iXLen) - define void @test_sf_vc_fwvf_se_e64m1(<1 x double> %vd, <1 x i32> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvf_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -2602,8 +2204,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1f64.nxv1i32.f32.iXLen(iXLen, <1 x double>, <1 x i32>, float, iXLen) - define <1 x double> @test_sf_vc_fw_fwvf_se_e64m1(<1 x double> %vd, <1 x i32> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -2615,8 +2215,6 @@ entry: ret <1 x double> %0 } -declare <1 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv1f64.nxv1f32.nxv1i32.f32.iXLen(iXLen, <1 x double>, <1 x i32>, float, iXLen) - define void @test_sf_vc_fwvf_se_e64m2(<2 x double> %vd, <2 x i32> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvf_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -2628,8 +2226,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2f64.nxv2i32.f32.iXLen(iXLen, <2 x double>, <2 x i32>, float, iXLen) - define <2 x double> @test_sf_vc_fw_fwvf_se_e64m2(<2 x double> %vd, <2 x i32> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_fw_fwvf_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -2641,8 +2237,6 @@ entry: ret <2 x double> %0 } -declare <2 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv2f64.nxv2f32.nxv2i32.f32.iXLen(iXLen, <2 x double>, <2 x i32>, float, iXLen) - define void @test_sf_vc_fwvf_se_e64m4(<4 x double> %vd, <4 x i32> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvf_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -2654,8 +2248,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4f64.nxv4i32.f32.iXLen(iXLen, <4 x double>, <4 x i32>, float, iXLen) - define <4 x double> @test_sf_vc_fw_fwvf_se_e64m4(<4 x double> %vd, <4 x i32> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -2667,8 +2259,6 @@ entry: ret <4 x double> %0 } -declare <4 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv4f64.nxv4f32.nxv4i32.f32.iXLen(iXLen, <4 x double>, <4 x i32>, float, iXLen) - define void @test_sf_vc_fwvf_se_e64m8(<8 x double> %vd, <8 x i32> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvf_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -2680,8 +2270,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8f64.nxv8i32.f32.iXLen(iXLen, <8 x double>, <8 x i32>, float, iXLen) - define <8 x double> @test_sf_vc_fw_fwvf_se_e64m8(<8 x double> %vd, <8 x i32> %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -2693,4 +2281,3 @@ entry: ret <8 x double> %0 } -declare <8 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv8f64.nxv8f32.nxv8i32.f32.iXLen(iXLen, <8 x double>, <8 x i32>, float, iXLen) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp-mask.ll index d292978c1d5eb..10f0f7cd7ae83 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp-mask.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+m,+v < %s | FileCheck %s ; RUN: llc 
-mtriple=riscv64 -mattr=+m,+v < %s | FileCheck %s -declare <4 x i16> @llvm.vp.zext.v4i16.v4i1(<4 x i1>, <4 x i1>, i32) - define <4 x i16> @vzext_v4i16_v4i1(<4 x i1> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vzext_v4i16_v4i1: ; CHECK: # %bb.0: @@ -26,8 +24,6 @@ define <4 x i16> @vzext_v4i16_v4i1_unmasked(<4 x i1> %va, i32 zeroext %evl) { ret <4 x i16> %v } -declare <4 x i32> @llvm.vp.zext.v4i32.v4i1(<4 x i1>, <4 x i1>, i32) - define <4 x i32> @vzext_v4i32_v4i1(<4 x i1> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vzext_v4i32_v4i1: ; CHECK: # %bb.0: @@ -50,8 +46,6 @@ define <4 x i32> @vzext_v4i32_v4i1_unmasked(<4 x i1> %va, i32 zeroext %evl) { ret <4 x i32> %v } -declare <4 x i64> @llvm.vp.zext.v4i64.v4i1(<4 x i1>, <4 x i1>, i32) - define <4 x i64> @vzext_v4i64_v4i1(<4 x i1> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vzext_v4i64_v4i1: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp.ll index 8259336e8668c..e2d9e0ac2deea 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+m,+v < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+m,+v < %s | FileCheck %s -declare <4 x i16> @llvm.vp.zext.v4i16.v4i8(<4 x i8>, <4 x i1>, i32) - define <4 x i16> @vzext_v4i16_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vzext_v4i16_v4i8: ; CHECK: # %bb.0: @@ -26,8 +24,6 @@ define <4 x i16> @vzext_v4i16_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ret <4 x i16> %v } -declare <4 x i32> @llvm.vp.zext.v4i32.v4i8(<4 x i8>, <4 x i1>, i32) - define <4 x i32> @vzext_v4i32_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vzext_v4i32_v4i8: ; CHECK: # %bb.0: @@ -50,8 +46,6 @@ define <4 x i32> @vzext_v4i32_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ret <4 x i32> %v } -declare <4 x i64> @llvm.vp.zext.v4i64.v4i8(<4 x 
i8>, <4 x i1>, i32) - define <4 x i64> @vzext_v4i64_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vzext_v4i64_v4i8: ; CHECK: # %bb.0: @@ -74,8 +68,6 @@ define <4 x i64> @vzext_v4i64_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ret <4 x i64> %v } -declare <4 x i32> @llvm.vp.zext.v4i32.v4i16(<4 x i16>, <4 x i1>, i32) - define <4 x i32> @vzext_v4i32_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vzext_v4i32_v4i16: ; CHECK: # %bb.0: @@ -98,8 +90,6 @@ define <4 x i32> @vzext_v4i32_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ret <4 x i32> %v } -declare <4 x i64> @llvm.vp.zext.v4i64.v4i16(<4 x i16>, <4 x i1>, i32) - define <4 x i64> @vzext_v4i64_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vzext_v4i64_v4i16: ; CHECK: # %bb.0: @@ -122,8 +112,6 @@ define <4 x i64> @vzext_v4i64_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ret <4 x i64> %v } -declare <4 x i64> @llvm.vp.zext.v4i64.v4i32(<4 x i32>, <4 x i1>, i32) - define <4 x i64> @vzext_v4i64_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vzext_v4i64_v4i32: ; CHECK: # %bb.0: @@ -146,8 +134,6 @@ define <4 x i64> @vzext_v4i64_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ret <4 x i64> %v } -declare <32 x i64> @llvm.vp.zext.v32i64.v32i32(<32 x i32>, <32 x i1>, i32) - define <32 x i64> @vzext_v32i64_v32i32(<32 x i32> %va, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vzext_v32i64_v32i32: ; CHECK: # %bb.0: @@ -202,8 +188,6 @@ define <32 x i64> @vzext_v32i64_v32i32_unmasked(<32 x i32> %va, i32 zeroext %evl ret <32 x i64> %v } -declare <4 x i16> @llvm.vp.zext.v4i16.v4i7(<4 x i7>, <4 x i1>, i32) - define <4 x i16> @vzext_v4i16_v4i7(<4 x i7> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vzext_v4i16_v4i7: ; CHECK: # %bb.0: @@ -216,8 +200,6 @@ define <4 x i16> @vzext_v4i16_v4i7(<4 x i7> %va, <4 x i1> %m, i32 zeroext %evl) ret <4 x i16> %v } -declare <4 x i8> @llvm.vp.zext.v4i8.v4i7(<4 x i7>, <4 x i1>, i32) - define <4 x 
i8> @vzext_v4i8_v4i7(<4 x i7> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vzext_v4i8_v4i7: ; CHECK: # %bb.0: @@ -229,8 +211,6 @@ define <4 x i8> @vzext_v4i8_v4i7(<4 x i7> %va, <4 x i1> %m, i32 zeroext %evl) { ret <4 x i8> %v } -declare <4 x i15> @llvm.vp.zext.v4i15.v4i8(<4 x i8>, <4 x i1>, i32) - define <4 x i15> @vzext_v4i15_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vzext_v4i15_v4i8: ; CHECK: # %bb.0: @@ -242,8 +222,6 @@ define <4 x i15> @vzext_v4i15_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) ret <4 x i15> %v } -declare <4 x i15> @llvm.vp.zext.v4i15.v4i9(<4 x i9>, <4 x i1>, i32) - define <4 x i15> @vzext_v4i15_v4i9(<4 x i9> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vzext_v4i15_v4i9: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/float-round-conv.ll b/llvm/test/CodeGen/RISCV/rvv/float-round-conv.ll index 4512d809995a4..17c2244001082 100644 --- a/llvm/test/CodeGen/RISCV/rvv/float-round-conv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/float-round-conv.ll @@ -8,8 +8,6 @@ ; trunc ; ================================================================================ -declare @llvm.trunc.nxv1f32() - define @trunc_nxv1f32_to_si8( %x) { ; RV32-LABEL: trunc_nxv1f32_to_si8: ; RV32: # %bb.0: @@ -202,8 +200,6 @@ define @trunc_nxv1f32_to_ui64( %x) { ; trunc ; ================================================================================ -declare @llvm.trunc.nxv4f32() - define @trunc_nxv4f32_to_si8( %x) { ; RV32-LABEL: trunc_nxv4f32_to_si8: ; RV32: # %bb.0: @@ -396,8 +392,6 @@ define @trunc_nxv4f32_to_ui64( %x) { ; ceil ; ================================================================================ -declare @llvm.ceil.nxv1f32() - define @ceil_nxv1f32_to_si8( %x) { ; RV32-LABEL: ceil_nxv1f32_to_si8: ; RV32: # %bb.0: @@ -622,8 +616,6 @@ define @ceil_nxv1f32_to_ui64( %x) { ; ceil ; ================================================================================ -declare @llvm.ceil.nxv4f32() - define @ceil_nxv4f32_to_si8( %x) 
{ ; RV32-LABEL: ceil_nxv4f32_to_si8: ; RV32: # %bb.0: @@ -848,8 +840,6 @@ define @ceil_nxv4f32_to_ui64( %x) { ; rint ; ================================================================================ -declare @llvm.rint.nxv4f32() - define @rint_nxv4f32_to_si8( %x) { ; RV32-LABEL: rint_nxv4f32_to_si8: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll index 8f2aec3140e9d..e2deefa26ecb3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll @@ -12,8 +12,6 @@ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ ; RUN: --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN -declare @llvm.vp.floor.nxv1bf16(, , i32) - define @vp_floor_nxv1bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_floor_nxv1bf16: ; CHECK: # %bb.0: @@ -66,8 +64,6 @@ define @vp_floor_nxv1bf16_unmasked( % ret %v } -declare @llvm.vp.floor.nxv2bf16(, , i32) - define @vp_floor_nxv2bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_floor_nxv2bf16: ; CHECK: # %bb.0: @@ -120,8 +116,6 @@ define @vp_floor_nxv2bf16_unmasked( % ret %v } -declare @llvm.vp.floor.nxv4bf16(, , i32) - define @vp_floor_nxv4bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_floor_nxv4bf16: ; CHECK: # %bb.0: @@ -174,8 +168,6 @@ define @vp_floor_nxv4bf16_unmasked( % ret %v } -declare @llvm.vp.floor.nxv8bf16(, , i32) - define @vp_floor_nxv8bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_floor_nxv8bf16: ; CHECK: # %bb.0: @@ -228,8 +220,6 @@ define @vp_floor_nxv8bf16_unmasked( % ret %v } -declare @llvm.vp.floor.nxv16bf16(, , i32) - define @vp_floor_nxv16bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_floor_nxv16bf16: ; CHECK: # %bb.0: @@ -282,8 +272,6 @@ define @vp_floor_nxv16bf16_unmasked( %v } -declare @llvm.vp.floor.nxv32bf16(, , i32) - define @vp_floor_nxv32bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_floor_nxv32bf16: ; CHECK: # %bb.0: @@ -402,7 +390,6 @@ define @vp_floor_nxv32bf16_unmasked( 
@llvm.vp.floor.nxv32bf16( %va, splat (i1 true), i32 %evl) ret %v } -declare @llvm.vp.floor.nxv1f16(, , i32) define @vp_floor_nxv1f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_floor_nxv1f16: @@ -490,8 +477,6 @@ define @vp_floor_nxv1f16_unmasked( %va, i ret %v } -declare @llvm.vp.floor.nxv2f16(, , i32) - define @vp_floor_nxv2f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_floor_nxv2f16: ; ZVFH: # %bb.0: @@ -578,8 +563,6 @@ define @vp_floor_nxv2f16_unmasked( %va, i ret %v } -declare @llvm.vp.floor.nxv4f16(, , i32) - define @vp_floor_nxv4f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_floor_nxv4f16: ; ZVFH: # %bb.0: @@ -666,8 +649,6 @@ define @vp_floor_nxv4f16_unmasked( %va, i ret %v } -declare @llvm.vp.floor.nxv8f16(, , i32) - define @vp_floor_nxv8f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_floor_nxv8f16: ; ZVFH: # %bb.0: @@ -756,8 +737,6 @@ define @vp_floor_nxv8f16_unmasked( %va, i ret %v } -declare @llvm.vp.floor.nxv16f16(, , i32) - define @vp_floor_nxv16f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_floor_nxv16f16: ; ZVFH: # %bb.0: @@ -846,8 +825,6 @@ define @vp_floor_nxv16f16_unmasked( %va ret %v } -declare @llvm.vp.floor.nxv32f16(, , i32) - define @vp_floor_nxv32f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_floor_nxv32f16: ; ZVFH: # %bb.0: @@ -1003,8 +980,6 @@ define @vp_floor_nxv32f16_unmasked( %va ret %v } -declare @llvm.vp.floor.nxv1f32(, , i32) - define @vp_floor_nxv1f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_floor_nxv1f32: ; CHECK: # %bb.0: @@ -1045,8 +1020,6 @@ define @vp_floor_nxv1f32_unmasked( %va, ret %v } -declare @llvm.vp.floor.nxv2f32(, , i32) - define @vp_floor_nxv2f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_floor_nxv2f32: ; CHECK: # %bb.0: @@ -1087,8 +1060,6 @@ define @vp_floor_nxv2f32_unmasked( %va, ret %v } -declare @llvm.vp.floor.nxv4f32(, , i32) - define @vp_floor_nxv4f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_floor_nxv4f32: ; CHECK: # %bb.0: @@ -1131,8 +1102,6 @@ define 
@vp_floor_nxv4f32_unmasked( %va, ret %v } -declare @llvm.vp.floor.nxv8f32(, , i32) - define @vp_floor_nxv8f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_floor_nxv8f32: ; CHECK: # %bb.0: @@ -1175,8 +1144,6 @@ define @vp_floor_nxv8f32_unmasked( %va, ret %v } -declare @llvm.vp.floor.nxv16f32(, , i32) - define @vp_floor_nxv16f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_floor_nxv16f32: ; CHECK: # %bb.0: @@ -1219,8 +1186,6 @@ define @vp_floor_nxv16f32_unmasked( % ret %v } -declare @llvm.vp.floor.nxv1f64(, , i32) - define @vp_floor_nxv1f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_floor_nxv1f64: ; RV32ZVFH: # %bb.0: @@ -1361,8 +1326,6 @@ define @vp_floor_nxv1f64_unmasked( %v ret %v } -declare @llvm.vp.floor.nxv2f64(, , i32) - define @vp_floor_nxv2f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_floor_nxv2f64: ; RV32ZVFH: # %bb.0: @@ -1511,8 +1474,6 @@ define @vp_floor_nxv2f64_unmasked( %v ret %v } -declare @llvm.vp.floor.nxv4f64(, , i32) - define @vp_floor_nxv4f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_floor_nxv4f64: ; RV32ZVFH: # %bb.0: @@ -1661,8 +1622,6 @@ define @vp_floor_nxv4f64_unmasked( %v ret %v } -declare @llvm.vp.floor.nxv7f64(, , i32) - define @vp_floor_nxv7f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_floor_nxv7f64: ; RV32ZVFH: # %bb.0: @@ -1811,8 +1770,6 @@ define @vp_floor_nxv7f64_unmasked( %v ret %v } -declare @llvm.vp.floor.nxv8f64(, , i32) - define @vp_floor_nxv8f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_floor_nxv8f64: ; RV32ZVFH: # %bb.0: @@ -1962,7 +1919,6 @@ define @vp_floor_nxv8f64_unmasked( %v } ; Test splitting. 
-declare @llvm.vp.floor.nxv16f64(, , i32) define @vp_floor_nxv16f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_floor_nxv16f64: diff --git a/llvm/test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll index 3faaf210086cb..25a4eb74eeba7 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll @@ -16,8 +16,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+zfbfmin,+zvfbfmin,+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs -early-live-intervals < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare @llvm.maximum.nxv1bf16(, ) - define @vfmax_nxv1bf16_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv1bf16_vv: ; CHECK: # %bb.0: @@ -39,8 +37,6 @@ define @vfmax_nxv1bf16_vv( %a, %v } -declare @llvm.maximum.nxv2bf16(, ) - define @vfmax_nxv2bf16_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv2bf16_vv: ; CHECK: # %bb.0: @@ -62,8 +58,6 @@ define @vfmax_nxv2bf16_vv( %a, %v } -declare @llvm.maximum.nxv4bf16(, ) - define @vfmax_nxv4bf16_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv4bf16_vv: ; CHECK: # %bb.0: @@ -85,8 +79,6 @@ define @vfmax_nxv4bf16_vv( %a, %v } -declare @llvm.maximum.nxv8bf16(, ) - define @vfmax_nxv8bf16_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv8bf16_vv: ; CHECK: # %bb.0: @@ -108,8 +100,6 @@ define @vfmax_nxv8bf16_vv( %a, %v } -declare @llvm.maximum.nxv16bf16(, ) - define @vfmax_nxv16bf16_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv16bf16_vv: ; CHECK: # %bb.0: @@ -130,8 +120,6 @@ define @vfmax_nxv16bf16_vv( %a, %v } -declare @llvm.maximum.nxv32bf16(, ) - define @vfmax_nxv32bf16_vv( %a, %b) nounwind { ; ZVFH-LABEL: vfmax_nxv32bf16_vv: ; ZVFH: # %bb.0: @@ -286,8 +274,6 @@ define @vfmax_nxv32bf16_vv( %a, %v } -declare @llvm.maximum.nxv1f16(, ) - define @vfmax_nxv1f16_vv( %a, %b) { ; ZVFH-LABEL: vfmax_nxv1f16_vv: ; ZVFH: # %bb.0: @@ -319,8 +305,6 @@ define @vfmax_nxv1f16_vv( %a, %v } -declare @llvm.maximum.nxv2f16(, ) - define @vfmax_nxv2f16_vv( %a, %b) { ; ZVFH-LABEL: vfmax_nxv2f16_vv: ; 
ZVFH: # %bb.0: @@ -352,8 +336,6 @@ define @vfmax_nxv2f16_vv( %a, %v } -declare @llvm.maximum.nxv4f16(, ) - define @vfmax_nxv4f16_vv( %a, %b) { ; ZVFH-LABEL: vfmax_nxv4f16_vv: ; ZVFH: # %bb.0: @@ -385,8 +367,6 @@ define @vfmax_nxv4f16_vv( %a, %v } -declare @llvm.maximum.nxv8f16(, ) - define @vfmax_nxv8f16_vv( %a, %b) { ; ZVFH-LABEL: vfmax_nxv8f16_vv: ; ZVFH: # %bb.0: @@ -418,8 +398,6 @@ define @vfmax_nxv8f16_vv( %a, %v } -declare @llvm.maximum.nxv16f16(, ) - define @vfmax_nxv16f16_vv( %a, %b) { ; ZVFH-LABEL: vfmax_nxv16f16_vv: ; ZVFH: # %bb.0: @@ -450,8 +428,6 @@ define @vfmax_nxv16f16_vv( %a, %v } -declare @llvm.maximum.nxv32f16(, ) - define @vfmax_nxv32f16_vv( %a, %b) nounwind { ; ZVFH-LABEL: vfmax_nxv32f16_vv: ; ZVFH: # %bb.0: @@ -540,8 +516,6 @@ define @vfmax_nxv32f16_vv( %a, %v } -declare @llvm.maximum.nxv1f32(, ) - define @vfmax_nxv1f32_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv1f32_vv: ; CHECK: # %bb.0: @@ -556,8 +530,6 @@ define @vfmax_nxv1f32_vv( %a, %v } -declare @llvm.maximum.nxv2f32(, ) - define @vfmax_nxv2f32_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv2f32_vv: ; CHECK: # %bb.0: @@ -572,8 +544,6 @@ define @vfmax_nxv2f32_vv( %a, %v } -declare @llvm.maximum.nxv4f32(, ) - define @vfmax_nxv4f32_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv4f32_vv: ; CHECK: # %bb.0: @@ -588,8 +558,6 @@ define @vfmax_nxv4f32_vv( %a, %v } -declare @llvm.maximum.nxv8f32(, ) - define @vfmax_nxv8f32_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv8f32_vv: ; CHECK: # %bb.0: @@ -604,8 +572,6 @@ define @vfmax_nxv8f32_vv( %a, %v } -declare @llvm.maximum.nxv16f32(, ) - define @vfmax_nxv16f32_vv( %a, %b) nounwind { ; CHECK-LABEL: vfmax_nxv16f32_vv: ; CHECK: # %bb.0: @@ -621,8 +587,6 @@ define @vfmax_nxv16f32_vv( %a, %v } -declare @llvm.maximum.nxv1f64(, ) - define @vfmax_nxv1f64_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv1f64_vv: ; CHECK: # %bb.0: @@ -637,8 +601,6 @@ define @vfmax_nxv1f64_vv( %a, %v } -declare @llvm.maximum.nxv2f64(, ) - define @vfmax_nxv2f64_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv2f64_vv: ; CHECK: # 
%bb.0: @@ -653,8 +615,6 @@ define @vfmax_nxv2f64_vv( %a, %v } -declare @llvm.maximum.nxv4f64(, ) - define @vfmax_nxv4f64_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv4f64_vv: ; CHECK: # %bb.0: @@ -669,8 +629,6 @@ define @vfmax_nxv4f64_vv( %a, %v } -declare @llvm.maximum.nxv8f64(, ) - define @vfmax_nxv8f64_vv( %a, %b) nounwind { ; CHECK-LABEL: vfmax_nxv8f64_vv: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll index f6b94b41103ef..0e0c92b150d33 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll @@ -14,8 +14,6 @@ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ ; RUN: --check-prefixes=CHECK,ZVFHMIN -declare @llvm.vp.maximum.nxv1bf16(, , , i32) - define @vfmax_vv_nxv1bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv1bf16: ; CHECK: # %bb.0: @@ -59,8 +57,6 @@ define @vfmax_vv_nxv1bf16_unmasked( % ret %v } -declare @llvm.vp.maximum.nxv2bf16(, , , i32) - define @vfmax_vv_nxv2bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv2bf16: ; CHECK: # %bb.0: @@ -104,8 +100,6 @@ define @vfmax_vv_nxv2bf16_unmasked( % ret %v } -declare @llvm.vp.maximum.nxv4bf16(, , , i32) - define @vfmax_vv_nxv4bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv4bf16: ; CHECK: # %bb.0: @@ -151,8 +145,6 @@ define @vfmax_vv_nxv4bf16_unmasked( % ret %v } -declare @llvm.vp.maximum.nxv8bf16(, , , i32) - define @vfmax_vv_nxv8bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv8bf16: ; CHECK: # %bb.0: @@ -198,8 +190,6 @@ define @vfmax_vv_nxv8bf16_unmasked( % ret %v } -declare @llvm.vp.maximum.nxv16bf16(, , , i32) - define @vfmax_vv_nxv16bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv16bf16: ; CHECK: # %bb.0: @@ -259,8 +249,6 @@ define @vfmax_vv_nxv16bf16_unmasked( %v } -declare @llvm.vp.maximum.nxv32bf16(, , , i32) - define @vfmax_vv_nxv32bf16( %va, %vb, %m, i32 zeroext %evl) { ; 
CHECK-LABEL: vfmax_vv_nxv32bf16: ; CHECK: # %bb.0: @@ -498,7 +486,6 @@ define @vfmax_vv_nxv32bf16_unmasked( @llvm.vp.maximum.nxv32bf16( %va, %vb, splat (i1 true), i32 %evl) ret %v } -declare @llvm.vp.maximum.nxv1f16(, , , i32) define @vfmax_vv_nxv1f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmax_vv_nxv1f16: @@ -566,8 +553,6 @@ define @vfmax_vv_nxv1f16_unmasked( %va, < ret %v } -declare @llvm.vp.maximum.nxv2f16(, , , i32) - define @vfmax_vv_nxv2f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmax_vv_nxv2f16: ; ZVFH: # %bb.0: @@ -634,8 +619,6 @@ define @vfmax_vv_nxv2f16_unmasked( %va, < ret %v } -declare @llvm.vp.maximum.nxv4f16(, , , i32) - define @vfmax_vv_nxv4f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmax_vv_nxv4f16: ; ZVFH: # %bb.0: @@ -704,8 +687,6 @@ define @vfmax_vv_nxv4f16_unmasked( %va, < ret %v } -declare @llvm.vp.maximum.nxv8f16(, , , i32) - define @vfmax_vv_nxv8f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmax_vv_nxv8f16: ; ZVFH: # %bb.0: @@ -776,8 +757,6 @@ define @vfmax_vv_nxv8f16_unmasked( %va, < ret %v } -declare @llvm.vp.maximum.nxv16f16(, , , i32) - define @vfmax_vv_nxv16f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmax_vv_nxv16f16: ; ZVFH: # %bb.0: @@ -862,8 +841,6 @@ define @vfmax_vv_nxv16f16_unmasked( %va ret %v } -declare @llvm.vp.maximum.nxv32f16(, , , i32) - define @vfmax_vv_nxv32f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmax_vv_nxv32f16: ; ZVFH: # %bb.0: @@ -1143,8 +1120,6 @@ define @vfmax_vv_nxv32f16_unmasked( %va ret %v } -declare @llvm.vp.maximum.nxv1f32(, , , i32) - define @vfmax_vv_nxv1f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv1f32: ; CHECK: # %bb.0: @@ -1176,8 +1151,6 @@ define @vfmax_vv_nxv1f32_unmasked( %va, ret %v } -declare @llvm.vp.maximum.nxv2f32(, , , i32) - define @vfmax_vv_nxv2f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv2f32: ; CHECK: # %bb.0: @@ -1209,8 +1182,6 @@ define @vfmax_vv_nxv2f32_unmasked( %va, ret %v } -declare 
@llvm.vp.maximum.nxv4f32(, , , i32) - define @vfmax_vv_nxv4f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv4f32: ; CHECK: # %bb.0: @@ -1244,8 +1215,6 @@ define @vfmax_vv_nxv4f32_unmasked( %va, ret %v } -declare @llvm.vp.maximum.nxv8f32(, , , i32) - define @vfmax_vv_nxv8f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv8f32: ; CHECK: # %bb.0: @@ -1279,8 +1248,6 @@ define @vfmax_vv_nxv8f32_unmasked( %va, ret %v } -declare @llvm.vp.maximum.nxv1f64(, , , i32) - define @vfmax_vv_nxv1f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv1f64: ; CHECK: # %bb.0: @@ -1312,8 +1279,6 @@ define @vfmax_vv_nxv1f64_unmasked( %v ret %v } -declare @llvm.vp.maximum.nxv2f64(, , , i32) - define @vfmax_vv_nxv2f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv2f64: ; CHECK: # %bb.0: @@ -1347,8 +1312,6 @@ define @vfmax_vv_nxv2f64_unmasked( %v ret %v } -declare @llvm.vp.maximum.nxv4f64(, , , i32) - define @vfmax_vv_nxv4f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv4f64: ; CHECK: # %bb.0: @@ -1382,8 +1345,6 @@ define @vfmax_vv_nxv4f64_unmasked( %v ret %v } -declare @llvm.vp.maximum.nxv8f64(, , , i32) - define @vfmax_vv_nxv8f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv8f64: ; CHECK: # %bb.0: @@ -1433,8 +1394,6 @@ define @vfmax_vv_nxv8f64_unmasked( %v ret %v } -declare @llvm.vp.maximum.nxv16f64(, , , i32) - define @vfmax_vv_nxv16f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv16f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fminimum-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fminimum-sdnode.ll index 919d63ca6e31a..6ffa71c6c908b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fminimum-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fminimum-sdnode.ll @@ -16,8 +16,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+zfbfmin,+zvfbfmin,+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs -early-live-intervals < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare 
@llvm.minimum.nxv1bf16(, ) - define @vfmin_nxv1bf16_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv1bf16_vv: ; CHECK: # %bb.0: @@ -39,8 +37,6 @@ define @vfmin_nxv1bf16_vv( %a, %v } -declare @llvm.minimum.nxv2bf16(, ) - define @vfmin_nxv2bf16_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv2bf16_vv: ; CHECK: # %bb.0: @@ -62,8 +58,6 @@ define @vfmin_nxv2bf16_vv( %a, %v } -declare @llvm.minimum.nxv4bf16(, ) - define @vfmin_nxv4bf16_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv4bf16_vv: ; CHECK: # %bb.0: @@ -85,8 +79,6 @@ define @vfmin_nxv4bf16_vv( %a, %v } -declare @llvm.minimum.nxv8bf16(, ) - define @vfmin_nxv8bf16_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv8bf16_vv: ; CHECK: # %bb.0: @@ -108,8 +100,6 @@ define @vfmin_nxv8bf16_vv( %a, %v } -declare @llvm.minimum.nxv16bf16(, ) - define @vfmin_nxv16bf16_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv16bf16_vv: ; CHECK: # %bb.0: @@ -130,8 +120,6 @@ define @vfmin_nxv16bf16_vv( %a, %v } -declare @llvm.minimum.nxv32bf16(, ) - define @vfmin_nxv32bf16_vv( %a, %b) nounwind { ; ZVFH-LABEL: vfmin_nxv32bf16_vv: ; ZVFH: # %bb.0: @@ -286,8 +274,6 @@ define @vfmin_nxv32bf16_vv( %a, %v } -declare @llvm.minimum.nxv1f16(, ) - define @vfmin_nxv1f16_vv( %a, %b) { ; ZVFH-LABEL: vfmin_nxv1f16_vv: ; ZVFH: # %bb.0: @@ -319,8 +305,6 @@ define @vfmin_nxv1f16_vv( %a, %v } -declare @llvm.minimum.nxv2f16(, ) - define @vfmin_nxv2f16_vv( %a, %b) { ; ZVFH-LABEL: vfmin_nxv2f16_vv: ; ZVFH: # %bb.0: @@ -352,8 +336,6 @@ define @vfmin_nxv2f16_vv( %a, %v } -declare @llvm.minimum.nxv4f16(, ) - define @vfmin_nxv4f16_vv( %a, %b) { ; ZVFH-LABEL: vfmin_nxv4f16_vv: ; ZVFH: # %bb.0: @@ -385,8 +367,6 @@ define @vfmin_nxv4f16_vv( %a, %v } -declare @llvm.minimum.nxv8f16(, ) - define @vfmin_nxv8f16_vv( %a, %b) { ; ZVFH-LABEL: vfmin_nxv8f16_vv: ; ZVFH: # %bb.0: @@ -418,8 +398,6 @@ define @vfmin_nxv8f16_vv( %a, %v } -declare @llvm.minimum.nxv16f16(, ) - define @vfmin_nxv16f16_vv( %a, %b) { ; ZVFH-LABEL: vfmin_nxv16f16_vv: ; ZVFH: # %bb.0: @@ -450,8 +428,6 @@ define @vfmin_nxv16f16_vv( %a, %v } -declare 
@llvm.minimum.nxv32f16(, ) - define @vfmin_nxv32f16_vv( %a, %b) nounwind { ; ZVFH-LABEL: vfmin_nxv32f16_vv: ; ZVFH: # %bb.0: @@ -540,8 +516,6 @@ define @vfmin_nxv32f16_vv( %a, %v } -declare @llvm.minimum.nxv1f32(, ) - define @vfmin_nxv1f32_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv1f32_vv: ; CHECK: # %bb.0: @@ -556,8 +530,6 @@ define @vfmin_nxv1f32_vv( %a, %v } -declare @llvm.minimum.nxv2f32(, ) - define @vfmin_nxv2f32_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv2f32_vv: ; CHECK: # %bb.0: @@ -572,8 +544,6 @@ define @vfmin_nxv2f32_vv( %a, %v } -declare @llvm.minimum.nxv4f32(, ) - define @vfmin_nxv4f32_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv4f32_vv: ; CHECK: # %bb.0: @@ -588,8 +558,6 @@ define @vfmin_nxv4f32_vv( %a, %v } -declare @llvm.minimum.nxv8f32(, ) - define @vfmin_nxv8f32_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv8f32_vv: ; CHECK: # %bb.0: @@ -604,8 +572,6 @@ define @vfmin_nxv8f32_vv( %a, %v } -declare @llvm.minimum.nxv16f32(, ) - define @vfmin_nxv16f32_vv( %a, %b) nounwind { ; CHECK-LABEL: vfmin_nxv16f32_vv: ; CHECK: # %bb.0: @@ -621,8 +587,6 @@ define @vfmin_nxv16f32_vv( %a, %v } -declare @llvm.minimum.nxv1f64(, ) - define @vfmin_nxv1f64_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv1f64_vv: ; CHECK: # %bb.0: @@ -637,8 +601,6 @@ define @vfmin_nxv1f64_vv( %a, %v } -declare @llvm.minimum.nxv2f64(, ) - define @vfmin_nxv2f64_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv2f64_vv: ; CHECK: # %bb.0: @@ -653,8 +615,6 @@ define @vfmin_nxv2f64_vv( %a, %v } -declare @llvm.minimum.nxv4f64(, ) - define @vfmin_nxv4f64_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv4f64_vv: ; CHECK: # %bb.0: @@ -669,8 +629,6 @@ define @vfmin_nxv4f64_vv( %a, %v } -declare @llvm.minimum.nxv8f64(, ) - define @vfmin_nxv8f64_vv( %a, %b) nounwind { ; CHECK-LABEL: vfmin_nxv8f64_vv: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll index dc2dec55c4a1a..86ed239e99373 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll 
@@ -14,8 +14,6 @@ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ ; RUN: --check-prefixes=CHECK,ZVFHMIN -declare @llvm.vp.minimum.nxv1bf16(, , , i32) - define @vfmin_vv_nxv1bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv1bf16: ; CHECK: # %bb.0: @@ -59,8 +57,6 @@ define @vfmin_vv_nxv1bf16_unmasked( % ret %v } -declare @llvm.vp.minimum.nxv2bf16(, , , i32) - define @vfmin_vv_nxv2bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv2bf16: ; CHECK: # %bb.0: @@ -104,8 +100,6 @@ define @vfmin_vv_nxv2bf16_unmasked( % ret %v } -declare @llvm.vp.minimum.nxv4bf16(, , , i32) - define @vfmin_vv_nxv4bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv4bf16: ; CHECK: # %bb.0: @@ -151,8 +145,6 @@ define @vfmin_vv_nxv4bf16_unmasked( % ret %v } -declare @llvm.vp.minimum.nxv8bf16(, , , i32) - define @vfmin_vv_nxv8bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv8bf16: ; CHECK: # %bb.0: @@ -198,8 +190,6 @@ define @vfmin_vv_nxv8bf16_unmasked( % ret %v } -declare @llvm.vp.minimum.nxv16bf16(, , , i32) - define @vfmin_vv_nxv16bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv16bf16: ; CHECK: # %bb.0: @@ -259,8 +249,6 @@ define @vfmin_vv_nxv16bf16_unmasked( %v } -declare @llvm.vp.minimum.nxv32bf16(, , , i32) - define @vfmin_vv_nxv32bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv32bf16: ; CHECK: # %bb.0: @@ -498,7 +486,6 @@ define @vfmin_vv_nxv32bf16_unmasked( @llvm.vp.minimum.nxv32bf16( %va, %vb, splat (i1 true), i32 %evl) ret %v } -declare @llvm.vp.minimum.nxv1f16(, , , i32) define @vfmin_vv_nxv1f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmin_vv_nxv1f16: @@ -566,8 +553,6 @@ define @vfmin_vv_nxv1f16_unmasked( %va, < ret %v } -declare @llvm.vp.minimum.nxv2f16(, , , i32) - define @vfmin_vv_nxv2f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmin_vv_nxv2f16: ; ZVFH: # %bb.0: @@ -634,8 +619,6 @@ define @vfmin_vv_nxv2f16_unmasked( %va, < ret %v } -declare 
@llvm.vp.minimum.nxv4f16(, , , i32) - define @vfmin_vv_nxv4f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmin_vv_nxv4f16: ; ZVFH: # %bb.0: @@ -704,8 +687,6 @@ define @vfmin_vv_nxv4f16_unmasked( %va, < ret %v } -declare @llvm.vp.minimum.nxv8f16(, , , i32) - define @vfmin_vv_nxv8f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmin_vv_nxv8f16: ; ZVFH: # %bb.0: @@ -776,8 +757,6 @@ define @vfmin_vv_nxv8f16_unmasked( %va, < ret %v } -declare @llvm.vp.minimum.nxv16f16(, , , i32) - define @vfmin_vv_nxv16f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmin_vv_nxv16f16: ; ZVFH: # %bb.0: @@ -862,8 +841,6 @@ define @vfmin_vv_nxv16f16_unmasked( %va ret %v } -declare @llvm.vp.minimum.nxv32f16(, , , i32) - define @vfmin_vv_nxv32f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmin_vv_nxv32f16: ; ZVFH: # %bb.0: @@ -1143,8 +1120,6 @@ define @vfmin_vv_nxv32f16_unmasked( %va ret %v } -declare @llvm.vp.minimum.nxv1f32(, , , i32) - define @vfmin_vv_nxv1f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv1f32: ; CHECK: # %bb.0: @@ -1176,8 +1151,6 @@ define @vfmin_vv_nxv1f32_unmasked( %va, ret %v } -declare @llvm.vp.minimum.nxv2f32(, , , i32) - define @vfmin_vv_nxv2f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv2f32: ; CHECK: # %bb.0: @@ -1209,8 +1182,6 @@ define @vfmin_vv_nxv2f32_unmasked( %va, ret %v } -declare @llvm.vp.minimum.nxv4f32(, , , i32) - define @vfmin_vv_nxv4f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv4f32: ; CHECK: # %bb.0: @@ -1244,8 +1215,6 @@ define @vfmin_vv_nxv4f32_unmasked( %va, ret %v } -declare @llvm.vp.minimum.nxv8f32(, , , i32) - define @vfmin_vv_nxv8f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv8f32: ; CHECK: # %bb.0: @@ -1279,8 +1248,6 @@ define @vfmin_vv_nxv8f32_unmasked( %va, ret %v } -declare @llvm.vp.minimum.nxv1f64(, , , i32) - define @vfmin_vv_nxv1f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv1f64: ; CHECK: # %bb.0: @@ -1312,8 +1279,6 @@ 
define @vfmin_vv_nxv1f64_unmasked( %v ret %v } -declare @llvm.vp.minimum.nxv2f64(, , , i32) - define @vfmin_vv_nxv2f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv2f64: ; CHECK: # %bb.0: @@ -1347,8 +1312,6 @@ define @vfmin_vv_nxv2f64_unmasked( %v ret %v } -declare @llvm.vp.minimum.nxv4f64(, , , i32) - define @vfmin_vv_nxv4f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv4f64: ; CHECK: # %bb.0: @@ -1382,8 +1345,6 @@ define @vfmin_vv_nxv4f64_unmasked( %v ret %v } -declare @llvm.vp.minimum.nxv8f64(, , , i32) - define @vfmin_vv_nxv8f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv8f64: ; CHECK: # %bb.0: @@ -1433,8 +1394,6 @@ define @vfmin_vv_nxv8f64_unmasked( %v ret %v } -declare @llvm.vp.minimum.nxv16f64(, , , i32) - define @vfmin_vv_nxv16f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv16f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fnearbyint-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fnearbyint-constrained-sdnode.ll index 409235f7e1b2c..6c5b6ff31a24b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fnearbyint-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fnearbyint-constrained-sdnode.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64 %s -declare @llvm.experimental.constrained.nearbyint.nxv1f16(, metadata, metadata) - define @nearbyint_nxv1f16( %v) strictfp { ; CHECK-LABEL: nearbyint_nxv1f16: ; CHECK: # %bb.0: @@ -29,8 +27,6 @@ define @nearbyint_nxv1f16( %v) strictfp { ret %r } -declare @llvm.experimental.constrained.nearbyint.nxv2f16(, metadata, metadata) - define @nearbyint_nxv2f16( %v) strictfp { ; CHECK-LABEL: nearbyint_nxv2f16: ; CHECK: # %bb.0: @@ -54,8 +50,6 @@ define @nearbyint_nxv2f16( %v) strictfp { ret %r } -declare @llvm.experimental.constrained.nearbyint.nxv4f16(, metadata, metadata) - define @nearbyint_nxv4f16( %v) strictfp { ; CHECK-LABEL: 
nearbyint_nxv4f16: ; CHECK: # %bb.0: @@ -79,8 +73,6 @@ define @nearbyint_nxv4f16( %v) strictfp { ret %r } -declare @llvm.experimental.constrained.nearbyint.nxv8f16(, metadata, metadata) - define @nearbyint_nxv8f16( %v) strictfp { ; CHECK-LABEL: nearbyint_nxv8f16: ; CHECK: # %bb.0: @@ -104,8 +96,6 @@ define @nearbyint_nxv8f16( %v) strictfp { ret %r } -declare @llvm.experimental.constrained.nearbyint.nxv16f16(, metadata, metadata) - define @nearbyint_nxv16f16( %v) strictfp { ; CHECK-LABEL: nearbyint_nxv16f16: ; CHECK: # %bb.0: @@ -129,8 +119,6 @@ define @nearbyint_nxv16f16( %v) strictf ret %r } -declare @llvm.experimental.constrained.nearbyint.nxv32f16(, metadata, metadata) - define @nearbyint_nxv32f16( %v) strictfp { ; CHECK-LABEL: nearbyint_nxv32f16: ; CHECK: # %bb.0: @@ -154,8 +142,6 @@ define @nearbyint_nxv32f16( %v) strictf ret %r } -declare @llvm.experimental.constrained.nearbyint.nxv1f32(, metadata, metadata) - define @nearbyint_nxv1f32( %v) strictfp { ; CHECK-LABEL: nearbyint_nxv1f32: ; CHECK: # %bb.0: @@ -178,8 +164,6 @@ define @nearbyint_nxv1f32( %v) strictfp ret %r } -declare @llvm.experimental.constrained.nearbyint.nxv2f32(, metadata, metadata) - define @nearbyint_nxv2f32( %v) strictfp { ; CHECK-LABEL: nearbyint_nxv2f32: ; CHECK: # %bb.0: @@ -202,8 +186,6 @@ define @nearbyint_nxv2f32( %v) strictfp ret %r } -declare @llvm.experimental.constrained.nearbyint.nxv4f32(, metadata, metadata) - define @nearbyint_nxv4f32( %v) strictfp { ; CHECK-LABEL: nearbyint_nxv4f32: ; CHECK: # %bb.0: @@ -226,8 +208,6 @@ define @nearbyint_nxv4f32( %v) strictfp ret %r } -declare @llvm.experimental.constrained.nearbyint.nxv8f32(, metadata, metadata) - define @nearbyint_nxv8f32( %v) strictfp { ; CHECK-LABEL: nearbyint_nxv8f32: ; CHECK: # %bb.0: @@ -250,8 +230,6 @@ define @nearbyint_nxv8f32( %v) strictfp ret %r } -declare @llvm.experimental.constrained.nearbyint.nxv16f32(, metadata, metadata) - define @nearbyint_nxv16f32( %v) strictfp { ; CHECK-LABEL: nearbyint_nxv16f32: ; CHECK: # 
%bb.0: @@ -274,8 +252,6 @@ define @nearbyint_nxv16f32( %v) stric ret %r } -declare @llvm.experimental.constrained.nearbyint.nxv1f64(, metadata, metadata) - define @nearbyint_nxv1f64( %v) strictfp { ; RV32-LABEL: nearbyint_nxv1f64: ; RV32: # %bb.0: @@ -317,8 +293,6 @@ define @nearbyint_nxv1f64( %v) strict ret %r } -declare @llvm.experimental.constrained.nearbyint.nxv2f64(, metadata, metadata) - define @nearbyint_nxv2f64( %v) strictfp { ; RV32-LABEL: nearbyint_nxv2f64: ; RV32: # %bb.0: @@ -360,8 +334,6 @@ define @nearbyint_nxv2f64( %v) strict ret %r } -declare @llvm.experimental.constrained.nearbyint.nxv4f64(, metadata, metadata) - define @nearbyint_nxv4f64( %v) strictfp { ; RV32-LABEL: nearbyint_nxv4f64: ; RV32: # %bb.0: @@ -403,8 +375,6 @@ define @nearbyint_nxv4f64( %v) strict ret %r } -declare @llvm.experimental.constrained.nearbyint.nxv8f64(, metadata, metadata) - define @nearbyint_nxv8f64( %v) strictfp { ; RV32-LABEL: nearbyint_nxv8f64: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fnearbyint-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fnearbyint-sdnode.ll index 97e65f4e4b53a..8bfc002fa629b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fnearbyint-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fnearbyint-sdnode.ll @@ -202,7 +202,6 @@ define @nearbyint_nxv1f16( %x) { %a = call @llvm.nearbyint.nxv1f16( %x) ret %a } -declare @llvm.nearbyint.nxv1f16() define @nearbyint_nxv2f16( %x) { ; ZVFH-LABEL: nearbyint_nxv2f16: @@ -242,7 +241,6 @@ define @nearbyint_nxv2f16( %x) { %a = call @llvm.nearbyint.nxv2f16( %x) ret %a } -declare @llvm.nearbyint.nxv2f16() define @nearbyint_nxv4f16( %x) { ; ZVFH-LABEL: nearbyint_nxv4f16: @@ -282,7 +280,6 @@ define @nearbyint_nxv4f16( %x) { %a = call @llvm.nearbyint.nxv4f16( %x) ret %a } -declare @llvm.nearbyint.nxv4f16() define @nearbyint_nxv8f16( %x) { ; ZVFH-LABEL: nearbyint_nxv8f16: @@ -322,7 +319,6 @@ define @nearbyint_nxv8f16( %x) { %a = call @llvm.nearbyint.nxv8f16( %x) ret %a } -declare @llvm.nearbyint.nxv8f16() define 
@nearbyint_nxv16f16( %x) { ; ZVFH-LABEL: nearbyint_nxv16f16: @@ -362,7 +358,6 @@ define @nearbyint_nxv16f16( %x) { %a = call @llvm.nearbyint.nxv16f16( %x) ret %a } -declare @llvm.nearbyint.nxv16f16() define @nearbyint_nxv32f16( %x) { ; ZVFH-LABEL: nearbyint_nxv32f16: @@ -416,7 +411,6 @@ define @nearbyint_nxv32f16( %x) { %a = call @llvm.nearbyint.nxv32f16( %x) ret %a } -declare @llvm.nearbyint.nxv32f16() define @nearbyint_nxv1f32( %x) { ; CHECK-LABEL: nearbyint_nxv1f32: @@ -436,7 +430,6 @@ define @nearbyint_nxv1f32( %x) { %a = call @llvm.nearbyint.nxv1f32( %x) ret %a } -declare @llvm.nearbyint.nxv1f32() define @nearbyint_nxv2f32( %x) { ; CHECK-LABEL: nearbyint_nxv2f32: @@ -456,7 +449,6 @@ define @nearbyint_nxv2f32( %x) { %a = call @llvm.nearbyint.nxv2f32( %x) ret %a } -declare @llvm.nearbyint.nxv2f32() define @nearbyint_nxv4f32( %x) { ; CHECK-LABEL: nearbyint_nxv4f32: @@ -476,7 +468,6 @@ define @nearbyint_nxv4f32( %x) { %a = call @llvm.nearbyint.nxv4f32( %x) ret %a } -declare @llvm.nearbyint.nxv4f32() define @nearbyint_nxv8f32( %x) { ; CHECK-LABEL: nearbyint_nxv8f32: @@ -496,7 +487,6 @@ define @nearbyint_nxv8f32( %x) { %a = call @llvm.nearbyint.nxv8f32( %x) ret %a } -declare @llvm.nearbyint.nxv8f32() define @nearbyint_nxv16f32( %x) { ; CHECK-LABEL: nearbyint_nxv16f32: @@ -516,7 +506,6 @@ define @nearbyint_nxv16f32( %x) { %a = call @llvm.nearbyint.nxv16f32( %x) ret %a } -declare @llvm.nearbyint.nxv16f32() define @nearbyint_nxv1f64( %x) { ; RV32ZVFH-LABEL: nearbyint_nxv1f64: @@ -583,7 +572,6 @@ define @nearbyint_nxv1f64( %x) { %a = call @llvm.nearbyint.nxv1f64( %x) ret %a } -declare @llvm.nearbyint.nxv1f64() define @nearbyint_nxv2f64( %x) { ; RV32ZVFH-LABEL: nearbyint_nxv2f64: @@ -650,7 +638,6 @@ define @nearbyint_nxv2f64( %x) { %a = call @llvm.nearbyint.nxv2f64( %x) ret %a } -declare @llvm.nearbyint.nxv2f64() define @nearbyint_nxv4f64( %x) { ; RV32ZVFH-LABEL: nearbyint_nxv4f64: @@ -717,7 +704,6 @@ define @nearbyint_nxv4f64( %x) { %a = call @llvm.nearbyint.nxv4f64( 
%x) ret %a } -declare @llvm.nearbyint.nxv4f64() define @nearbyint_nxv8f64( %x) { ; RV32ZVFH-LABEL: nearbyint_nxv8f64: @@ -784,4 +770,3 @@ define @nearbyint_nxv8f64( %x) { %a = call @llvm.nearbyint.nxv8f64( %x) ret %a } -declare @llvm.nearbyint.nxv8f64() diff --git a/llvm/test/CodeGen/RISCV/rvv/fold-binary-reduce.ll b/llvm/test/CodeGen/RISCV/rvv/fold-binary-reduce.ll index 143545ccfd4f6..665ae1960affd 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fold-binary-reduce.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fold-binary-reduce.ll @@ -324,25 +324,6 @@ entry: ret float %res } -; Function Attrs: nofree nosync nounwind readnone willreturn -declare i64 @llvm.vector.reduce.add.v4i64(<4 x i64>) -declare i64 @llvm.vector.reduce.and.v4i64(<4 x i64>) -declare i64 @llvm.vector.reduce.or.v4i64(<4 x i64>) -declare i64 @llvm.vector.reduce.xor.v4i64(<4 x i64>) -declare i64 @llvm.vector.reduce.umax.v4i64(<4 x i64>) -declare i64 @llvm.vector.reduce.umin.v4i64(<4 x i64>) -declare i64 @llvm.vector.reduce.smax.v4i64(<4 x i64>) -declare i64 @llvm.vector.reduce.smin.v4i64(<4 x i64>) -declare float @llvm.vector.reduce.fadd.v4f32(float, <4 x float>) -declare float @llvm.vector.reduce.fmax.v4f32(<4 x float>) -declare float @llvm.vector.reduce.fmin.v4f32(<4 x float>) -declare i64 @llvm.umax.i64(i64, i64) -declare i64 @llvm.umin.i64(i64, i64) -declare i64 @llvm.smax.i64(i64, i64) -declare i64 @llvm.smin.i64(i64, i64) -declare float @llvm.maxnum.f32(float ,float) -declare float @llvm.minnum.f32(float ,float) - define void @crash(<2 x i32> %0) { ; CHECK-LABEL: crash: ; CHECK: # %bb.0: # %entry @@ -364,7 +345,6 @@ entry: store i8 %conv18.us, ptr null, align 1 ret void } -declare i16 @llvm.vector.reduce.add.v4i16(<4 x i16>) define i64 @op_then_reduce(<4 x i64> %v, <4 x i64> %v2) { ; CHECK-LABEL: op_then_reduce: @@ -382,7 +362,6 @@ entry: ret i64 %res } - define i64 @two_reduce_scalar_bypass(<4 x i64> %v, <4 x i64> %v2) { ; CHECK-LABEL: two_reduce_scalar_bypass: ; CHECK: # %bb.0: # %entry diff --git 
a/llvm/test/CodeGen/RISCV/rvv/fold-vp-fadd-and-vp-fmul.ll b/llvm/test/CodeGen/RISCV/rvv/fold-vp-fadd-and-vp-fmul.ll index 1d4d554d3a47d..9e0bf5f6f5261 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fold-vp-fadd-and-vp-fmul.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fold-vp-fadd-and-vp-fmul.ll @@ -1,9 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+v -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s -declare @llvm.vp.fmul.nxv1f64( %x, %y, %m, i32 %vl) -declare @llvm.vp.fadd.nxv1f64( %x, %y, %m, i32 %vl) - ; (fadd (fmul x, y), z)) -> (fma x, y, z) define @fma( %x, %y, %z, %m, i32 zeroext %vl) { ; CHECK-LABEL: fma: diff --git a/llvm/test/CodeGen/RISCV/rvv/fold-vp-fsub-and-vp-fmul.ll b/llvm/test/CodeGen/RISCV/rvv/fold-vp-fsub-and-vp-fmul.ll index ab9adda516c07..de6ada6810bd8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fold-vp-fsub-and-vp-fmul.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fold-vp-fsub-and-vp-fmul.ll @@ -1,10 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+v -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s -declare @llvm.vp.fmul.nxv1f64( %x, %y, %m, i32 %vl) -declare @llvm.vp.fsub.nxv1f64( %x, %y, %m, i32 %vl) -declare @llvm.vp.fneg.nxv1f64( %x, %m, i32 %vl) - ; (fsub (fmul x, y), z)) -> (fma x, y, (fneg z)) define @test1( %x, %y, %z, %m, i32 zeroext %vl) { ; CHECK-LABEL: test1: diff --git a/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll b/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll index f597762521006..2add54fa35539 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll @@ -3569,8 +3569,6 @@ entry: ret <2 x i64> %conv6 } - - ; i32 saturate define <2 x i32> @stest_f64i32_mm(<2 x double> %x) { @@ -7214,21 +7212,3 @@ entry: ret <4 x i32> %spec.store.select7 } -declare <2 x i32> @llvm.smin.v2i32(<2 x i32>, <2 x i32>) -declare <2 x i32> 
@llvm.smax.v2i32(<2 x i32>, <2 x i32>) -declare <2 x i32> @llvm.umin.v2i32(<2 x i32>, <2 x i32>) -declare <4 x i32> @llvm.smin.v4i32(<4 x i32>, <4 x i32>) -declare <4 x i32> @llvm.smax.v4i32(<4 x i32>, <4 x i32>) -declare <4 x i32> @llvm.umin.v4i32(<4 x i32>, <4 x i32>) -declare <8 x i32> @llvm.smin.v8i32(<8 x i32>, <8 x i32>) -declare <8 x i32> @llvm.smax.v8i32(<8 x i32>, <8 x i32>) -declare <8 x i32> @llvm.umin.v8i32(<8 x i32>, <8 x i32>) -declare <2 x i64> @llvm.smin.v2i64(<2 x i64>, <2 x i64>) -declare <2 x i64> @llvm.smax.v2i64(<2 x i64>, <2 x i64>) -declare <2 x i64> @llvm.umin.v2i64(<2 x i64>, <2 x i64>) -declare <4 x i64> @llvm.smin.v4i64(<4 x i64>, <4 x i64>) -declare <4 x i64> @llvm.smax.v4i64(<4 x i64>, <4 x i64>) -declare <4 x i64> @llvm.umin.v4i64(<4 x i64>, <4 x i64>) -declare <2 x i128> @llvm.smin.v2i128(<2 x i128>, <2 x i128>) -declare <2 x i128> @llvm.smax.v2i128(<2 x i128>, <2 x i128>) -declare <2 x i128> @llvm.umin.v2i128(<2 x i128>, <2 x i128>) diff --git a/llvm/test/CodeGen/RISCV/rvv/fptosi-sat.ll b/llvm/test/CodeGen/RISCV/rvv/fptosi-sat.ll index bc45671077106..c09d38d3347b3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fptosi-sat.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fptosi-sat.ll @@ -6,14 +6,6 @@ ; Float -declare @llvm.fptosi.sat.nxv2f32.nxv2i32() -declare @llvm.fptosi.sat.nxv4f32.nxv4i32() -declare @llvm.fptosi.sat.nxv8f32.nxv8i32() -declare @llvm.fptosi.sat.nxv4f32.nxv4i16() -declare @llvm.fptosi.sat.nxv8f32.nxv8i16() -declare @llvm.fptosi.sat.nxv2f32.nxv2i64() -declare @llvm.fptosi.sat.nxv4f32.nxv4i64() - define @test_signed_v2f32_v2i32( %f) { ; CHECK-LABEL: test_signed_v2f32_v2i32: ; CHECK: # %bb.0: @@ -104,14 +96,6 @@ define @test_signed_v4f32_v4i64( %f) { ; Double -declare @llvm.fptosi.sat.nxv2f64.nxv2i32() -declare @llvm.fptosi.sat.nxv4f64.nxv4i32() -declare @llvm.fptosi.sat.nxv8f64.nxv8i32() -declare @llvm.fptosi.sat.nxv4f64.nxv4i16() -declare @llvm.fptosi.sat.nxv8f64.nxv8i16() -declare @llvm.fptosi.sat.nxv2f64.nxv2i64() -declare 
@llvm.fptosi.sat.nxv4f64.nxv4i64() - define @test_signed_v2f64_v2i32( %f) { ; CHECK-LABEL: test_signed_v2f64_v2i32: ; CHECK: # %bb.0: @@ -205,17 +189,8 @@ define @test_signed_v4f64_v4i64( %f) { ret %x } - ; half -declare @llvm.fptosi.sat.nxv2f16.nxv2i32() -declare @llvm.fptosi.sat.nxv4f16.nxv4i32() -declare @llvm.fptosi.sat.nxv8f16.nxv8i32() -declare @llvm.fptosi.sat.nxv4f16.nxv4i16() -declare @llvm.fptosi.sat.nxv8f16.nxv8i16() -declare @llvm.fptosi.sat.nxv2f16.nxv2i64() -declare @llvm.fptosi.sat.nxv4f16.nxv4i64() - define @test_signed_v2f16_v2i32( %f) { ; CHECK-LABEL: test_signed_v2f16_v2i32: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fptoui-sat.ll b/llvm/test/CodeGen/RISCV/rvv/fptoui-sat.ll index bb5ad6ba9d88a..ff7b81dbf61fa 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fptoui-sat.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fptoui-sat.ll @@ -6,14 +6,6 @@ ; Float -declare @llvm.fptoui.sat.nxv2f32.nxv2i32() -declare @llvm.fptoui.sat.nxv4f32.nxv4i32() -declare @llvm.fptoui.sat.nxv8f32.nxv8i32() -declare @llvm.fptoui.sat.nxv4f32.nxv4i16() -declare @llvm.fptoui.sat.nxv8f32.nxv8i16() -declare @llvm.fptoui.sat.nxv2f32.nxv2i64() -declare @llvm.fptoui.sat.nxv4f32.nxv4i64() - define @test_signed_v2f32_v2i32( %f) { ; CHECK-LABEL: test_signed_v2f32_v2i32: ; CHECK: # %bb.0: @@ -104,14 +96,6 @@ define @test_signed_v4f32_v4i64( %f) { ; Double -declare @llvm.fptoui.sat.nxv2f64.nxv2i32() -declare @llvm.fptoui.sat.nxv4f64.nxv4i32() -declare @llvm.fptoui.sat.nxv8f64.nxv8i32() -declare @llvm.fptoui.sat.nxv4f64.nxv4i16() -declare @llvm.fptoui.sat.nxv8f64.nxv8i16() -declare @llvm.fptoui.sat.nxv2f64.nxv2i64() -declare @llvm.fptoui.sat.nxv4f64.nxv4i64() - define @test_signed_v2f64_v2i32( %f) { ; CHECK-LABEL: test_signed_v2f64_v2i32: ; CHECK: # %bb.0: @@ -205,17 +189,8 @@ define @test_signed_v4f64_v4i64( %f) { ret %x } - ; half -declare @llvm.fptoui.sat.nxv2f16.nxv2i32() -declare @llvm.fptoui.sat.nxv4f16.nxv4i32() -declare @llvm.fptoui.sat.nxv8f16.nxv8i32() -declare 
@llvm.fptoui.sat.nxv4f16.nxv4i16() -declare @llvm.fptoui.sat.nxv8f16.nxv8i16() -declare @llvm.fptoui.sat.nxv2f16.nxv2i64() -declare @llvm.fptoui.sat.nxv4f16.nxv4i64() - define @test_signed_v2f16_v2i32( %f) { ; CHECK-LABEL: test_signed_v2f16_v2i32: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/frameindex-addr.ll b/llvm/test/CodeGen/RISCV/rvv/frameindex-addr.ll index 5c592dd1a2d68..b1b8aac29b058 100644 --- a/llvm/test/CodeGen/RISCV/rvv/frameindex-addr.ll +++ b/llvm/test/CodeGen/RISCV/rvv/frameindex-addr.ll @@ -5,11 +5,6 @@ ; Done as a MIR test because eliminateFrameIndex will likely turn it ; back into an addi. -declare void @llvm.riscv.vse.nxv1i64( - , - ptr, - i64); - define i64 @test( %0) nounwind { ; CHECK-LABEL: name: test ; CHECK: bb.0.entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/frint-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/frint-sdnode.ll index 5ed921d39590d..e7a856855c505 100644 --- a/llvm/test/CodeGen/RISCV/rvv/frint-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/frint-sdnode.ll @@ -184,7 +184,6 @@ define @rint_nxv1f16( %x) { %a = call @llvm.rint.nxv1f16( %x) ret %a } -declare @llvm.rint.nxv1f16() define @rint_nxv2f16( %x) { ; ZVFH-LABEL: rint_nxv2f16: @@ -220,7 +219,6 @@ define @rint_nxv2f16( %x) { %a = call @llvm.rint.nxv2f16( %x) ret %a } -declare @llvm.rint.nxv2f16() define @rint_nxv4f16( %x) { ; ZVFH-LABEL: rint_nxv4f16: @@ -256,7 +254,6 @@ define @rint_nxv4f16( %x) { %a = call @llvm.rint.nxv4f16( %x) ret %a } -declare @llvm.rint.nxv4f16() define @rint_nxv8f16( %x) { ; ZVFH-LABEL: rint_nxv8f16: @@ -292,7 +289,6 @@ define @rint_nxv8f16( %x) { %a = call @llvm.rint.nxv8f16( %x) ret %a } -declare @llvm.rint.nxv8f16() define @rint_nxv16f16( %x) { ; ZVFH-LABEL: rint_nxv16f16: @@ -328,7 +324,6 @@ define @rint_nxv16f16( %x) { %a = call @llvm.rint.nxv16f16( %x) ret %a } -declare @llvm.rint.nxv16f16() define @rint_nxv32f16( %x) { ; ZVFH-LABEL: rint_nxv32f16: @@ -376,7 +371,6 @@ define @rint_nxv32f16( %x) { %a = call @llvm.rint.nxv32f16( %x) ret %a } 
-declare @llvm.rint.nxv32f16() define @rint_nxv1f32( %x) { ; CHECK-LABEL: rint_nxv1f32: @@ -394,7 +388,6 @@ define @rint_nxv1f32( %x) { %a = call @llvm.rint.nxv1f32( %x) ret %a } -declare @llvm.rint.nxv1f32() define @rint_nxv2f32( %x) { ; CHECK-LABEL: rint_nxv2f32: @@ -412,7 +405,6 @@ define @rint_nxv2f32( %x) { %a = call @llvm.rint.nxv2f32( %x) ret %a } -declare @llvm.rint.nxv2f32() define @rint_nxv4f32( %x) { ; CHECK-LABEL: rint_nxv4f32: @@ -430,7 +422,6 @@ define @rint_nxv4f32( %x) { %a = call @llvm.rint.nxv4f32( %x) ret %a } -declare @llvm.rint.nxv4f32() define @rint_nxv8f32( %x) { ; CHECK-LABEL: rint_nxv8f32: @@ -448,7 +439,6 @@ define @rint_nxv8f32( %x) { %a = call @llvm.rint.nxv8f32( %x) ret %a } -declare @llvm.rint.nxv8f32() define @rint_nxv16f32( %x) { ; CHECK-LABEL: rint_nxv16f32: @@ -466,7 +456,6 @@ define @rint_nxv16f32( %x) { %a = call @llvm.rint.nxv16f32( %x) ret %a } -declare @llvm.rint.nxv16f32() define @rint_nxv1f64( %x) { ; RV32ZVFH-LABEL: rint_nxv1f64: @@ -525,7 +514,6 @@ define @rint_nxv1f64( %x) { %a = call @llvm.rint.nxv1f64( %x) ret %a } -declare @llvm.rint.nxv1f64() define @rint_nxv2f64( %x) { ; RV32ZVFH-LABEL: rint_nxv2f64: @@ -584,7 +572,6 @@ define @rint_nxv2f64( %x) { %a = call @llvm.rint.nxv2f64( %x) ret %a } -declare @llvm.rint.nxv2f64() define @rint_nxv4f64( %x) { ; RV32ZVFH-LABEL: rint_nxv4f64: @@ -643,7 +630,6 @@ define @rint_nxv4f64( %x) { %a = call @llvm.rint.nxv4f64( %x) ret %a } -declare @llvm.rint.nxv4f64() define @rint_nxv8f64( %x) { ; RV32ZVFH-LABEL: rint_nxv8f64: @@ -702,4 +688,3 @@ define @rint_nxv8f64( %x) { %a = call @llvm.rint.nxv8f64( %x) ret %a } -declare @llvm.rint.nxv8f64() diff --git a/llvm/test/CodeGen/RISCV/rvv/frm-insert.ll b/llvm/test/CodeGen/RISCV/rvv/frm-insert.ll index e5c5a83e9b2fd..c057d80a36be3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/frm-insert.ll +++ b/llvm/test/CodeGen/RISCV/rvv/frm-insert.ll @@ -3,12 +3,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs -target-abi=lp64d \ ; RUN: 
-riscv-disable-frm-insert-opt < %s | FileCheck %s --check-prefix=UNOPT -declare @llvm.riscv.vfadd.nxv1f32.nxv1f32( - , - , - , - i64, i64) - ; Test only save/restore frm once. define @test( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: test: @@ -452,7 +446,6 @@ entry: ; Test restoring frm before reading frm and doing nothing with following ; dynamic rounding mode operations. ; TODO: The frrm could be elided. -declare i32 @llvm.get.rounding() define @test5( %0, %1, i64 %2, ptr %p) nounwind { ; CHECK-LABEL: test5: ; CHECK: # %bb.0: # %entry @@ -502,7 +495,6 @@ entry: } ; Test not set FRM for vfadd with DYN after WriteFRMImm. -declare void @llvm.set.rounding(i32) define @after_fsrm1( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: after_fsrm1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/fround-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fround-constrained-sdnode.ll index 295c264e7d924..91897ef7fbac3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fround-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fround-constrained-sdnode.ll @@ -28,7 +28,6 @@ define @round_nxv1f16( %x) strictfp { %a = call @llvm.experimental.constrained.round.nxv1f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.round.nxv1f16(, metadata) define @round_nxv2f16( %x) strictfp { ; CHECK-LABEL: round_nxv2f16: @@ -52,7 +51,6 @@ define @round_nxv2f16( %x) strictfp { %a = call @llvm.experimental.constrained.round.nxv2f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.round.nxv2f16(, metadata) define @round_nxv4f16( %x) strictfp { ; CHECK-LABEL: round_nxv4f16: @@ -76,7 +74,6 @@ define @round_nxv4f16( %x) strictfp { %a = call @llvm.experimental.constrained.round.nxv4f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.round.nxv4f16(, metadata) define @round_nxv8f16( %x) strictfp { ; CHECK-LABEL: round_nxv8f16: @@ -100,7 +97,6 @@ define @round_nxv8f16( %x) strictfp { %a = call 
@llvm.experimental.constrained.round.nxv8f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.round.nxv8f16(, metadata) define @round_nxv16f16( %x) strictfp { ; CHECK-LABEL: round_nxv16f16: @@ -124,7 +120,6 @@ define @round_nxv16f16( %x) strictfp { %a = call @llvm.experimental.constrained.round.nxv16f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.round.nxv16f16(, metadata) define @round_nxv32f16( %x) strictfp { ; CHECK-LABEL: round_nxv32f16: @@ -148,7 +143,6 @@ define @round_nxv32f16( %x) strictfp { %a = call @llvm.experimental.constrained.round.nxv32f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.round.nxv32f16(, metadata) define @round_nxv1f32( %x) strictfp { ; CHECK-LABEL: round_nxv1f32: @@ -171,7 +165,6 @@ define @round_nxv1f32( %x) strictfp { %a = call @llvm.experimental.constrained.round.nxv1f32( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.round.nxv1f32(, metadata) define @round_nxv2f32( %x) strictfp { ; CHECK-LABEL: round_nxv2f32: @@ -194,7 +187,6 @@ define @round_nxv2f32( %x) strictfp { %a = call @llvm.experimental.constrained.round.nxv2f32( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.round.nxv2f32(, metadata) define @round_nxv4f32( %x) strictfp { ; CHECK-LABEL: round_nxv4f32: @@ -217,7 +209,6 @@ define @round_nxv4f32( %x) strictfp { %a = call @llvm.experimental.constrained.round.nxv4f32( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.round.nxv4f32(, metadata) define @round_nxv8f32( %x) strictfp { ; CHECK-LABEL: round_nxv8f32: @@ -240,7 +231,6 @@ define @round_nxv8f32( %x) strictfp { %a = call @llvm.experimental.constrained.round.nxv8f32( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.round.nxv8f32(, metadata) define @round_nxv16f32( %x) strictfp { ; CHECK-LABEL: round_nxv16f32: @@ -263,7 +253,6 @@ 
define @round_nxv16f32( %x) strictfp %a = call @llvm.experimental.constrained.round.nxv16f32( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.round.nxv16f32(, metadata) define @round_nxv1f64( %x) strictfp { ; RV32-LABEL: round_nxv1f64: @@ -305,7 +294,6 @@ define @round_nxv1f64( %x) strictfp { %a = call @llvm.experimental.constrained.round.nxv1f64( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.round.nxv1f64(, metadata) define @round_nxv2f64( %x) strictfp { ; RV32-LABEL: round_nxv2f64: @@ -347,7 +335,6 @@ define @round_nxv2f64( %x) strictfp { %a = call @llvm.experimental.constrained.round.nxv2f64( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.round.nxv2f64(, metadata) define @round_nxv4f64( %x) strictfp { ; RV32-LABEL: round_nxv4f64: @@ -389,7 +376,6 @@ define @round_nxv4f64( %x) strictfp { %a = call @llvm.experimental.constrained.round.nxv4f64( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.round.nxv4f64(, metadata) define @round_nxv8f64( %x) strictfp { ; RV32-LABEL: round_nxv8f64: @@ -431,4 +417,3 @@ define @round_nxv8f64( %x) strictfp { %a = call @llvm.experimental.constrained.round.nxv8f64( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.round.nxv8f64(, metadata) diff --git a/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll index d420636a573fe..0ebc2a82bd828 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll @@ -204,7 +204,6 @@ define @round_nxv1f16( %x) { %a = call @llvm.round.nxv1f16( %x) ret %a } -declare @llvm.round.nxv1f16() define @round_nxv2f16( %x) { ; ZVFH-LABEL: round_nxv2f16: @@ -244,7 +243,6 @@ define @round_nxv2f16( %x) { %a = call @llvm.round.nxv2f16( %x) ret %a } -declare @llvm.round.nxv2f16() define @round_nxv4f16( %x) { ; ZVFH-LABEL: round_nxv4f16: @@ -284,7 +282,6 @@ define 
@round_nxv4f16( %x) { %a = call @llvm.round.nxv4f16( %x) ret %a } -declare @llvm.round.nxv4f16() define @round_nxv8f16( %x) { ; ZVFH-LABEL: round_nxv8f16: @@ -324,7 +321,6 @@ define @round_nxv8f16( %x) { %a = call @llvm.round.nxv8f16( %x) ret %a } -declare @llvm.round.nxv8f16() define @round_nxv16f16( %x) { ; ZVFH-LABEL: round_nxv16f16: @@ -364,7 +360,6 @@ define @round_nxv16f16( %x) { %a = call @llvm.round.nxv16f16( %x) ret %a } -declare @llvm.round.nxv16f16() define @round_nxv32f16( %x) { ; ZVFH-LABEL: round_nxv32f16: @@ -418,7 +413,6 @@ define @round_nxv32f16( %x) { %a = call @llvm.round.nxv32f16( %x) ret %a } -declare @llvm.round.nxv32f16() define @round_nxv1f32( %x) { ; CHECK-LABEL: round_nxv1f32: @@ -438,7 +432,6 @@ define @round_nxv1f32( %x) { %a = call @llvm.round.nxv1f32( %x) ret %a } -declare @llvm.round.nxv1f32() define @round_nxv2f32( %x) { ; CHECK-LABEL: round_nxv2f32: @@ -458,7 +451,6 @@ define @round_nxv2f32( %x) { %a = call @llvm.round.nxv2f32( %x) ret %a } -declare @llvm.round.nxv2f32() define @round_nxv4f32( %x) { ; CHECK-LABEL: round_nxv4f32: @@ -478,7 +470,6 @@ define @round_nxv4f32( %x) { %a = call @llvm.round.nxv4f32( %x) ret %a } -declare @llvm.round.nxv4f32() define @round_nxv8f32( %x) { ; CHECK-LABEL: round_nxv8f32: @@ -498,7 +489,6 @@ define @round_nxv8f32( %x) { %a = call @llvm.round.nxv8f32( %x) ret %a } -declare @llvm.round.nxv8f32() define @round_nxv16f32( %x) { ; CHECK-LABEL: round_nxv16f32: @@ -518,7 +508,6 @@ define @round_nxv16f32( %x) { %a = call @llvm.round.nxv16f32( %x) ret %a } -declare @llvm.round.nxv16f32() define @round_nxv1f64( %x) { ; RV32ZVFH-LABEL: round_nxv1f64: @@ -585,7 +574,6 @@ define @round_nxv1f64( %x) { %a = call @llvm.round.nxv1f64( %x) ret %a } -declare @llvm.round.nxv1f64() define @round_nxv2f64( %x) { ; RV32ZVFH-LABEL: round_nxv2f64: @@ -652,7 +640,6 @@ define @round_nxv2f64( %x) { %a = call @llvm.round.nxv2f64( %x) ret %a } -declare @llvm.round.nxv2f64() define @round_nxv4f64( %x) { ; RV32ZVFH-LABEL: 
round_nxv4f64: @@ -719,7 +706,6 @@ define @round_nxv4f64( %x) { %a = call @llvm.round.nxv4f64( %x) ret %a } -declare @llvm.round.nxv4f64() define @round_nxv8f64( %x) { ; RV32ZVFH-LABEL: round_nxv8f64: @@ -786,4 +772,3 @@ define @round_nxv8f64( %x) { %a = call @llvm.round.nxv8f64( %x) ret %a } -declare @llvm.round.nxv8f64() diff --git a/llvm/test/CodeGen/RISCV/rvv/froundeven-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/froundeven-constrained-sdnode.ll index de766895c734f..cd9d124e4b08c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/froundeven-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/froundeven-constrained-sdnode.ll @@ -28,7 +28,6 @@ define @roundeven_nxv1f16( %x) strictfp { %a = call @llvm.experimental.constrained.roundeven.nxv1f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.roundeven.nxv1f16(, metadata) define @roundeven_nxv2f16( %x) strictfp { ; CHECK-LABEL: roundeven_nxv2f16: @@ -52,7 +51,6 @@ define @roundeven_nxv2f16( %x) strictfp { %a = call @llvm.experimental.constrained.roundeven.nxv2f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.roundeven.nxv2f16(, metadata) define @roundeven_nxv4f16( %x) strictfp { ; CHECK-LABEL: roundeven_nxv4f16: @@ -76,7 +74,6 @@ define @roundeven_nxv4f16( %x) strictfp { %a = call @llvm.experimental.constrained.roundeven.nxv4f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.roundeven.nxv4f16(, metadata) define @roundeven_nxv8f16( %x) strictfp { ; CHECK-LABEL: roundeven_nxv8f16: @@ -100,7 +97,6 @@ define @roundeven_nxv8f16( %x) strictfp { %a = call @llvm.experimental.constrained.roundeven.nxv8f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.roundeven.nxv8f16(, metadata) define @roundeven_nxv16f16( %x) strictfp { ; CHECK-LABEL: roundeven_nxv16f16: @@ -124,7 +120,6 @@ define @roundeven_nxv16f16( %x) strictf %a = call @llvm.experimental.constrained.roundeven.nxv16f16( %x, 
metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.roundeven.nxv16f16(, metadata) define @roundeven_nxv32f16( %x) strictfp { ; CHECK-LABEL: roundeven_nxv32f16: @@ -148,7 +143,6 @@ define @roundeven_nxv32f16( %x) strictf %a = call @llvm.experimental.constrained.roundeven.nxv32f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.roundeven.nxv32f16(, metadata) define @roundeven_nxv1f32( %x) strictfp { ; CHECK-LABEL: roundeven_nxv1f32: @@ -171,7 +165,6 @@ define @roundeven_nxv1f32( %x) strictfp %a = call @llvm.experimental.constrained.roundeven.nxv1f32( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.roundeven.nxv1f32(, metadata) define @roundeven_nxv2f32( %x) strictfp { ; CHECK-LABEL: roundeven_nxv2f32: @@ -194,7 +187,6 @@ define @roundeven_nxv2f32( %x) strictfp %a = call @llvm.experimental.constrained.roundeven.nxv2f32( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.roundeven.nxv2f32(, metadata) define @roundeven_nxv4f32( %x) strictfp { ; CHECK-LABEL: roundeven_nxv4f32: @@ -217,7 +209,6 @@ define @roundeven_nxv4f32( %x) strictfp %a = call @llvm.experimental.constrained.roundeven.nxv4f32( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.roundeven.nxv4f32(, metadata) define @roundeven_nxv8f32( %x) strictfp { ; CHECK-LABEL: roundeven_nxv8f32: @@ -240,7 +231,6 @@ define @roundeven_nxv8f32( %x) strictfp %a = call @llvm.experimental.constrained.roundeven.nxv8f32( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.roundeven.nxv8f32(, metadata) define @roundeven_nxv16f32( %x) strictfp { ; CHECK-LABEL: roundeven_nxv16f32: @@ -263,7 +253,6 @@ define @roundeven_nxv16f32( %x) stric %a = call @llvm.experimental.constrained.roundeven.nxv16f32( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.roundeven.nxv16f32(, metadata) define @roundeven_nxv1f64( %x) 
strictfp { ; RV32-LABEL: roundeven_nxv1f64: @@ -305,7 +294,6 @@ define @roundeven_nxv1f64( %x) strict %a = call @llvm.experimental.constrained.roundeven.nxv1f64( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.roundeven.nxv1f64(, metadata) define @roundeven_nxv2f64( %x) strictfp { ; RV32-LABEL: roundeven_nxv2f64: @@ -347,7 +335,6 @@ define @roundeven_nxv2f64( %x) strict %a = call @llvm.experimental.constrained.roundeven.nxv2f64( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.roundeven.nxv2f64(, metadata) define @roundeven_nxv4f64( %x) strictfp { ; RV32-LABEL: roundeven_nxv4f64: @@ -389,7 +376,6 @@ define @roundeven_nxv4f64( %x) strict %a = call @llvm.experimental.constrained.roundeven.nxv4f64( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.roundeven.nxv4f64(, metadata) define @roundeven_nxv8f64( %x) strictfp { ; RV32-LABEL: roundeven_nxv8f64: @@ -431,4 +417,3 @@ define @roundeven_nxv8f64( %x) strict %a = call @llvm.experimental.constrained.roundeven.nxv8f64( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.roundeven.nxv8f64(, metadata) diff --git a/llvm/test/CodeGen/RISCV/rvv/froundeven-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/froundeven-sdnode.ll index b9121c55684ee..5991f1a5cecfa 100644 --- a/llvm/test/CodeGen/RISCV/rvv/froundeven-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/froundeven-sdnode.ll @@ -203,7 +203,6 @@ define @roundeven_nxv1f16( %x) { %a = call @llvm.roundeven.nxv1f16( %x) ret %a } -declare @llvm.roundeven.nxv1f16() define @roundeven_nxv2f16( %x) { ; ZVFH-LABEL: roundeven_nxv2f16: @@ -243,7 +242,6 @@ define @roundeven_nxv2f16( %x) { %a = call @llvm.roundeven.nxv2f16( %x) ret %a } -declare @llvm.roundeven.nxv2f16() define @roundeven_nxv4f16( %x) { ; ZVFH-LABEL: roundeven_nxv4f16: @@ -283,7 +281,6 @@ define @roundeven_nxv4f16( %x) { %a = call @llvm.roundeven.nxv4f16( %x) ret %a } -declare @llvm.roundeven.nxv4f16() 
define @roundeven_nxv8f16( %x) { ; ZVFH-LABEL: roundeven_nxv8f16: @@ -323,7 +320,6 @@ define @roundeven_nxv8f16( %x) { %a = call @llvm.roundeven.nxv8f16( %x) ret %a } -declare @llvm.roundeven.nxv8f16() define @roundeven_nxv16f16( %x) { ; ZVFH-LABEL: roundeven_nxv16f16: @@ -363,7 +359,6 @@ define @roundeven_nxv16f16( %x) { %a = call @llvm.roundeven.nxv16f16( %x) ret %a } -declare @llvm.roundeven.nxv16f16() define @roundeven_nxv32f16( %x) { ; ZVFH-LABEL: roundeven_nxv32f16: @@ -417,7 +412,6 @@ define @roundeven_nxv32f16( %x) { %a = call @llvm.roundeven.nxv32f16( %x) ret %a } -declare @llvm.roundeven.nxv32f16() define @roundeven_nxv1f32( %x) { ; CHECK-LABEL: roundeven_nxv1f32: @@ -437,7 +431,6 @@ define @roundeven_nxv1f32( %x) { %a = call @llvm.roundeven.nxv1f32( %x) ret %a } -declare @llvm.roundeven.nxv1f32() define @roundeven_nxv2f32( %x) { ; CHECK-LABEL: roundeven_nxv2f32: @@ -457,7 +450,6 @@ define @roundeven_nxv2f32( %x) { %a = call @llvm.roundeven.nxv2f32( %x) ret %a } -declare @llvm.roundeven.nxv2f32() define @roundeven_nxv4f32( %x) { ; CHECK-LABEL: roundeven_nxv4f32: @@ -477,7 +469,6 @@ define @roundeven_nxv4f32( %x) { %a = call @llvm.roundeven.nxv4f32( %x) ret %a } -declare @llvm.roundeven.nxv4f32() define @roundeven_nxv8f32( %x) { ; CHECK-LABEL: roundeven_nxv8f32: @@ -497,7 +488,6 @@ define @roundeven_nxv8f32( %x) { %a = call @llvm.roundeven.nxv8f32( %x) ret %a } -declare @llvm.roundeven.nxv8f32() define @roundeven_nxv16f32( %x) { ; CHECK-LABEL: roundeven_nxv16f32: @@ -517,7 +507,6 @@ define @roundeven_nxv16f32( %x) { %a = call @llvm.roundeven.nxv16f32( %x) ret %a } -declare @llvm.roundeven.nxv16f32() define @roundeven_nxv1f64( %x) { ; RV32ZVFH-LABEL: roundeven_nxv1f64: @@ -584,7 +573,6 @@ define @roundeven_nxv1f64( %x) { %a = call @llvm.roundeven.nxv1f64( %x) ret %a } -declare @llvm.roundeven.nxv1f64() define @roundeven_nxv2f64( %x) { ; RV32ZVFH-LABEL: roundeven_nxv2f64: @@ -651,7 +639,6 @@ define @roundeven_nxv2f64( %x) { %a = call @llvm.roundeven.nxv2f64( 
%x) ret %a } -declare @llvm.roundeven.nxv2f64() define @roundeven_nxv4f64( %x) { ; RV32ZVFH-LABEL: roundeven_nxv4f64: @@ -718,7 +705,6 @@ define @roundeven_nxv4f64( %x) { %a = call @llvm.roundeven.nxv4f64( %x) ret %a } -declare @llvm.roundeven.nxv4f64() define @roundeven_nxv8f64( %x) { ; RV32ZVFH-LABEL: roundeven_nxv8f64: @@ -785,4 +771,3 @@ define @roundeven_nxv8f64( %x) { %a = call @llvm.roundeven.nxv8f64( %x) ret %a } -declare @llvm.roundeven.nxv8f64() diff --git a/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll index 352fda91ab9fa..736dd1225da88 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll @@ -2,7 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v,+m -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+v,+m -verify-machineinstrs < %s | FileCheck %s -declare @llvm.vp.fshr.nxv1i8(, , , , i32) define @fshr_v1i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v1i8: ; CHECK: # %bb.0: @@ -19,7 +18,6 @@ define @fshr_v1i8( %a, %b, ret %res } -declare @llvm.vp.fshl.nxv1i8(, , , , i32) define @fshl_v1i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v1i8: ; CHECK: # %bb.0: @@ -36,7 +34,6 @@ define @fshl_v1i8( %a, %b, ret %res } -declare @llvm.vp.fshr.nxv2i8(, , , , i32) define @fshr_v2i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v2i8: ; CHECK: # %bb.0: @@ -53,7 +50,6 @@ define @fshr_v2i8( %a, %b, ret %res } -declare @llvm.vp.fshl.nxv2i8(, , , , i32) define @fshl_v2i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v2i8: ; CHECK: # %bb.0: @@ -70,7 +66,6 @@ define @fshl_v2i8( %a, %b, ret %res } -declare @llvm.vp.fshr.nxv4i8(, , , , i32) define @fshr_v4i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v4i8: ; CHECK: # %bb.0: @@ -87,7 +82,6 @@ define @fshr_v4i8( %a, %b, ret %res } -declare @llvm.vp.fshl.nxv4i8(, , , , i32) define @fshl_v4i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: 
fshl_v4i8: ; CHECK: # %bb.0: @@ -104,7 +98,6 @@ define @fshl_v4i8( %a, %b, ret %res } -declare @llvm.vp.fshr.nxv8i8(, , , , i32) define @fshr_v8i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v8i8: ; CHECK: # %bb.0: @@ -121,7 +114,6 @@ define @fshr_v8i8( %a, %b, ret %res } -declare @llvm.vp.fshl.nxv8i8(, , , , i32) define @fshl_v8i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v8i8: ; CHECK: # %bb.0: @@ -138,7 +130,6 @@ define @fshl_v8i8( %a, %b, ret %res } -declare @llvm.vp.fshr.nxv16i8(, , , , i32) define @fshr_v16i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v16i8: ; CHECK: # %bb.0: @@ -155,7 +146,6 @@ define @fshr_v16i8( %a, ret %res } -declare @llvm.vp.fshl.nxv16i8(, , , , i32) define @fshl_v16i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v16i8: ; CHECK: # %bb.0: @@ -172,7 +162,6 @@ define @fshl_v16i8( %a, ret %res } -declare @llvm.vp.fshr.nxv32i8(, , , , i32) define @fshr_v32i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v32i8: ; CHECK: # %bb.0: @@ -189,7 +178,6 @@ define @fshr_v32i8( %a, ret %res } -declare @llvm.vp.fshl.nxv32i8(, , , , i32) define @fshl_v32i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v32i8: ; CHECK: # %bb.0: @@ -206,7 +194,6 @@ define @fshl_v32i8( %a, ret %res } -declare @llvm.vp.fshr.nxv64i8(, , , , i32) define @fshr_v64i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v64i8: ; CHECK: # %bb.0: @@ -240,7 +227,6 @@ define @fshr_v64i8( %a, ret %res } -declare @llvm.vp.fshl.nxv64i8(, , , , i32) define @fshl_v64i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v64i8: ; CHECK: # %bb.0: @@ -274,7 +260,6 @@ define @fshl_v64i8( %a, ret %res } -declare @llvm.vp.fshr.nxv1i16(, , , , i32) define @fshr_v1i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v1i16: ; CHECK: # %bb.0: @@ -291,7 +276,6 @@ define @fshr_v1i16( %a, ret %res } -declare @llvm.vp.fshl.nxv1i16(, , , , i32) define @fshl_v1i16( %a, %b, %c, %m, i32 zeroext %evl) { ; 
CHECK-LABEL: fshl_v1i16: ; CHECK: # %bb.0: @@ -308,7 +292,6 @@ define @fshl_v1i16( %a, ret %res } -declare @llvm.vp.fshr.nxv2i16(, , , , i32) define @fshr_v2i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v2i16: ; CHECK: # %bb.0: @@ -325,7 +308,6 @@ define @fshr_v2i16( %a, ret %res } -declare @llvm.vp.fshl.nxv2i16(, , , , i32) define @fshl_v2i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v2i16: ; CHECK: # %bb.0: @@ -342,7 +324,6 @@ define @fshl_v2i16( %a, ret %res } -declare @llvm.vp.fshr.nxv4i16(, , , , i32) define @fshr_v4i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v4i16: ; CHECK: # %bb.0: @@ -359,7 +340,6 @@ define @fshr_v4i16( %a, ret %res } -declare @llvm.vp.fshl.nxv4i16(, , , , i32) define @fshl_v4i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v4i16: ; CHECK: # %bb.0: @@ -376,7 +356,6 @@ define @fshl_v4i16( %a, ret %res } -declare @llvm.vp.fshr.nxv8i16(, , , , i32) define @fshr_v8i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v8i16: ; CHECK: # %bb.0: @@ -393,7 +372,6 @@ define @fshr_v8i16( %a, ret %res } -declare @llvm.vp.fshl.nxv8i16(, , , , i32) define @fshl_v8i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v8i16: ; CHECK: # %bb.0: @@ -410,7 +388,6 @@ define @fshl_v8i16( %a, ret %res } -declare @llvm.vp.fshr.nxv16i16(, , , , i32) define @fshr_v16i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v16i16: ; CHECK: # %bb.0: @@ -427,7 +404,6 @@ define @fshr_v16i16( %a, %res } -declare @llvm.vp.fshl.nxv16i16(, , , , i32) define @fshl_v16i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v16i16: ; CHECK: # %bb.0: @@ -444,7 +420,6 @@ define @fshl_v16i16( %a, %res } -declare @llvm.vp.fshr.nxv32i16(, , , , i32) define @fshr_v32i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v32i16: ; CHECK: # %bb.0: @@ -478,7 +453,6 @@ define @fshr_v32i16( %a, %res } -declare @llvm.vp.fshl.nxv32i16(, , , , i32) define @fshl_v32i16( %a, %b, %c, %m, i32 zeroext %evl) { 
; CHECK-LABEL: fshl_v32i16: ; CHECK: # %bb.0: @@ -512,7 +486,6 @@ define @fshl_v32i16( %a, %res } -declare @llvm.vp.fshr.nxv1i32(, , , , i32) define @fshr_v1i32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v1i32: ; CHECK: # %bb.0: @@ -530,7 +503,6 @@ define @fshr_v1i32( %a, ret %res } -declare @llvm.vp.fshl.nxv1i32(, , , , i32) define @fshl_v1i32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v1i32: ; CHECK: # %bb.0: @@ -548,7 +520,6 @@ define @fshl_v1i32( %a, ret %res } -declare @llvm.vp.fshr.nxv2i32(, , , , i32) define @fshr_v2i32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v2i32: ; CHECK: # %bb.0: @@ -566,7 +537,6 @@ define @fshr_v2i32( %a, ret %res } -declare @llvm.vp.fshl.nxv2i32(, , , , i32) define @fshl_v2i32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v2i32: ; CHECK: # %bb.0: @@ -584,7 +554,6 @@ define @fshl_v2i32( %a, ret %res } -declare @llvm.vp.fshr.nxv4i32(, , , , i32) define @fshr_v4i32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v4i32: ; CHECK: # %bb.0: @@ -602,7 +571,6 @@ define @fshr_v4i32( %a, ret %res } -declare @llvm.vp.fshl.nxv4i32(, , , , i32) define @fshl_v4i32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v4i32: ; CHECK: # %bb.0: @@ -620,7 +588,6 @@ define @fshl_v4i32( %a, ret %res } -declare @llvm.vp.fshr.nxv8i32(, , , , i32) define @fshr_v8i32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v8i32: ; CHECK: # %bb.0: @@ -638,7 +605,6 @@ define @fshr_v8i32( %a, ret %res } -declare @llvm.vp.fshl.nxv8i32(, , , , i32) define @fshl_v8i32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v8i32: ; CHECK: # %bb.0: @@ -656,7 +622,6 @@ define @fshl_v8i32( %a, ret %res } -declare @llvm.vp.fshr.nxv16i32(, , , , i32) define @fshr_v16i32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v16i32: ; CHECK: # %bb.0: @@ -691,7 +656,6 @@ define @fshr_v16i32( %a, %res } -declare @llvm.vp.fshl.nxv16i32(, , , , i32) define @fshl_v16i32( %a, %b, %c, %m, i32 zeroext %evl) { 
; CHECK-LABEL: fshl_v16i32: ; CHECK: # %bb.0: @@ -727,7 +691,6 @@ define @fshl_v16i32( %a, %res } -declare @llvm.vp.fshr.nxv1i64(, , , , i32) define @fshr_v1i64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v1i64: ; CHECK: # %bb.0: @@ -745,7 +708,6 @@ define @fshr_v1i64( %a, ret %res } -declare @llvm.vp.fshl.nxv1i64(, , , , i32) define @fshl_v1i64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v1i64: ; CHECK: # %bb.0: @@ -763,7 +725,6 @@ define @fshl_v1i64( %a, ret %res } -declare @llvm.vp.fshr.nxv2i64(, , , , i32) define @fshr_v2i64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v2i64: ; CHECK: # %bb.0: @@ -781,7 +742,6 @@ define @fshr_v2i64( %a, ret %res } -declare @llvm.vp.fshl.nxv2i64(, , , , i32) define @fshl_v2i64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v2i64: ; CHECK: # %bb.0: @@ -799,7 +759,6 @@ define @fshl_v2i64( %a, ret %res } -declare @llvm.vp.fshr.nxv4i64(, , , , i32) define @fshr_v4i64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v4i64: ; CHECK: # %bb.0: @@ -817,7 +776,6 @@ define @fshr_v4i64( %a, ret %res } -declare @llvm.vp.fshl.nxv4i64(, , , , i32) define @fshl_v4i64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v4i64: ; CHECK: # %bb.0: @@ -835,7 +793,6 @@ define @fshl_v4i64( %a, ret %res } -declare @llvm.vp.fshr.nxv7i64(, , , , i32) define @fshr_v7i64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v7i64: ; CHECK: # %bb.0: @@ -870,7 +827,6 @@ define @fshr_v7i64( %a, ret %res } -declare @llvm.vp.fshl.nxv7i64(, , , , i32) define @fshl_v7i64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v7i64: ; CHECK: # %bb.0: @@ -906,7 +862,6 @@ define @fshl_v7i64( %a, ret %res } -declare @llvm.vp.fshr.nxv8i64(, , , , i32) define @fshr_v8i64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v8i64: ; CHECK: # %bb.0: @@ -941,7 +896,6 @@ define @fshr_v8i64( %a, ret %res } -declare @llvm.vp.fshl.nxv8i64(, , , , i32) define @fshl_v8i64( %a, %b, %c, %m, i32 zeroext %evl) { ; 
CHECK-LABEL: fshl_v8i64: ; CHECK: # %bb.0: @@ -977,7 +931,6 @@ define @fshl_v8i64( %a, ret %res } -declare @llvm.vp.fshr.nxv16i64(, , , , i32) define @fshr_v16i64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v16i64: ; CHECK: # %bb.0: @@ -1082,7 +1035,6 @@ define @fshr_v16i64( %a, %res } -declare @llvm.vp.fshl.nxv16i64(, , , , i32) define @fshl_v16i64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v16i64: ; CHECK: # %bb.0: @@ -1171,7 +1123,6 @@ define @fshl_v16i64( %a, @llvm.vp.fshr.nxv1i9(, , , , i32) define @fshr_v1i9( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v1i9: ; CHECK: # %bb.0: @@ -1194,7 +1145,6 @@ define @fshr_v1i9( %a, %b, ret %res } -declare @llvm.vp.fshl.nxv1i9(, , , , i32) define @fshl_v1i9( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v1i9: ; CHECK: # %bb.0: @@ -1216,9 +1166,6 @@ define @fshl_v1i9( %a, %b, ret %res } -declare @llvm.vp.trunc.nxv1i4.nxv1i8(, , i32) -declare @llvm.vp.zext.nxv1i8.nxv1i4(, , i32) -declare @llvm.vp.fshr.nxv1i4(, , , , i32) define @fshr_v1i4( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshr_v1i4: ; CHECK: # %bb.0: @@ -1240,7 +1187,6 @@ define @fshr_v1i4( %a, %b, ret %res } -declare @llvm.vp.fshl.nxv1i4(, , , , i32) define @fshl_v1i4( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: fshl_v1i4: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fshr-fshl.ll b/llvm/test/CodeGen/RISCV/rvv/fshr-fshl.ll index eae21a76f3f00..fa3ce9428c350 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fshr-fshl.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fshr-fshl.ll @@ -36,5 +36,3 @@ define @fshl( %a, %b, %res } -declare @llvm.fshr.v4i32( %a, %b, %c) -declare @llvm.fshl.v4i32( %a, %b, %c) diff --git a/llvm/test/CodeGen/RISCV/rvv/ftrunc-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ftrunc-constrained-sdnode.ll index 63cb72e8795e1..adeee2bd82b57 100644 --- a/llvm/test/CodeGen/RISCV/rvv/ftrunc-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ftrunc-constrained-sdnode.ll @@ -24,7 +24,6 @@ 
define @trunc_nxv1f16( %x) strictfp { %a = call @llvm.experimental.constrained.trunc.nxv1f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.trunc.nxv1f16(, metadata) define @trunc_nxv2f16( %x) strictfp { ; CHECK-LABEL: trunc_nxv2f16: @@ -46,7 +45,6 @@ define @trunc_nxv2f16( %x) strictfp { %a = call @llvm.experimental.constrained.trunc.nxv2f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.trunc.nxv2f16(, metadata) define @trunc_nxv4f16( %x) strictfp { ; CHECK-LABEL: trunc_nxv4f16: @@ -68,7 +66,6 @@ define @trunc_nxv4f16( %x) strictfp { %a = call @llvm.experimental.constrained.trunc.nxv4f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.trunc.nxv4f16(, metadata) define @trunc_nxv8f16( %x) strictfp { ; CHECK-LABEL: trunc_nxv8f16: @@ -90,7 +87,6 @@ define @trunc_nxv8f16( %x) strictfp { %a = call @llvm.experimental.constrained.trunc.nxv8f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.trunc.nxv8f16(, metadata) define @trunc_nxv16f16( %x) strictfp { ; CHECK-LABEL: trunc_nxv16f16: @@ -112,7 +108,6 @@ define @trunc_nxv16f16( %x) strictfp { %a = call @llvm.experimental.constrained.trunc.nxv16f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.trunc.nxv16f16(, metadata) define @trunc_nxv32f16( %x) strictfp { ; CHECK-LABEL: trunc_nxv32f16: @@ -134,7 +129,6 @@ define @trunc_nxv32f16( %x) strictfp { %a = call @llvm.experimental.constrained.trunc.nxv32f16( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.trunc.nxv32f16(, metadata) define @trunc_nxv1f32( %x) strictfp { ; CHECK-LABEL: trunc_nxv1f32: @@ -155,7 +149,6 @@ define @trunc_nxv1f32( %x) strictfp { %a = call @llvm.experimental.constrained.trunc.nxv1f32( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.trunc.nxv1f32(, metadata) define @trunc_nxv2f32( %x) strictfp { ; CHECK-LABEL: 
trunc_nxv2f32: @@ -176,7 +169,6 @@ define @trunc_nxv2f32( %x) strictfp { %a = call @llvm.experimental.constrained.trunc.nxv2f32( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.trunc.nxv2f32(, metadata) define @trunc_nxv4f32( %x) strictfp { ; CHECK-LABEL: trunc_nxv4f32: @@ -197,7 +189,6 @@ define @trunc_nxv4f32( %x) strictfp { %a = call @llvm.experimental.constrained.trunc.nxv4f32( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.trunc.nxv4f32(, metadata) define @trunc_nxv8f32( %x) strictfp { ; CHECK-LABEL: trunc_nxv8f32: @@ -218,7 +209,6 @@ define @trunc_nxv8f32( %x) strictfp { %a = call @llvm.experimental.constrained.trunc.nxv8f32( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.trunc.nxv8f32(, metadata) define @trunc_nxv16f32( %x) strictfp { ; CHECK-LABEL: trunc_nxv16f32: @@ -239,7 +229,6 @@ define @trunc_nxv16f32( %x) strictfp %a = call @llvm.experimental.constrained.trunc.nxv16f32( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.trunc.nxv16f32(, metadata) define @trunc_nxv1f64( %x) strictfp { ; RV32-LABEL: trunc_nxv1f64: @@ -277,7 +266,6 @@ define @trunc_nxv1f64( %x) strictfp { %a = call @llvm.experimental.constrained.trunc.nxv1f64( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.trunc.nxv1f64(, metadata) define @trunc_nxv2f64( %x) strictfp { ; RV32-LABEL: trunc_nxv2f64: @@ -315,7 +303,6 @@ define @trunc_nxv2f64( %x) strictfp { %a = call @llvm.experimental.constrained.trunc.nxv2f64( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.trunc.nxv2f64(, metadata) define @trunc_nxv4f64( %x) strictfp { ; RV32-LABEL: trunc_nxv4f64: @@ -353,7 +340,6 @@ define @trunc_nxv4f64( %x) strictfp { %a = call @llvm.experimental.constrained.trunc.nxv4f64( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.trunc.nxv4f64(, metadata) define @trunc_nxv8f64( 
%x) strictfp { ; RV32-LABEL: trunc_nxv8f64: @@ -391,4 +377,3 @@ define @trunc_nxv8f64( %x) strictfp { %a = call @llvm.experimental.constrained.trunc.nxv8f64( %x, metadata !"fpexcept.strict") ret %a } -declare @llvm.experimental.constrained.trunc.nxv8f64(, metadata) diff --git a/llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll index 34b3e8d2849b7..811f2a526ac47 100644 --- a/llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll @@ -184,7 +184,6 @@ define @trunc_nxv1f16( %x) { %a = call @llvm.trunc.nxv1f16( %x) ret %a } -declare @llvm.trunc.nxv1f16() define @trunc_nxv2f16( %x) { ; ZVFH-LABEL: trunc_nxv2f16: @@ -220,7 +219,6 @@ define @trunc_nxv2f16( %x) { %a = call @llvm.trunc.nxv2f16( %x) ret %a } -declare @llvm.trunc.nxv2f16() define @trunc_nxv4f16( %x) { ; ZVFH-LABEL: trunc_nxv4f16: @@ -256,7 +254,6 @@ define @trunc_nxv4f16( %x) { %a = call @llvm.trunc.nxv4f16( %x) ret %a } -declare @llvm.trunc.nxv4f16() define @trunc_nxv8f16( %x) { ; ZVFH-LABEL: trunc_nxv8f16: @@ -292,7 +289,6 @@ define @trunc_nxv8f16( %x) { %a = call @llvm.trunc.nxv8f16( %x) ret %a } -declare @llvm.trunc.nxv8f16() define @trunc_nxv16f16( %x) { ; ZVFH-LABEL: trunc_nxv16f16: @@ -328,7 +324,6 @@ define @trunc_nxv16f16( %x) { %a = call @llvm.trunc.nxv16f16( %x) ret %a } -declare @llvm.trunc.nxv16f16() define @trunc_nxv32f16( %x) { ; ZVFH-LABEL: trunc_nxv32f16: @@ -376,7 +371,6 @@ define @trunc_nxv32f16( %x) { %a = call @llvm.trunc.nxv32f16( %x) ret %a } -declare @llvm.trunc.nxv32f16() define @trunc_nxv1f32( %x) { ; CHECK-LABEL: trunc_nxv1f32: @@ -394,7 +388,6 @@ define @trunc_nxv1f32( %x) { %a = call @llvm.trunc.nxv1f32( %x) ret %a } -declare @llvm.trunc.nxv1f32() define @trunc_nxv2f32( %x) { ; CHECK-LABEL: trunc_nxv2f32: @@ -412,7 +405,6 @@ define @trunc_nxv2f32( %x) { %a = call @llvm.trunc.nxv2f32( %x) ret %a } -declare @llvm.trunc.nxv2f32() define @trunc_nxv4f32( %x) { ; CHECK-LABEL: trunc_nxv4f32: @@ -430,7 
+422,6 @@ define @trunc_nxv4f32( %x) { %a = call @llvm.trunc.nxv4f32( %x) ret %a } -declare @llvm.trunc.nxv4f32() define @trunc_nxv8f32( %x) { ; CHECK-LABEL: trunc_nxv8f32: @@ -448,7 +439,6 @@ define @trunc_nxv8f32( %x) { %a = call @llvm.trunc.nxv8f32( %x) ret %a } -declare @llvm.trunc.nxv8f32() define @trunc_nxv16f32( %x) { ; CHECK-LABEL: trunc_nxv16f32: @@ -466,7 +456,6 @@ define @trunc_nxv16f32( %x) { %a = call @llvm.trunc.nxv16f32( %x) ret %a } -declare @llvm.trunc.nxv16f32() define @trunc_nxv1f64( %x) { ; RV32ZVFH-LABEL: trunc_nxv1f64: @@ -525,7 +514,6 @@ define @trunc_nxv1f64( %x) { %a = call @llvm.trunc.nxv1f64( %x) ret %a } -declare @llvm.trunc.nxv1f64() define @trunc_nxv2f64( %x) { ; RV32ZVFH-LABEL: trunc_nxv2f64: @@ -584,7 +572,6 @@ define @trunc_nxv2f64( %x) { %a = call @llvm.trunc.nxv2f64( %x) ret %a } -declare @llvm.trunc.nxv2f64() define @trunc_nxv4f64( %x) { ; RV32ZVFH-LABEL: trunc_nxv4f64: @@ -643,7 +630,6 @@ define @trunc_nxv4f64( %x) { %a = call @llvm.trunc.nxv4f64( %x) ret %a } -declare @llvm.trunc.nxv4f64() define @trunc_nxv8f64( %x) { ; RV32ZVFH-LABEL: trunc_nxv8f64: @@ -702,4 +688,3 @@ define @trunc_nxv8f64( %x) { %a = call @llvm.trunc.nxv8f64( %x) ret %a } -declare @llvm.trunc.nxv8f64() diff --git a/llvm/test/CodeGen/RISCV/rvv/get_vector_length.ll b/llvm/test/CodeGen/RISCV/rvv/get_vector_length.ll index aea688f03cf72..3223bf108ab4e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/get_vector_length.ll +++ b/llvm/test/CodeGen/RISCV/rvv/get_vector_length.ll @@ -2,10 +2,6 @@ ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+m,+v -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32 ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv64 -mattr=+m,+v -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare i32 @llvm.experimental.get.vector.length.i16(i16, i32, i1) -declare i32 @llvm.experimental.get.vector.length.i32(i32, i32, i1) -declare i32 @llvm.experimental.get.vector.length.i64(i64, i32, i1) - define i32 
@vector_length_i16(i16 zeroext %tc) { ; CHECK-LABEL: vector_length_i16: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/half-round-conv.ll b/llvm/test/CodeGen/RISCV/rvv/half-round-conv.ll index d7bf566b9b5f4..6413b914b6440 100644 --- a/llvm/test/CodeGen/RISCV/rvv/half-round-conv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/half-round-conv.ll @@ -8,8 +8,6 @@ ; trunc ; ================================================================================ -declare @llvm.trunc.nxv1f16() - define @trunc_nxv1f16_to_si8( %x) { ; CHECK-LABEL: trunc_nxv1f16_to_si8: ; CHECK: # %bb.0: @@ -128,8 +126,6 @@ define @trunc_nxv1f16_to_ui64( %x) { ; trunc ; ================================================================================ -declare @llvm.trunc.nxv4f16() - define @trunc_nxv4f16_to_si8( %x) { ; CHECK-LABEL: trunc_nxv4f16_to_si8: ; CHECK: # %bb.0: @@ -248,8 +244,6 @@ define @trunc_nxv4f16_to_ui64( %x) { ; ceil ; ================================================================================ -declare @llvm.ceil.nxv1f16() - define @ceil_nxv1f16_to_si8( %x) { ; CHECK-LABEL: ceil_nxv1f16_to_si8: ; CHECK: # %bb.0: @@ -452,8 +446,6 @@ define @ceil_nxv1f16_to_ui64( %x) { ; ceil ; ================================================================================ -declare @llvm.ceil.nxv4f16() - define @ceil_nxv4f16_to_si8( %x) { ; CHECK-LABEL: ceil_nxv4f16_to_si8: ; CHECK: # %bb.0: @@ -656,8 +648,6 @@ define @ceil_nxv4f16_to_ui64( %x) { ; rint ; ================================================================================ -declare @llvm.rint.nxv1f16() - define @rint_nxv1f16_to_si8( %x) { ; CHECK-LABEL: rint_nxv1f16_to_si8: ; CHECK: # %bb.0: @@ -844,8 +834,6 @@ define @rint_nxv1f16_to_ui64( %x) { ; rint ; ================================================================================ -declare @llvm.rint.nxv4f16() - define @rint_nxv4f16_to_si8( %x) { ; CHECK-LABEL: rint_nxv4f16_to_si8: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/implicit-def-copy.ll 
b/llvm/test/CodeGen/RISCV/rvv/implicit-def-copy.ll index 9475989d46343..c39630ae07e27 100644 --- a/llvm/test/CodeGen/RISCV/rvv/implicit-def-copy.ll +++ b/llvm/test/CodeGen/RISCV/rvv/implicit-def-copy.ll @@ -19,4 +19,3 @@ define @vpload_nxv8i64(ptr %ptr, %m, i32 ze ret %load } -declare @llvm.vp.load.nxv8i64.p0(ptr, , i32) diff --git a/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll index 0135ce790610d..962fa729722cd 100644 --- a/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll @@ -471,8 +471,6 @@ define @insert_nxv4i1_nxv1i1_2( %v, %vec } -declare @llvm.vector.insert.nxv8i64.nxv16i64(, , i64) - define void @insert_nxv8i64_nxv16i64( %sv0, %sv1, ptr %out) { ; CHECK-LABEL: insert_nxv8i64_nxv16i64: ; CHECK: # %bb.0: @@ -642,25 +640,3 @@ define @insert_splat_to_splat2() { attributes #0 = { vscale_range(2,1024) } -declare @llvm.vector.insert.nxv1i1.nxv4i1(, , i64) -declare @llvm.vector.insert.nxv8i1.nxv32i1(, , i64) - -declare @llvm.vector.insert.nxv1i8.nxv16i8(, , i64) - -declare @llvm.vector.insert.nxv1f16.nxv32f16(, , i64) -declare @llvm.vector.insert.nxv2f16.nxv32f16(, , i64) - -declare @llvm.vector.insert.nxv1i8.nxv4i8(, , i64 %idx) - -declare @llvm.vector.insert.nxv2i32.nxv4i32(, , i64) -declare @llvm.vector.insert.nxv4i32.v2i32(, <2 x i32>, i64) - -declare @llvm.vector.insert.nxv2i32.nxv8i32(, , i64 %idx) -declare @llvm.vector.insert.nxv4i32.nxv8i32(, , i64 %idx) - -declare @llvm.vector.insert.nxv1i32.nxv16i32(, , i64 %idx) -declare @llvm.vector.insert.nxv2i32.nxv16i32(, , i64 %idx) -declare @llvm.vector.insert.nxv4i32.nxv16i32(, , i64 %idx) -declare @llvm.vector.insert.nxv8i32.nxv16i32(, , i64 %idx) - -declare @llvm.vector.insert.nxv2i64.v3i64(, <3 x i64>, i64 %idx) diff --git a/llvm/test/CodeGen/RISCV/rvv/llrint-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/llrint-sdnode.ll index 6bc934cbdf0d8..423406b511261 100644 --- a/llvm/test/CodeGen/RISCV/rvv/llrint-sdnode.ll 
+++ b/llvm/test/CodeGen/RISCV/rvv/llrint-sdnode.ll @@ -14,7 +14,6 @@ define @llrint_nxv1i64_nxv1f32( %x) { %a = call @llvm.llrint.nxv1i64.nxv1f32( %x) ret %a } -declare @llvm.llrint.nxv1i64.nxv1f32() define @llrint_nxv2i64_nxv2f32( %x) { ; CHECK-LABEL: llrint_nxv2i64_nxv2f32: @@ -26,7 +25,6 @@ define @llrint_nxv2i64_nxv2f32( %x) { %a = call @llvm.llrint.nxv2i64.nxv2f32( %x) ret %a } -declare @llvm.llrint.nxv2i64.nxv2f32() define @llrint_nxv4i64_nxv4f32( %x) { ; CHECK-LABEL: llrint_nxv4i64_nxv4f32: @@ -38,7 +36,6 @@ define @llrint_nxv4i64_nxv4f32( %x) { %a = call @llvm.llrint.nxv4i64.nxv4f32( %x) ret %a } -declare @llvm.llrint.nxv4i64.nxv4f32() define @llrint_nxv8i64_nxv8f32( %x) { ; CHECK-LABEL: llrint_nxv8i64_nxv8f32: @@ -50,7 +47,6 @@ define @llrint_nxv8i64_nxv8f32( %x) { %a = call @llvm.llrint.nxv8i64.nxv8f32( %x) ret %a } -declare @llvm.llrint.nxv8i64.nxv8f32() define @llrint_nxv16i64_nxv16f32( %x) { ; CHECK-LABEL: llrint_nxv16i64_nxv16f32: @@ -63,7 +59,6 @@ define @llrint_nxv16i64_nxv16f32( %x) { %a = call @llvm.llrint.nxv16i64.nxv16f32( %x) ret %a } -declare @llvm.llrint.nxv16i64.nxv16f32() define @llrint_nxv1i64_nxv1f64( %x) { ; CHECK-LABEL: llrint_nxv1i64_nxv1f64: @@ -74,7 +69,6 @@ define @llrint_nxv1i64_nxv1f64( %x) { %a = call @llvm.llrint.nxv1i64.nxv1f64( %x) ret %a } -declare @llvm.llrint.nxv1i64.nxv1f64() define @llrint_nxv2i64_nxv2f64( %x) { ; CHECK-LABEL: llrint_nxv2i64_nxv2f64: @@ -85,7 +79,6 @@ define @llrint_nxv2i64_nxv2f64( %x) { %a = call @llvm.llrint.nxv2i64.nxv2f64( %x) ret %a } -declare @llvm.llrint.nxv2i64.nxv2f64() define @llrint_nxv4i64_nxv4f64( %x) { ; CHECK-LABEL: llrint_nxv4i64_nxv4f64: @@ -96,7 +89,6 @@ define @llrint_nxv4i64_nxv4f64( %x) { %a = call @llvm.llrint.nxv4i64.nxv4f64( %x) ret %a } -declare @llvm.llrint.nxv4i64.nxv4f64() define @llrint_nxv8i64_nxv8f64( %x) { ; CHECK-LABEL: llrint_nxv8i64_nxv8f64: @@ -107,7 +99,6 @@ define @llrint_nxv8i64_nxv8f64( %x) { %a = call @llvm.llrint.nxv8i64.nxv8f64( %x) ret %a } -declare 
@llvm.llrint.nxv8i64.nxv8f64() define @llrint_nxv1f16( %x) { ; CHECK-LABEL: llrint_nxv1f16: @@ -120,7 +111,6 @@ define @llrint_nxv1f16( %x) { %a = call @llvm.llrint.nxv1i64.nxv1f16( %x) ret %a } -declare @llvm.llrint.nxv1i64.nxv1f16() define @llrint_nxv2f16( %x) { ; CHECK-LABEL: llrint_nxv2f16: @@ -133,7 +123,6 @@ define @llrint_nxv2f16( %x) { %a = call @llvm.llrint.nxv2i64.nxv2f16( %x) ret %a } -declare @llvm.llrint.nxv2i64.nxv2f16() define @llrint_nxv4f16( %x) { ; CHECK-LABEL: llrint_nxv4f16: @@ -146,7 +135,6 @@ define @llrint_nxv4f16( %x) { %a = call @llvm.llrint.nxv4i64.nxv4f16( %x) ret %a } -declare @llvm.llrint.nxv4i64.nxv4f16() define @llrint_nxv8f16( %x) { ; CHECK-LABEL: llrint_nxv8f16: @@ -159,7 +147,6 @@ define @llrint_nxv8f16( %x) { %a = call @llvm.llrint.nxv8i64.nxv8f16( %x) ret %a } -declare @llvm.llrint.nxv8i64.nxv8f16() define @llrint_nxv16f16( %x) { ; CHECK-LABEL: llrint_nxv16f16: @@ -174,7 +161,6 @@ define @llrint_nxv16f16( %x) { %a = call @llvm.llrint.nxv16i64.nxv16f16( %x) ret %a } -declare @llvm.llrint.nxv16i64.nxv16f16() define @llrint_nxv1bf16( %x) { ; CHECK-LABEL: llrint_nxv1bf16: @@ -187,7 +173,6 @@ define @llrint_nxv1bf16( %x) { %a = call @llvm.llrint.nxv1i64.nxv1bf16( %x) ret %a } -declare @llvm.llrint.nxv1i64.nxv1bf16() define @llrint_nxv2bf16( %x) { ; CHECK-LABEL: llrint_nxv2bf16: @@ -200,7 +185,6 @@ define @llrint_nxv2bf16( %x) { %a = call @llvm.llrint.nxv2i64.nxv2bf16( %x) ret %a } -declare @llvm.llrint.nxv2i64.nxv2bf16() define @llrint_nxv4bf16( %x) { ; CHECK-LABEL: llrint_nxv4bf16: @@ -213,7 +197,6 @@ define @llrint_nxv4bf16( %x) { %a = call @llvm.llrint.nxv4i64.nxv4bf16( %x) ret %a } -declare @llvm.llrint.nxv4i64.nxv4bf16() define @llrint_nxv8bf16( %x) { ; CHECK-LABEL: llrint_nxv8bf16: @@ -226,7 +209,6 @@ define @llrint_nxv8bf16( %x) { %a = call @llvm.llrint.nxv8i64.nxv8bf16( %x) ret %a } -declare @llvm.llrint.nxv8i64.nxv8bf16() define @llrint_nxv16bf16( %x) { ; CHECK-LABEL: llrint_nxv16bf16: @@ -241,4 +223,3 @@ define 
@llrint_nxv16bf16( %x) { %a = call @llvm.llrint.nxv16i64.nxv16bf16( %x) ret %a } -declare @llvm.llrint.nxv16i64.nxv16bf16() diff --git a/llvm/test/CodeGen/RISCV/rvv/llrint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/llrint-vp.ll index dbe2d03e1a909..c0a794afac3ae 100644 --- a/llvm/test/CodeGen/RISCV/rvv/llrint-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/llrint-vp.ll @@ -14,7 +14,6 @@ define @llrint_nxv1i64_nxv1f32( %x, @llvm.vp.llrint.nxv1i64.nxv1f32( %x, %m, i32 %evl) ret %a } -declare @llvm.vp.llrint.nxv1i64.nxv1f32(, , i32) define @llrint_nxv2i64_nxv2f32( %x, %m, i32 zeroext %evl) { ; CHECK-LABEL: llrint_nxv2i64_nxv2f32: @@ -26,7 +25,6 @@ define @llrint_nxv2i64_nxv2f32( %x, @llvm.vp.llrint.nxv2i64.nxv2f32( %x, %m, i32 %evl) ret %a } -declare @llvm.vp.llrint.nxv2i64.nxv2f32(, , i32) define @llrint_nxv4i64_nxv4f32( %x, %m, i32 zeroext %evl) { ; CHECK-LABEL: llrint_nxv4i64_nxv4f32: @@ -38,7 +36,6 @@ define @llrint_nxv4i64_nxv4f32( %x, @llvm.vp.llrint.nxv4i64.nxv4f32( %x, %m, i32 %evl) ret %a } -declare @llvm.vp.llrint.nxv4i64.nxv4f32(, , i32) define @llrint_nxv8i64_nxv8f32( %x, %m, i32 zeroext %evl) { ; CHECK-LABEL: llrint_nxv8i64_nxv8f32: @@ -50,7 +47,6 @@ define @llrint_nxv8i64_nxv8f32( %x, @llvm.vp.llrint.nxv8i64.nxv8f32( %x, %m, i32 %evl) ret %a } -declare @llvm.vp.llrint.nxv8i64.nxv8f32(, , i32) define @llrint_nxv16i64_nxv16f32( %x, %m, i32 zeroext %evl) { ; CHECK-LABEL: llrint_nxv16i64_nxv16f32: @@ -78,7 +74,6 @@ define @llrint_nxv16i64_nxv16f32( %x, < %a = call @llvm.vp.llrint.nxv16i64.nxv16f32( %x, %m, i32 %evl) ret %a } -declare @llvm.vp.llrint.nxv16i64.nxv16f32(, , i32) define @llrint_nxv1i64_nxv1f64( %x, %m, i32 zeroext %evl) { ; CHECK-LABEL: llrint_nxv1i64_nxv1f64: @@ -89,7 +84,6 @@ define @llrint_nxv1i64_nxv1f64( %x, @llvm.vp.llrint.nxv1i64.nxv1f64( %x, %m, i32 %evl) ret %a } -declare @llvm.vp.llrint.nxv1i64.nxv1f64(, , i32) define @llrint_nxv2i64_nxv2f64( %x, %m, i32 zeroext %evl) { ; CHECK-LABEL: llrint_nxv2i64_nxv2f64: @@ -100,7 +94,6 @@ define 
@llrint_nxv2i64_nxv2f64( %x, @llvm.vp.llrint.nxv2i64.nxv2f64( %x, %m, i32 %evl) ret %a } -declare @llvm.vp.llrint.nxv2i64.nxv2f64(, , i32) define @llrint_nxv4i64_nxv4f64( %x, %m, i32 zeroext %evl) { ; CHECK-LABEL: llrint_nxv4i64_nxv4f64: @@ -111,7 +104,6 @@ define @llrint_nxv4i64_nxv4f64( %x, @llvm.vp.llrint.nxv4i64.nxv4f64( %x, %m, i32 %evl) ret %a } -declare @llvm.vp.llrint.nxv4i64.nxv4f64(, , i32) define @llrint_nxv8i64_nxv8f64( %x, %m, i32 zeroext %evl) { ; CHECK-LABEL: llrint_nxv8i64_nxv8f64: @@ -122,4 +114,3 @@ define @llrint_nxv8i64_nxv8f64( %x, @llvm.vp.llrint.nxv8i64.nxv8f64( %x, %m, i32 %evl) ret %a } -declare @llvm.vp.llrint.nxv8i64.nxv8f64(, , i32) diff --git a/llvm/test/CodeGen/RISCV/rvv/llround-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/llround-sdnode.ll index 5b4c7ba91400f..61cf3da7757f6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/llround-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/llround-sdnode.ll @@ -16,7 +16,6 @@ define @llround_nxv1i64_nxv1f32( %x) { %a = call @llvm.llround.nxv1i64.nxv1f32( %x) ret %a } -declare @llvm.llround.nxv1i64.nxv1f32() define @llround_nxv2i64_nxv2f32( %x) { ; CHECK-LABEL: llround_nxv2i64_nxv2f32: @@ -30,7 +29,6 @@ define @llround_nxv2i64_nxv2f32( %x) { %a = call @llvm.llround.nxv2i64.nxv2f32( %x) ret %a } -declare @llvm.llround.nxv2i64.nxv2f32() define @llround_nxv4i64_nxv4f32( %x) { ; CHECK-LABEL: llround_nxv4i64_nxv4f32: @@ -44,7 +42,6 @@ define @llround_nxv4i64_nxv4f32( %x) { %a = call @llvm.llround.nxv4i64.nxv4f32( %x) ret %a } -declare @llvm.llround.nxv4i64.nxv4f32() define @llround_nxv8i64_nxv8f32( %x) { ; CHECK-LABEL: llround_nxv8i64_nxv8f32: @@ -58,7 +55,6 @@ define @llround_nxv8i64_nxv8f32( %x) { %a = call @llvm.llround.nxv8i64.nxv8f32( %x) ret %a } -declare @llvm.llround.nxv8i64.nxv8f32() define @llround_nxv16i64_nxv16f32( %x) { ; CHECK-LABEL: llround_nxv16i64_nxv16f32: @@ -73,7 +69,6 @@ define @llround_nxv16i64_nxv16f32( %x) %a = call @llvm.llround.nxv16i64.nxv16f32( %x) ret %a } -declare 
@llvm.llround.nxv16i64.nxv16f32() define @llround_nxv1i64_nxv1f64( %x) { ; CHECK-LABEL: llround_nxv1i64_nxv1f64: @@ -86,7 +81,6 @@ define @llround_nxv1i64_nxv1f64( %x) { %a = call @llvm.llround.nxv1i64.nxv1f64( %x) ret %a } -declare @llvm.llround.nxv1i64.nxv1f64() define @llround_nxv2i64_nxv2f64( %x) { ; CHECK-LABEL: llround_nxv2i64_nxv2f64: @@ -99,7 +93,6 @@ define @llround_nxv2i64_nxv2f64( %x) { %a = call @llvm.llround.nxv2i64.nxv2f64( %x) ret %a } -declare @llvm.llround.nxv2i64.nxv2f64() define @llround_nxv4i64_nxv4f64( %x) { ; CHECK-LABEL: llround_nxv4i64_nxv4f64: @@ -112,7 +105,6 @@ define @llround_nxv4i64_nxv4f64( %x) { %a = call @llvm.llround.nxv4i64.nxv4f64( %x) ret %a } -declare @llvm.llround.nxv4i64.nxv4f64() define @llround_nxv8i64_nxv8f64( %x) { ; CHECK-LABEL: llround_nxv8i64_nxv8f64: @@ -125,7 +117,6 @@ define @llround_nxv8i64_nxv8f64( %x) { %a = call @llvm.llround.nxv8i64.nxv8f64( %x) ret %a } -declare @llvm.llround.nxv8i64.nxv8f64() define @llround_nxv1f16( %x) { ; CHECK-LABEL: llround_nxv1f16: @@ -140,7 +131,6 @@ define @llround_nxv1f16( %x) { %a = call @llvm.llround.nxv1i64.nxv1f16( %x) ret %a } -declare @llvm.llround.nxv1i64.nxv1f16() define @llround_nxv2f16( %x) { ; CHECK-LABEL: llround_nxv2f16: @@ -155,7 +145,6 @@ define @llround_nxv2f16( %x) { %a = call @llvm.llround.nxv2i64.nxv2f16( %x) ret %a } -declare @llvm.llround.nxv2i64.nxv2f16() define @llround_nxv4f16( %x) { ; CHECK-LABEL: llround_nxv4f16: @@ -170,7 +159,6 @@ define @llround_nxv4f16( %x) { %a = call @llvm.llround.nxv4i64.nxv4f16( %x) ret %a } -declare @llvm.llround.nxv4i64.nxv4f16() define @llround_nxv8f16( %x) { ; CHECK-LABEL: llround_nxv8f16: @@ -185,7 +173,6 @@ define @llround_nxv8f16( %x) { %a = call @llvm.llround.nxv8i64.nxv8f16( %x) ret %a } -declare @llvm.llround.nxv8i64.nxv8f16() define @llround_nxv16f16( %x) { ; CHECK-LABEL: llround_nxv16f16: @@ -202,7 +189,6 @@ define @llround_nxv16f16( %x) { %a = call @llvm.llround.nxv16i64.nxv16f16( %x) ret %a } -declare 
@llvm.llround.nxv16i64.nxv16f16() define @llround_nxv1bf16( %x) { ; CHECK-LABEL: llround_nxv1bf16: @@ -217,7 +203,6 @@ define @llround_nxv1bf16( %x) { %a = call @llvm.llround.nxv1i64.nxv1bf16( %x) ret %a } -declare @llvm.llround.nxv1i64.nxv1bf16() define @llround_nxv2bf16( %x) { ; CHECK-LABEL: llround_nxv2bf16: @@ -232,7 +217,6 @@ define @llround_nxv2bf16( %x) { %a = call @llvm.llround.nxv2i64.nxv2bf16( %x) ret %a } -declare @llvm.llround.nxv2i64.nxv2bf16() define @llround_nxv4bf16( %x) { ; CHECK-LABEL: llround_nxv4bf16: @@ -247,7 +231,6 @@ define @llround_nxv4bf16( %x) { %a = call @llvm.llround.nxv4i64.nxv4bf16( %x) ret %a } -declare @llvm.llround.nxv4i64.nxv4bf16() define @llround_nxv8bf16( %x) { ; CHECK-LABEL: llround_nxv8bf16: @@ -262,7 +245,6 @@ define @llround_nxv8bf16( %x) { %a = call @llvm.llround.nxv8i64.nxv8bf16( %x) ret %a } -declare @llvm.llround.nxv8i64.nxv8bf16() define @llround_nxv16bf16( %x) { ; CHECK-LABEL: llround_nxv16bf16: @@ -279,4 +261,3 @@ define @llround_nxv16bf16( %x) { %a = call @llvm.llround.nxv16i64.nxv16bf16( %x) ret %a } -declare @llvm.llround.nxv16i64.nxv16bf16() diff --git a/llvm/test/CodeGen/RISCV/rvv/lrint-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/lrint-sdnode.ll index 6df738fd72854..ba71f9d7321c6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/lrint-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/lrint-sdnode.ll @@ -28,7 +28,6 @@ define @lrint_nxv1f32( %x) { %a = call @llvm.lrint.nxv1iXLen.nxv1f32( %x) ret %a } -declare @llvm.lrint.nxv1iXLen.nxv1f32() define @lrint_nxv2f32( %x) { ; RV32-LABEL: lrint_nxv2f32: @@ -52,7 +51,6 @@ define @lrint_nxv2f32( %x) { %a = call @llvm.lrint.nxv2iXLen.nxv2f32( %x) ret %a } -declare @llvm.lrint.nxv2iXLen.nxv2f32() define @lrint_nxv4f32( %x) { ; RV32-LABEL: lrint_nxv4f32: @@ -76,7 +74,6 @@ define @lrint_nxv4f32( %x) { %a = call @llvm.lrint.nxv4iXLen.nxv4f32( %x) ret %a } -declare @llvm.lrint.nxv4iXLen.nxv4f32() define @lrint_nxv8f32( %x) { ; RV32-LABEL: lrint_nxv8f32: @@ -100,7 +97,6 @@ define @lrint_nxv8f32( 
%x) { %a = call @llvm.lrint.nxv8iXLen.nxv8f32( %x) ret %a } -declare @llvm.lrint.nxv8iXLen.nxv8f32() define @lrint_nxv16f32( %x) { ; RV32-LABEL: lrint_nxv16f32: @@ -125,7 +121,6 @@ define @lrint_nxv16f32( %x) { %a = call @llvm.lrint.nxv16iXLen.nxv16f32( %x) ret %a } -declare @llvm.lrint.nxv16iXLen.nxv16f32() define @lrint_nxv1f64( %x) { ; RV32-LABEL: lrint_nxv1f64: @@ -150,7 +145,6 @@ define @lrint_nxv1f64( %x) { %a = call @llvm.lrint.nxv1iXLen.nxv1f64( %x) ret %a } -declare @llvm.lrint.nxv1iXLen.nxv1f64() define @lrint_nxv2f64( %x) { ; RV32-LABEL: lrint_nxv2f64: @@ -175,7 +169,6 @@ define @lrint_nxv2f64( %x) { %a = call @llvm.lrint.nxv2iXLen.nxv2f64( %x) ret %a } -declare @llvm.lrint.nxv2iXLen.nxv2f64() define @lrint_nxv4f64( %x) { ; RV32-LABEL: lrint_nxv4f64: @@ -200,7 +193,6 @@ define @lrint_nxv4f64( %x) { %a = call @llvm.lrint.nxv4iXLen.nxv4f64( %x) ret %a } -declare @llvm.lrint.nxv4iXLen.nxv4f64() define @lrint_nxv8f64( %x) { ; RV32-LABEL: lrint_nxv8f64: @@ -225,7 +217,6 @@ define @lrint_nxv8f64( %x) { %a = call @llvm.lrint.nxv8iXLen.nxv8f64( %x) ret %a } -declare @llvm.lrint.nxv8iXLen.nxv8f64() define @lrint_nxv1f16( %x) { ; RV32-LABEL: lrint_nxv1f16: @@ -254,7 +245,6 @@ define @lrint_nxv1f16( %x) { %a = call @llvm.lrint.nxv1iXLen.nxv1f16( %x) ret %a } -declare @llvm.lrint.nxv1iXLen.nxv1f16() define @lrint_nxv2f16( %x) { ; RV32-LABEL: lrint_nxv2f16: @@ -283,7 +273,6 @@ define @lrint_nxv2f16( %x) { %a = call @llvm.lrint.nxv2iXLen.nxv2f16( %x) ret %a } -declare @llvm.lrint.nxv2iXLen.nxv2f16() define @lrint_nxv4f16( %x) { ; RV32-LABEL: lrint_nxv4f16: @@ -312,7 +301,6 @@ define @lrint_nxv4f16( %x) { %a = call @llvm.lrint.nxv4iXLen.nxv4f16( %x) ret %a } -declare @llvm.lrint.nxv4iXLen.nxv4f16() define @lrint_nxv8f16( %x) { ; RV32-LABEL: lrint_nxv8f16: @@ -341,7 +329,6 @@ define @lrint_nxv8f16( %x) { %a = call @llvm.lrint.nxv8iXLen.nxv8f16( %x) ret %a } -declare @llvm.lrint.nxv8iXLen.nxv8f16() define @lrint_nxv16f16( %x) { ; RV32-LABEL: lrint_nxv16f16: @@ -372,7 
+359,6 @@ define @lrint_nxv16f16( %x) { %a = call @llvm.lrint.nxv16iXLen.nxv16f16( %x) ret %a } -declare @llvm.lrint.nxv16iXLen.nxv16f16() define @lrint_nxv1bf16( %x) { ; RV32-LABEL: lrint_nxv1bf16: @@ -401,7 +387,6 @@ define @lrint_nxv1bf16( %x) { %a = call @llvm.lrint.nxv1iXLen.nxv1bf16( %x) ret %a } -declare @llvm.lrint.nxv1iXLen.nxv1bf16() define @lrint_nxv2bf16( %x) { ; RV32-LABEL: lrint_nxv2bf16: @@ -430,7 +415,6 @@ define @lrint_nxv2bf16( %x) { %a = call @llvm.lrint.nxv2iXLen.nxv2bf16( %x) ret %a } -declare @llvm.lrint.nxv2iXLen.nxv2bf16() define @lrint_nxv4bf16( %x) { ; RV32-LABEL: lrint_nxv4bf16: @@ -459,7 +443,6 @@ define @lrint_nxv4bf16( %x) { %a = call @llvm.lrint.nxv4iXLen.nxv4bf16( %x) ret %a } -declare @llvm.lrint.nxv4iXLen.nxv4bf16() define @lrint_nxv8bf16( %x) { ; RV32-LABEL: lrint_nxv8bf16: @@ -488,7 +471,6 @@ define @lrint_nxv8bf16( %x) { %a = call @llvm.lrint.nxv8iXLen.nxv8bf16( %x) ret %a } -declare @llvm.lrint.nxv8iXLen.nxv8bf16() define @lrint_nxv16bf16( %x) { ; RV32-LABEL: lrint_nxv16bf16: @@ -519,7 +501,6 @@ define @lrint_nxv16bf16( %x) { %a = call @llvm.lrint.nxv16iXLen.nxv16bf16( %x) ret %a } -declare @llvm.lrint.nxv16iXLen.nxv16bf16() define @lrint_nxv32bf16( %x) { ; RV32-LABEL: lrint_nxv32bf16: @@ -636,4 +617,3 @@ define @lrint_nxv32bf16( %x) { %a = call @llvm.lrint.nxv32iXLen.nxv32bf16( %x) ret %a } -declare @llvm.lrint.nxv32iXLen.nxv32bf16() diff --git a/llvm/test/CodeGen/RISCV/rvv/lrint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/lrint-vp.ll index 98d32b36c23c1..c09df1a60d2ae 100644 --- a/llvm/test/CodeGen/RISCV/rvv/lrint-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/lrint-vp.ll @@ -28,7 +28,6 @@ define @lrint_nxv1f32( %x, @llvm.vp.lrint.nxv1iXLen.nxv1f32( %x, %m, i32 %evl) ret %a } -declare @llvm.vp.lrint.nxv1iXLen.nxv1f32(, , i32) define @lrint_nxv2f32( %x, %m, i32 zeroext %evl) { ; RV32-LABEL: lrint_nxv2f32: @@ -52,7 +51,6 @@ define @lrint_nxv2f32( %x, @llvm.vp.lrint.nxv2iXLen.nxv2f32( %x, %m, i32 %evl) ret %a } -declare 
@llvm.vp.lrint.nxv2iXLen.nxv2f32(, , i32) define @lrint_nxv4f32( %x, %m, i32 zeroext %evl) { ; RV32-LABEL: lrint_nxv4f32: @@ -76,7 +74,6 @@ define @lrint_nxv4f32( %x, @llvm.vp.lrint.nxv4iXLen.nxv4f32( %x, %m, i32 %evl) ret %a } -declare @llvm.vp.lrint.nxv4iXLen.nxv4f32(, , i32) define @lrint_nxv8f32( %x, %m, i32 zeroext %evl) { ; RV32-LABEL: lrint_nxv8f32: @@ -100,7 +97,6 @@ define @lrint_nxv8f32( %x, @llvm.vp.lrint.nxv8iXLen.nxv8f32( %x, %m, i32 %evl) ret %a } -declare @llvm.vp.lrint.nxv8iXLen.nxv8f32(, , i32) define @lrint_nxv16f32( %x, %m, i32 zeroext %evl) { ; RV32-LABEL: lrint_nxv16f32: @@ -140,7 +136,6 @@ define @lrint_nxv16f32( %x, @llvm.vp.lrint.nxv16iXLen.nxv16f32( %x, %m, i32 %evl) ret %a } -declare @llvm.vp.lrint.nxv16iXLen.nxv16f32(, , i32) define @lrint_nxv1f64( %x, %m, i32 zeroext %evl) { ; RV32-LABEL: lrint_nxv1f64: @@ -165,7 +160,6 @@ define @lrint_nxv1f64( %x, @llvm.vp.lrint.nxv1iXLen.nxv1f64( %x, %m, i32 %evl) ret %a } -declare @llvm.vp.lrint.nxv1iXLen.nxv1f64(, , i32) define @lrint_nxv2f64( %x, %m, i32 zeroext %evl) { ; RV32-LABEL: lrint_nxv2f64: @@ -190,7 +184,6 @@ define @lrint_nxv2f64( %x, @llvm.vp.lrint.nxv2iXLen.nxv2f64( %x, %m, i32 %evl) ret %a } -declare @llvm.vp.lrint.nxv2iXLen.nxv2f64(, , i32) define @lrint_nxv4f64( %x, %m, i32 zeroext %evl) { ; RV32-LABEL: lrint_nxv4f64: @@ -215,7 +208,6 @@ define @lrint_nxv4f64( %x, @llvm.vp.lrint.nxv4iXLen.nxv4f64( %x, %m, i32 %evl) ret %a } -declare @llvm.vp.lrint.nxv4iXLen.nxv4f64(, , i32) define @lrint_nxv8f64( %x, %m, i32 zeroext %evl) { ; RV32-LABEL: lrint_nxv8f64: @@ -240,4 +232,3 @@ define @lrint_nxv8f64( %x, @llvm.vp.lrint.nxv8iXLen.nxv8f64( %x, %m, i32 %evl) ret %a } -declare @llvm.vp.lrint.nxv8iXLen.nxv8f64(, , i32) diff --git a/llvm/test/CodeGen/RISCV/rvv/lround-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/lround-sdnode.ll index 109b9055e7b55..03e18738a491d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/lround-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/lround-sdnode.ll @@ -34,7 +34,6 @@ define 
@lround_nxv1f32( %x) { %a = call @llvm.lround.nxv1iXLen.nxv1f32( %x) ret %a } -declare @llvm.lround.nxv1iXLen.nxv1f32() define @lround_nxv2f32( %x) { ; RV32-LABEL: lround_nxv2f32: @@ -64,7 +63,6 @@ define @lround_nxv2f32( %x) { %a = call @llvm.lround.nxv2iXLen.nxv2f32( %x) ret %a } -declare @llvm.lround.nxv2iXLen.nxv2f32() define @lround_nxv4f32( %x) { ; RV32-LABEL: lround_nxv4f32: @@ -94,7 +92,6 @@ define @lround_nxv4f32( %x) { %a = call @llvm.lround.nxv4iXLen.nxv4f32( %x) ret %a } -declare @llvm.lround.nxv4iXLen.nxv4f32() define @lround_nxv8f32( %x) { ; RV32-LABEL: lround_nxv8f32: @@ -124,7 +121,6 @@ define @lround_nxv8f32( %x) { %a = call @llvm.lround.nxv8iXLen.nxv8f32( %x) ret %a } -declare @llvm.lround.nxv8iXLen.nxv8f32() define @lround_nxv16f32( %x) { ; RV32-LABEL: lround_nxv16f32: @@ -155,7 +151,6 @@ define @lround_nxv16f32( %x) { %a = call @llvm.lround.nxv16iXLen.nxv16f32( %x) ret %a } -declare @llvm.lround.nxv16iXLen.nxv16f32() define @lround_nxv1f64( %x) { ; RV32-LABEL: lround_nxv1f64: @@ -186,7 +181,6 @@ define @lround_nxv1f64( %x) { %a = call @llvm.lround.nxv1iXLen.nxv1f64( %x) ret %a } -declare @llvm.lround.nxv1iXLen.nxv1f64() define @lround_nxv2f64( %x) { ; RV32-LABEL: lround_nxv2f64: @@ -217,7 +211,6 @@ define @lround_nxv2f64( %x) { %a = call @llvm.lround.nxv2iXLen.nxv2f64( %x) ret %a } -declare @llvm.lround.nxv2iXLen.nxv2f64() define @lround_nxv4f64( %x) { ; RV32-LABEL: lround_nxv4f64: @@ -248,7 +241,6 @@ define @lround_nxv4f64( %x) { %a = call @llvm.lround.nxv4iXLen.nxv4f64( %x) ret %a } -declare @llvm.lround.nxv4iXLen.nxv4f64() define @lround_nxv8f64( %x) { ; RV32-LABEL: lround_nxv8f64: @@ -279,7 +271,6 @@ define @lround_nxv8f64( %x) { %a = call @llvm.lround.nxv8iXLen.nxv8f64( %x) ret %a } -declare @llvm.lround.nxv8iXLen.nxv8f64() define @lround_nxv1f16( %x) { ; RV32-LABEL: lround_nxv1f16: @@ -314,7 +305,6 @@ define @lround_nxv1f16( %x) { %a = call @llvm.lround.nxv1iXLen.nxv1f16( %x) ret %a } -declare @llvm.lround.nxv1iXLen.nxv1f16() define 
@lround_nxv2f16( %x) { ; RV32-LABEL: lround_nxv2f16: @@ -349,7 +339,6 @@ define @lround_nxv2f16( %x) { %a = call @llvm.lround.nxv2iXLen.nxv2f16( %x) ret %a } -declare @llvm.lround.nxv2iXLen.nxv2f16() define @lround_nxv4f16( %x) { ; RV32-LABEL: lround_nxv4f16: @@ -384,7 +373,6 @@ define @lround_nxv4f16( %x) { %a = call @llvm.lround.nxv4iXLen.nxv4f16( %x) ret %a } -declare @llvm.lround.nxv4iXLen.nxv4f16() define @lround_nxv8f16( %x) { ; RV32-LABEL: lround_nxv8f16: @@ -419,7 +407,6 @@ define @lround_nxv8f16( %x) { %a = call @llvm.lround.nxv8iXLen.nxv8f16( %x) ret %a } -declare @llvm.lround.nxv8iXLen.nxv8f16() define @lround_nxv16f16( %x) { ; RV32-LABEL: lround_nxv16f16: @@ -456,7 +443,6 @@ define @lround_nxv16f16( %x) { %a = call @llvm.lround.nxv16iXLen.nxv16f16( %x) ret %a } -declare @llvm.lround.nxv16iXLen.nxv16f16() define @lround_nxv1bf16( %x) { ; RV32-LABEL: lround_nxv1bf16: @@ -491,7 +477,6 @@ define @lround_nxv1bf16( %x) { %a = call @llvm.lround.nxv1iXLen.nxv1bf16( %x) ret %a } -declare @llvm.lround.nxv1iXLen.nxv1bf16() define @lround_nxv2bf16( %x) { ; RV32-LABEL: lround_nxv2bf16: @@ -526,7 +511,6 @@ define @lround_nxv2bf16( %x) { %a = call @llvm.lround.nxv2iXLen.nxv2bf16( %x) ret %a } -declare @llvm.lround.nxv2iXLen.nxv2bf16() define @lround_nxv4bf16( %x) { ; RV32-LABEL: lround_nxv4bf16: @@ -561,7 +545,6 @@ define @lround_nxv4bf16( %x) { %a = call @llvm.lround.nxv4iXLen.nxv4bf16( %x) ret %a } -declare @llvm.lround.nxv4iXLen.nxv4bf16() define @lround_nxv8bf16( %x) { ; RV32-LABEL: lround_nxv8bf16: @@ -596,7 +579,6 @@ define @lround_nxv8bf16( %x) { %a = call @llvm.lround.nxv8iXLen.nxv8bf16( %x) ret %a } -declare @llvm.lround.nxv8iXLen.nxv8bf16() define @lround_nxv16bf16( %x) { ; RV32-LABEL: lround_nxv16bf16: @@ -633,7 +615,6 @@ define @lround_nxv16bf16( %x) { %a = call @llvm.lround.nxv16iXLen.nxv16bf16( %x) ret %a } -declare @llvm.lround.nxv16iXLen.nxv16bf16() define @lround_nxv32bf16( %x) { ; RV32-LABEL: lround_nxv32bf16: @@ -756,4 +737,3 @@ define 
@lround_nxv32bf16( %x) { %a = call @llvm.lround.nxv32iXLen.nxv32bf16( %x) ret %a } -declare @llvm.lround.nxv32iXLen.nxv32bf16() diff --git a/llvm/test/CodeGen/RISCV/rvv/marith-vp.ll b/llvm/test/CodeGen/RISCV/rvv/marith-vp.ll index 40e6567ac802d..05b261b95e30c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/marith-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/marith-vp.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s -declare <1 x i1> @llvm.vp.and.v1i1(<1 x i1>, <1 x i1>, <1 x i1>, i32) - define <1 x i1> @and_v1i1(<1 x i1> %b, <1 x i1> %c, <1 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: and_v1i1: ; CHECK: # %bb.0: @@ -14,8 +12,6 @@ define <1 x i1> @and_v1i1(<1 x i1> %b, <1 x i1> %c, <1 x i1> %a, i32 zeroext %ev ret <1 x i1> %v } -declare <2 x i1> @llvm.vp.and.v2i1(<2 x i1>, <2 x i1>, <2 x i1>, i32) - define <2 x i1> @and_v2i1(<2 x i1> %b, <2 x i1> %c, <2 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: and_v2i1: ; CHECK: # %bb.0: @@ -26,8 +22,6 @@ define <2 x i1> @and_v2i1(<2 x i1> %b, <2 x i1> %c, <2 x i1> %a, i32 zeroext %ev ret <2 x i1> %v } -declare <4 x i1> @llvm.vp.and.v4i1(<4 x i1>, <4 x i1>, <4 x i1>, i32) - define <4 x i1> @and_v4i1(<4 x i1> %b, <4 x i1> %c, <4 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: and_v4i1: ; CHECK: # %bb.0: @@ -38,8 +32,6 @@ define <4 x i1> @and_v4i1(<4 x i1> %b, <4 x i1> %c, <4 x i1> %a, i32 zeroext %ev ret <4 x i1> %v } -declare <8 x i1> @llvm.vp.and.v8i1(<8 x i1>, <8 x i1>, <8 x i1>, i32) - define <8 x i1> @and_v8i1(<8 x i1> %b, <8 x i1> %c, <8 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: and_v8i1: ; CHECK: # %bb.0: @@ -50,8 +42,6 @@ define <8 x i1> @and_v8i1(<8 x i1> %b, <8 x i1> %c, <8 x i1> %a, i32 zeroext %ev ret <8 x i1> %v } -declare <16 x i1> @llvm.vp.and.v16i1(<16 x i1>, <16 x i1>, <16 x i1>, i32) - define <16 x i1> @and_v16i1(<16 x i1> %b, <16 x i1> %c, <16 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: 
and_v16i1: ; CHECK: # %bb.0: @@ -62,8 +52,6 @@ define <16 x i1> @and_v16i1(<16 x i1> %b, <16 x i1> %c, <16 x i1> %a, i32 zeroex ret <16 x i1> %v } -declare @llvm.vp.and.nxv1i1(, , , i32) - define @and_nxv1i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: and_nxv1i1: ; CHECK: # %bb.0: @@ -74,8 +62,6 @@ define @and_nxv1i1( %b, %c, ret %v } -declare @llvm.vp.and.nxv2i1(, , , i32) - define @and_nxv2i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: and_nxv2i1: ; CHECK: # %bb.0: @@ -86,8 +72,6 @@ define @and_nxv2i1( %b, %c, ret %v } -declare @llvm.vp.and.nxv4i1(, , , i32) - define @and_nxv4i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: and_nxv4i1: ; CHECK: # %bb.0: @@ -98,8 +82,6 @@ define @and_nxv4i1( %b, %c, ret %v } -declare @llvm.vp.and.nxv8i1(, , , i32) - define @and_nxv8i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: and_nxv8i1: ; CHECK: # %bb.0: @@ -110,8 +92,6 @@ define @and_nxv8i1( %b, %c, ret %v } -declare @llvm.vp.and.nxv16i1(, , , i32) - define @and_nxv16i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: and_nxv16i1: ; CHECK: # %bb.0: @@ -122,8 +102,6 @@ define @and_nxv16i1( %b, ret %v } -declare @llvm.vp.and.nxv32i1(, , , i32) - define @and_nxv32i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: and_nxv32i1: ; CHECK: # %bb.0: @@ -134,8 +112,6 @@ define @and_nxv32i1( %b, ret %v } -declare @llvm.vp.and.nxv64i1(, , , i32) - define @and_nxv64i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: and_nxv64i1: ; CHECK: # %bb.0: @@ -146,8 +122,6 @@ define @and_nxv64i1( %b, ret %v } -declare <1 x i1> @llvm.vp.or.v1i1(<1 x i1>, <1 x i1>, <1 x i1>, i32) - define <1 x i1> @or_v1i1(<1 x i1> %b, <1 x i1> %c, <1 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: or_v1i1: ; CHECK: # %bb.0: @@ -158,8 +132,6 @@ define <1 x i1> @or_v1i1(<1 x i1> %b, <1 x i1> %c, <1 x i1> %a, i32 zeroext %evl ret <1 x i1> %v } -declare <2 x i1> @llvm.vp.or.v2i1(<2 x i1>, <2 x i1>, <2 x i1>, i32) - define <2 x i1> @or_v2i1(<2 x i1> %b, <2 x i1> %c, <2 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: 
or_v2i1: ; CHECK: # %bb.0: @@ -170,8 +142,6 @@ define <2 x i1> @or_v2i1(<2 x i1> %b, <2 x i1> %c, <2 x i1> %a, i32 zeroext %evl ret <2 x i1> %v } -declare <4 x i1> @llvm.vp.or.v4i1(<4 x i1>, <4 x i1>, <4 x i1>, i32) - define <4 x i1> @or_v4i1(<4 x i1> %b, <4 x i1> %c, <4 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: or_v4i1: ; CHECK: # %bb.0: @@ -182,8 +152,6 @@ define <4 x i1> @or_v4i1(<4 x i1> %b, <4 x i1> %c, <4 x i1> %a, i32 zeroext %evl ret <4 x i1> %v } -declare <8 x i1> @llvm.vp.or.v8i1(<8 x i1>, <8 x i1>, <8 x i1>, i32) - define <8 x i1> @or_v8i1(<8 x i1> %b, <8 x i1> %c, <8 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: or_v8i1: ; CHECK: # %bb.0: @@ -194,8 +162,6 @@ define <8 x i1> @or_v8i1(<8 x i1> %b, <8 x i1> %c, <8 x i1> %a, i32 zeroext %evl ret <8 x i1> %v } -declare <16 x i1> @llvm.vp.or.v16i1(<16 x i1>, <16 x i1>, <16 x i1>, i32) - define <16 x i1> @or_v16i1(<16 x i1> %b, <16 x i1> %c, <16 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: or_v16i1: ; CHECK: # %bb.0: @@ -206,8 +172,6 @@ define <16 x i1> @or_v16i1(<16 x i1> %b, <16 x i1> %c, <16 x i1> %a, i32 zeroext ret <16 x i1> %v } -declare @llvm.vp.or.nxv1i1(, , , i32) - define @or_nxv1i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: or_nxv1i1: ; CHECK: # %bb.0: @@ -218,8 +182,6 @@ define @or_nxv1i1( %b, %c, ret %v } -declare @llvm.vp.or.nxv2i1(, , , i32) - define @or_nxv2i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: or_nxv2i1: ; CHECK: # %bb.0: @@ -230,8 +192,6 @@ define @or_nxv2i1( %b, %c, ret %v } -declare @llvm.vp.or.nxv4i1(, , , i32) - define @or_nxv4i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: or_nxv4i1: ; CHECK: # %bb.0: @@ -242,8 +202,6 @@ define @or_nxv4i1( %b, %c, ret %v } -declare @llvm.vp.or.nxv8i1(, , , i32) - define @or_nxv8i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: or_nxv8i1: ; CHECK: # %bb.0: @@ -254,8 +212,6 @@ define @or_nxv8i1( %b, %c, ret %v } -declare @llvm.vp.or.nxv16i1(, , , i32) - define @or_nxv16i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: or_nxv16i1: ; 
CHECK: # %bb.0: @@ -266,8 +222,6 @@ define @or_nxv16i1( %b, ret %v } -declare @llvm.vp.or.nxv32i1(, , , i32) - define @or_nxv32i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: or_nxv32i1: ; CHECK: # %bb.0: @@ -278,8 +232,6 @@ define @or_nxv32i1( %b, ret %v } -declare @llvm.vp.or.nxv64i1(, , , i32) - define @or_nxv64i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: or_nxv64i1: ; CHECK: # %bb.0: @@ -290,8 +242,6 @@ define @or_nxv64i1( %b, ret %v } -declare <1 x i1> @llvm.vp.xor.v1i1(<1 x i1>, <1 x i1>, <1 x i1>, i32) - define <1 x i1> @xor_v1i1(<1 x i1> %b, <1 x i1> %c, <1 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_v1i1: ; CHECK: # %bb.0: @@ -302,8 +252,6 @@ define <1 x i1> @xor_v1i1(<1 x i1> %b, <1 x i1> %c, <1 x i1> %a, i32 zeroext %ev ret <1 x i1> %v } -declare <2 x i1> @llvm.vp.xor.v2i1(<2 x i1>, <2 x i1>, <2 x i1>, i32) - define <2 x i1> @xor_v2i1(<2 x i1> %b, <2 x i1> %c, <2 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_v2i1: ; CHECK: # %bb.0: @@ -314,8 +262,6 @@ define <2 x i1> @xor_v2i1(<2 x i1> %b, <2 x i1> %c, <2 x i1> %a, i32 zeroext %ev ret <2 x i1> %v } -declare <4 x i1> @llvm.vp.xor.v4i1(<4 x i1>, <4 x i1>, <4 x i1>, i32) - define <4 x i1> @xor_v4i1(<4 x i1> %b, <4 x i1> %c, <4 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_v4i1: ; CHECK: # %bb.0: @@ -326,8 +272,6 @@ define <4 x i1> @xor_v4i1(<4 x i1> %b, <4 x i1> %c, <4 x i1> %a, i32 zeroext %ev ret <4 x i1> %v } -declare <8 x i1> @llvm.vp.xor.v8i1(<8 x i1>, <8 x i1>, <8 x i1>, i32) - define <8 x i1> @xor_v8i1(<8 x i1> %b, <8 x i1> %c, <8 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_v8i1: ; CHECK: # %bb.0: @@ -338,8 +282,6 @@ define <8 x i1> @xor_v8i1(<8 x i1> %b, <8 x i1> %c, <8 x i1> %a, i32 zeroext %ev ret <8 x i1> %v } -declare <16 x i1> @llvm.vp.xor.v16i1(<16 x i1>, <16 x i1>, <16 x i1>, i32) - define <16 x i1> @xor_v16i1(<16 x i1> %b, <16 x i1> %c, <16 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_v16i1: ; CHECK: # %bb.0: @@ -350,8 +292,6 @@ define <16 x i1> @xor_v16i1(<16 x 
i1> %b, <16 x i1> %c, <16 x i1> %a, i32 zeroex ret <16 x i1> %v } -declare @llvm.vp.xor.nxv1i1(, , , i32) - define @xor_nxv1i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_nxv1i1: ; CHECK: # %bb.0: @@ -362,8 +302,6 @@ define @xor_nxv1i1( %b, %c, ret %v } -declare @llvm.vp.xor.nxv2i1(, , , i32) - define @xor_nxv2i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_nxv2i1: ; CHECK: # %bb.0: @@ -374,8 +312,6 @@ define @xor_nxv2i1( %b, %c, ret %v } -declare @llvm.vp.xor.nxv4i1(, , , i32) - define @xor_nxv4i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_nxv4i1: ; CHECK: # %bb.0: @@ -386,8 +322,6 @@ define @xor_nxv4i1( %b, %c, ret %v } -declare @llvm.vp.xor.nxv8i1(, , , i32) - define @xor_nxv8i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_nxv8i1: ; CHECK: # %bb.0: @@ -398,8 +332,6 @@ define @xor_nxv8i1( %b, %c, ret %v } -declare @llvm.vp.xor.nxv16i1(, , , i32) - define @xor_nxv16i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_nxv16i1: ; CHECK: # %bb.0: @@ -410,8 +342,6 @@ define @xor_nxv16i1( %b, ret %v } -declare @llvm.vp.xor.nxv32i1(, , , i32) - define @xor_nxv32i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_nxv32i1: ; CHECK: # %bb.0: @@ -422,8 +352,6 @@ define @xor_nxv32i1( %b, ret %v } -declare @llvm.vp.xor.nxv64i1(, , , i32) - define @xor_nxv64i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_nxv64i1: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-load-fp.ll b/llvm/test/CodeGen/RISCV/rvv/masked-load-fp.ll index 5c0a6ac82d8cf..62cee4057a56d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/masked-load-fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/masked-load-fp.ll @@ -13,7 +13,6 @@ define @masked_load_nxv1bf16(ptr %a, %ma %load = call @llvm.masked.load.nxv1bf16(ptr %a, i32 2, %mask, poison) ret %load } -declare @llvm.masked.load.nxv1bf16(ptr, i32, , ) define @masked_load_nxv1f16(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv1f16: @@ -24,7 +23,6 @@ define @masked_load_nxv1f16(ptr %a, %mask) %load = call 
@llvm.masked.load.nxv1f16(ptr %a, i32 2, %mask, poison) ret %load } -declare @llvm.masked.load.nxv1f16(ptr, i32, , ) define @masked_load_nxv1f32(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv1f32: @@ -35,7 +33,6 @@ define @masked_load_nxv1f32(ptr %a, %mask %load = call @llvm.masked.load.nxv1f32(ptr %a, i32 4, %mask, poison) ret %load } -declare @llvm.masked.load.nxv1f32(ptr, i32, , ) define @masked_load_nxv1f64(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv1f64: @@ -46,7 +43,6 @@ define @masked_load_nxv1f64(ptr %a, %mas %load = call @llvm.masked.load.nxv1f64(ptr %a, i32 8, %mask, poison) ret %load } -declare @llvm.masked.load.nxv1f64(ptr, i32, , ) define @masked_load_nxv2bf16(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv2bf16: @@ -57,7 +53,6 @@ define @masked_load_nxv2bf16(ptr %a, %ma %load = call @llvm.masked.load.nxv2bf16(ptr %a, i32 2, %mask, poison) ret %load } -declare @llvm.masked.load.nxv2bf16(ptr, i32, , ) define @masked_load_nxv2f16(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv2f16: @@ -68,7 +63,6 @@ define @masked_load_nxv2f16(ptr %a, %mask) %load = call @llvm.masked.load.nxv2f16(ptr %a, i32 2, %mask, poison) ret %load } -declare @llvm.masked.load.nxv2f16(ptr, i32, , ) define @masked_load_nxv2f32(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv2f32: @@ -79,7 +73,6 @@ define @masked_load_nxv2f32(ptr %a, %mask %load = call @llvm.masked.load.nxv2f32(ptr %a, i32 4, %mask, poison) ret %load } -declare @llvm.masked.load.nxv2f32(ptr, i32, , ) define @masked_load_nxv2f64(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv2f64: @@ -90,7 +83,6 @@ define @masked_load_nxv2f64(ptr %a, %mas %load = call @llvm.masked.load.nxv2f64(ptr %a, i32 8, %mask, poison) ret %load } -declare @llvm.masked.load.nxv2f64(ptr, i32, , ) define @masked_load_nxv4bf16(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv4bf16: @@ -101,7 +93,6 @@ define @masked_load_nxv4bf16(ptr %a, %ma %load = call @llvm.masked.load.nxv4bf16(ptr %a, 
i32 2, %mask, poison) ret %load } -declare @llvm.masked.load.nxv4bf16(ptr, i32, , ) define @masked_load_nxv4f16(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv4f16: @@ -112,7 +103,6 @@ define @masked_load_nxv4f16(ptr %a, %mask) %load = call @llvm.masked.load.nxv4f16(ptr %a, i32 2, %mask, poison) ret %load } -declare @llvm.masked.load.nxv4f16(ptr, i32, , ) define @masked_load_nxv4f32(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv4f32: @@ -123,7 +113,6 @@ define @masked_load_nxv4f32(ptr %a, %mask %load = call @llvm.masked.load.nxv4f32(ptr %a, i32 4, %mask, poison) ret %load } -declare @llvm.masked.load.nxv4f32(ptr, i32, , ) define @masked_load_nxv4f64(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv4f64: @@ -134,7 +123,6 @@ define @masked_load_nxv4f64(ptr %a, %mas %load = call @llvm.masked.load.nxv4f64(ptr %a, i32 8, %mask, poison) ret %load } -declare @llvm.masked.load.nxv4f64(ptr, i32, , ) define @masked_load_nxv8bf16(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv8bf16: @@ -145,7 +133,6 @@ define @masked_load_nxv8bf16(ptr %a, %ma %load = call @llvm.masked.load.nxv8bf16(ptr %a, i32 2, %mask, poison) ret %load } -declare @llvm.masked.load.nxv8bf16(ptr, i32, , ) define @masked_load_nxv8f16(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv8f16: @@ -156,7 +143,6 @@ define @masked_load_nxv8f16(ptr %a, %mask) %load = call @llvm.masked.load.nxv8f16(ptr %a, i32 2, %mask, poison) ret %load } -declare @llvm.masked.load.nxv8f16(ptr, i32, , ) define @masked_load_nxv8f32(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv8f32: @@ -167,7 +153,6 @@ define @masked_load_nxv8f32(ptr %a, %mask %load = call @llvm.masked.load.nxv8f32(ptr %a, i32 4, %mask, poison) ret %load } -declare @llvm.masked.load.nxv8f32(ptr, i32, , ) define @masked_load_nxv8f64(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv8f64: @@ -178,7 +163,6 @@ define @masked_load_nxv8f64(ptr %a, %mas %load = call @llvm.masked.load.nxv8f64(ptr %a, i32 8, %mask, poison) 
ret %load } -declare @llvm.masked.load.nxv8f64(ptr, i32, , ) define @masked_load_nxv16bf16(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv16bf16: @@ -189,7 +173,6 @@ define @masked_load_nxv16bf16(ptr %a, %load = call @llvm.masked.load.nxv16bf16(ptr %a, i32 2, %mask, poison) ret %load } -declare @llvm.masked.load.nxv16bf16(ptr, i32, , ) define @masked_load_nxv16f16(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv16f16: @@ -200,7 +183,6 @@ define @masked_load_nxv16f16(ptr %a, %ma %load = call @llvm.masked.load.nxv16f16(ptr %a, i32 2, %mask, poison) ret %load } -declare @llvm.masked.load.nxv16f16(ptr, i32, , ) define @masked_load_nxv16f32(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv16f32: @@ -211,7 +193,6 @@ define @masked_load_nxv16f32(ptr %a, %m %load = call @llvm.masked.load.nxv16f32(ptr %a, i32 4, %mask, poison) ret %load } -declare @llvm.masked.load.nxv16f32(ptr, i32, , ) define @masked_load_nxv32bf16(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv32bf16: @@ -222,7 +203,6 @@ define @masked_load_nxv32bf16(ptr %a, %load = call @llvm.masked.load.nxv32bf16(ptr %a, i32 2, %mask, poison) ret %load } -declare @llvm.masked.load.nxv32bf16(ptr, i32, , ) define @masked_load_nxv32f16(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv32f16: @@ -233,4 +213,3 @@ define @masked_load_nxv32f16(ptr %a, %ma %load = call @llvm.masked.load.nxv32f16(ptr %a, i32 2, %mask, poison) ret %load } -declare @llvm.masked.load.nxv32f16(ptr, i32, , ) diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-load-int-e64.ll b/llvm/test/CodeGen/RISCV/rvv/masked-load-int-e64.ll index 41cc500f4a610..5fcf10d446a33 100644 --- a/llvm/test/CodeGen/RISCV/rvv/masked-load-int-e64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/masked-load-int-e64.ll @@ -11,7 +11,6 @@ define @masked_load_nxv1i64(ptr %a, %mask) %load = call @llvm.masked.load.nxv1i64(ptr %a, i32 8, %mask, poison) ret %load } -declare @llvm.masked.load.nxv1i64(ptr, i32, , ) define @masked_load_nxv2i64(ptr %a, %mask) 
nounwind { ; CHECK-LABEL: masked_load_nxv2i64: @@ -22,7 +21,6 @@ define @masked_load_nxv2i64(ptr %a, %mask) %load = call @llvm.masked.load.nxv2i64(ptr %a, i32 8, %mask, poison) ret %load } -declare @llvm.masked.load.nxv2i64(ptr, i32, , ) define @masked_load_nxv4i64(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv4i64: @@ -33,7 +31,6 @@ define @masked_load_nxv4i64(ptr %a, %mask) %load = call @llvm.masked.load.nxv4i64(ptr %a, i32 8, %mask, poison) ret %load } -declare @llvm.masked.load.nxv4i64(ptr, i32, , ) define @masked_load_nxv8i64(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv8i64: @@ -44,4 +41,3 @@ define @masked_load_nxv8i64(ptr %a, %mask) %load = call @llvm.masked.load.nxv8i64(ptr %a, i32 8, %mask, poison) ret %load } -declare @llvm.masked.load.nxv8i64(ptr, i32, , ) diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-load-int.ll b/llvm/test/CodeGen/RISCV/rvv/masked-load-int.ll index 94794a74b2ced..40b906a481daa 100644 --- a/llvm/test/CodeGen/RISCV/rvv/masked-load-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/masked-load-int.ll @@ -59,7 +59,6 @@ define @masked_load_nxv1i16(ptr %a, %mask) %load = call @llvm.masked.load.nxv1i16(ptr %a, i32 2, %mask, poison) ret %load } -declare @llvm.masked.load.nxv1i16(ptr, i32, , ) define @masked_load_nxv1i32(ptr %a, %mask) nounwind { ; V-LABEL: masked_load_nxv1i32: @@ -78,7 +77,6 @@ define @masked_load_nxv1i32(ptr %a, %mask) %load = call @llvm.masked.load.nxv1i32(ptr %a, i32 4, %mask, poison) ret %load } -declare @llvm.masked.load.nxv1i32(ptr, i32, , ) define @masked_load_nxv2i8(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv2i8: @@ -89,7 +87,6 @@ define @masked_load_nxv2i8(ptr %a, %mask) no %load = call @llvm.masked.load.nxv2i8(ptr %a, i32 1, %mask, poison) ret %load } -declare @llvm.masked.load.nxv2i8(ptr, i32, , ) define @masked_load_nxv2i16(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv2i16: @@ -100,7 +97,6 @@ define @masked_load_nxv2i16(ptr %a, %mask) %load = call @llvm.masked.load.nxv2i16(ptr 
%a, i32 2, %mask, poison) ret %load } -declare @llvm.masked.load.nxv2i16(ptr, i32, , ) define @masked_load_nxv2i32(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv2i32: @@ -111,7 +107,6 @@ define @masked_load_nxv2i32(ptr %a, %mask) %load = call @llvm.masked.load.nxv2i32(ptr %a, i32 4, %mask, poison) ret %load } -declare @llvm.masked.load.nxv2i32(ptr, i32, , ) define @masked_load_nxv4i8(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv4i8: @@ -122,7 +117,6 @@ define @masked_load_nxv4i8(ptr %a, %mask) no %load = call @llvm.masked.load.nxv4i8(ptr %a, i32 1, %mask, poison) ret %load } -declare @llvm.masked.load.nxv4i8(ptr, i32, , ) define @masked_load_nxv4i16(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv4i16: @@ -133,7 +127,6 @@ define @masked_load_nxv4i16(ptr %a, %mask) %load = call @llvm.masked.load.nxv4i16(ptr %a, i32 2, %mask, poison) ret %load } -declare @llvm.masked.load.nxv4i16(ptr, i32, , ) define @masked_load_nxv4i32(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv4i32: @@ -144,7 +137,6 @@ define @masked_load_nxv4i32(ptr %a, %mask) %load = call @llvm.masked.load.nxv4i32(ptr %a, i32 4, %mask, poison) ret %load } -declare @llvm.masked.load.nxv4i32(ptr, i32, , ) define @masked_load_nxv8i8(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv8i8: @@ -155,7 +147,6 @@ define @masked_load_nxv8i8(ptr %a, %mask) no %load = call @llvm.masked.load.nxv8i8(ptr %a, i32 1, %mask, poison) ret %load } -declare @llvm.masked.load.nxv8i8(ptr, i32, , ) define @masked_load_nxv8i16(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv8i16: @@ -166,7 +157,6 @@ define @masked_load_nxv8i16(ptr %a, %mask) %load = call @llvm.masked.load.nxv8i16(ptr %a, i32 2, %mask, poison) ret %load } -declare @llvm.masked.load.nxv8i16(ptr, i32, , ) define @masked_load_nxv8i32(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv8i32: @@ -177,7 +167,6 @@ define @masked_load_nxv8i32(ptr %a, %mask) %load = call @llvm.masked.load.nxv8i32(ptr %a, i32 4, %mask, 
poison) ret %load } -declare @llvm.masked.load.nxv8i32(ptr, i32, , ) define @masked_load_nxv16i8(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv16i8: @@ -188,7 +177,6 @@ define @masked_load_nxv16i8(ptr %a, %mask) %load = call @llvm.masked.load.nxv16i8(ptr %a, i32 1, %mask, poison) ret %load } -declare @llvm.masked.load.nxv16i8(ptr, i32, , ) define @masked_load_nxv16i16(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv16i16: @@ -199,7 +187,6 @@ define @masked_load_nxv16i16(ptr %a, %mas %load = call @llvm.masked.load.nxv16i16(ptr %a, i32 2, %mask, poison) ret %load } -declare @llvm.masked.load.nxv16i16(ptr, i32, , ) define @masked_load_nxv16i32(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv16i32: @@ -210,7 +197,6 @@ define @masked_load_nxv16i32(ptr %a, %mas %load = call @llvm.masked.load.nxv16i32(ptr %a, i32 4, %mask, poison) ret %load } -declare @llvm.masked.load.nxv16i32(ptr, i32, , ) define @masked_load_nxv32i8(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv32i8: @@ -221,7 +207,6 @@ define @masked_load_nxv32i8(ptr %a, %mask) %load = call @llvm.masked.load.nxv32i8(ptr %a, i32 1, %mask, poison) ret %load } -declare @llvm.masked.load.nxv32i8(ptr, i32, , ) define @masked_load_nxv32i16(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv32i16: @@ -232,7 +217,6 @@ define @masked_load_nxv32i16(ptr %a, %mas %load = call @llvm.masked.load.nxv32i16(ptr %a, i32 2, %mask, poison) ret %load } -declare @llvm.masked.load.nxv32i16(ptr, i32, , ) define @masked_load_nxv64i8(ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_load_nxv64i8: @@ -243,7 +227,6 @@ define @masked_load_nxv64i8(ptr %a, %mask) %load = call @llvm.masked.load.nxv64i8(ptr %a, i32 1, %mask, poison) ret %load } -declare @llvm.masked.load.nxv64i8(ptr, i32, , ) define @masked_load_zero_mask(ptr %a) nounwind { ; CHECK-LABEL: masked_load_zero_mask: diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-store-fp.ll b/llvm/test/CodeGen/RISCV/rvv/masked-store-fp.ll index 
586af50266f94..0b874fff5c526 100644 --- a/llvm/test/CodeGen/RISCV/rvv/masked-store-fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/masked-store-fp.ll @@ -13,7 +13,6 @@ define void @masked_store_nxv1bf16( %val, ptr %a, %val, ptr %a, i32 2, %mask) ret void } -declare void @llvm.masked.store.nxv1bf16.p0(, ptr, i32, ) define void @masked_store_nxv1f16( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv1f16: @@ -24,7 +23,6 @@ define void @masked_store_nxv1f16( %val, ptr %a, %val, ptr %a, i32 2, %mask) ret void } -declare void @llvm.masked.store.nxv1f16.p0(, ptr, i32, ) define void @masked_store_nxv1f32( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv1f32: @@ -35,7 +33,6 @@ define void @masked_store_nxv1f32( %val, ptr %a, %val, ptr %a, i32 4, %mask) ret void } -declare void @llvm.masked.store.nxv1f32.p0(, ptr, i32, ) define void @masked_store_nxv1f64( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv1f64: @@ -46,7 +43,6 @@ define void @masked_store_nxv1f64( %val, ptr %a, %val, ptr %a, i32 8, %mask) ret void } -declare void @llvm.masked.store.nxv1f64.p0(, ptr, i32, ) define void @masked_store_nxv2bf16( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv2bf16: @@ -57,7 +53,6 @@ define void @masked_store_nxv2bf16( %val, ptr %a, %val, ptr %a, i32 2, %mask) ret void } -declare void @llvm.masked.store.nxv2bf16.p0(, ptr, i32, ) define void @masked_store_nxv2f16( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv2f16: @@ -68,7 +63,6 @@ define void @masked_store_nxv2f16( %val, ptr %a, %val, ptr %a, i32 2, %mask) ret void } -declare void @llvm.masked.store.nxv2f16.p0(, ptr, i32, ) define void @masked_store_nxv2f32( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv2f32: @@ -79,7 +73,6 @@ define void @masked_store_nxv2f32( %val, ptr %a, %val, ptr %a, i32 4, %mask) ret void } -declare void @llvm.masked.store.nxv2f32.p0(, ptr, i32, ) define void @masked_store_nxv2f64( %val, ptr %a, %mask) nounwind { ; 
CHECK-LABEL: masked_store_nxv2f64: @@ -90,7 +83,6 @@ define void @masked_store_nxv2f64( %val, ptr %a, %val, ptr %a, i32 8, %mask) ret void } -declare void @llvm.masked.store.nxv2f64.p0(, ptr, i32, ) define void @masked_store_nxv4bf16( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv4bf16: @@ -101,7 +93,6 @@ define void @masked_store_nxv4bf16( %val, ptr %a, %val, ptr %a, i32 2, %mask) ret void } -declare void @llvm.masked.store.nxv4bf16.p0(, ptr, i32, ) define void @masked_store_nxv4f16( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv4f16: @@ -112,7 +103,6 @@ define void @masked_store_nxv4f16( %val, ptr %a, %val, ptr %a, i32 2, %mask) ret void } -declare void @llvm.masked.store.nxv4f16.p0(, ptr, i32, ) define void @masked_store_nxv4f32( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv4f32: @@ -123,7 +113,6 @@ define void @masked_store_nxv4f32( %val, ptr %a, %val, ptr %a, i32 4, %mask) ret void } -declare void @llvm.masked.store.nxv4f32.p0(, ptr, i32, ) define void @masked_store_nxv4f64( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv4f64: @@ -134,7 +123,6 @@ define void @masked_store_nxv4f64( %val, ptr %a, %val, ptr %a, i32 8, %mask) ret void } -declare void @llvm.masked.store.nxv4f64.p0(, ptr, i32, ) define void @masked_store_nxv8bf16( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv8bf16: @@ -145,7 +133,6 @@ define void @masked_store_nxv8bf16( %val, ptr %a, %val, ptr %a, i32 2, %mask) ret void } -declare void @llvm.masked.store.nxv8bf16.p0(, ptr, i32, ) define void @masked_store_nxv8f16( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv8f16: @@ -156,7 +143,6 @@ define void @masked_store_nxv8f16( %val, ptr %a, %val, ptr %a, i32 2, %mask) ret void } -declare void @llvm.masked.store.nxv8f16.p0(, ptr, i32, ) define void @masked_store_nxv8f32( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv8f32: @@ -167,7 +153,6 @@ define void @masked_store_nxv8f32( %val, ptr %a, 
%val, ptr %a, i32 4, %mask) ret void } -declare void @llvm.masked.store.nxv8f32.p0(, ptr, i32, ) define void @masked_store_nxv8f64( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv8f64: @@ -178,7 +163,6 @@ define void @masked_store_nxv8f64( %val, ptr %a, %val, ptr %a, i32 8, %mask) ret void } -declare void @llvm.masked.store.nxv8f64.p0(, ptr, i32, ) define void @masked_store_nxv16bf16( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv16bf16: @@ -189,7 +173,6 @@ define void @masked_store_nxv16bf16( %val, ptr %a, %val, ptr %a, i32 2, %mask) ret void } -declare void @llvm.masked.store.nxv16bf16.p0(, ptr, i32, ) define void @masked_store_nxv16f16( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv16f16: @@ -200,7 +183,6 @@ define void @masked_store_nxv16f16( %val, ptr %a, %val, ptr %a, i32 2, %mask) ret void } -declare void @llvm.masked.store.nxv16f16.p0(, ptr, i32, ) define void @masked_store_nxv16f32( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv16f32: @@ -211,7 +193,6 @@ define void @masked_store_nxv16f32( %val, ptr %a, %val, ptr %a, i32 4, %mask) ret void } -declare void @llvm.masked.store.nxv16f32.p0(, ptr, i32, ) define void @masked_store_nxv32bf16( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv32bf16: @@ -222,7 +203,6 @@ define void @masked_store_nxv32bf16( %val, ptr %a, %val, ptr %a, i32 2, %mask) ret void } -declare void @llvm.masked.store.nxv32bf16.p0(, ptr, i32, ) define void @masked_store_nxv32f16( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv32f16: @@ -233,4 +213,3 @@ define void @masked_store_nxv32f16( %val, ptr %a, %val, ptr %a, i32 2, %mask) ret void } -declare void @llvm.masked.store.nxv32f16.p0(, ptr, i32, ) diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-store-int-e64.ll b/llvm/test/CodeGen/RISCV/rvv/masked-store-int-e64.ll index 602ee6105af5f..5936bc73c1a94 100644 --- a/llvm/test/CodeGen/RISCV/rvv/masked-store-int-e64.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/masked-store-int-e64.ll @@ -11,7 +11,6 @@ define void @masked_store_nxv1i64( %val, ptr %a, %val, ptr %a, i32 8, %mask) ret void } -declare void @llvm.masked.store.v1i64.p0(, ptr, i32, ) define void @masked_store_nxv2i64( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv2i64: @@ -22,7 +21,6 @@ define void @masked_store_nxv2i64( %val, ptr %a, %val, ptr %a, i32 8, %mask) ret void } -declare void @llvm.masked.store.v2i64.p0(, ptr, i32, ) define void @masked_store_nxv4i64( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv4i64: @@ -33,7 +31,6 @@ define void @masked_store_nxv4i64( %val, ptr %a, %val, ptr %a, i32 8, %mask) ret void } -declare void @llvm.masked.store.v4i64.p0(, ptr, i32, ) define void @masked_store_nxv8i64( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv8i64: @@ -44,4 +41,3 @@ define void @masked_store_nxv8i64( %val, ptr %a, %val, ptr %a, i32 8, %mask) ret void } -declare void @llvm.masked.store.v8i64.p0(, ptr, i32, ) diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-store-int.ll b/llvm/test/CodeGen/RISCV/rvv/masked-store-int.ll index 92893a7dd463a..dc83cdc695b54 100644 --- a/llvm/test/CodeGen/RISCV/rvv/masked-store-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/masked-store-int.ll @@ -21,7 +21,6 @@ define void @masked_store_nxv1i8( %val, ptr %a, %val, ptr %a, i32 1, %mask) ret void } -declare void @llvm.masked.store.v1i8.p0(, ptr, i32, ) define void @masked_store_nxv1i16( %val, ptr %a, %mask) nounwind { ; V-LABEL: masked_store_nxv1i16: @@ -40,7 +39,6 @@ define void @masked_store_nxv1i16( %val, ptr %a, %val, ptr %a, i32 2, %mask) ret void } -declare void @llvm.masked.store.v1i16.p0(, ptr, i32, ) define void @masked_store_nxv1i32( %val, ptr %a, %mask) nounwind { ; V-LABEL: masked_store_nxv1i32: @@ -59,7 +57,6 @@ define void @masked_store_nxv1i32( %val, ptr %a, %val, ptr %a, i32 4, %mask) ret void } -declare void @llvm.masked.store.v1i32.p0(, ptr, i32, ) define void @masked_store_nxv2i8( %val, 
ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv2i8: @@ -70,7 +67,6 @@ define void @masked_store_nxv2i8( %val, ptr %a, %val, ptr %a, i32 1, %mask) ret void } -declare void @llvm.masked.store.v2i8.p0(, ptr, i32, ) define void @masked_store_nxv2i16( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv2i16: @@ -81,7 +77,6 @@ define void @masked_store_nxv2i16( %val, ptr %a, %val, ptr %a, i32 2, %mask) ret void } -declare void @llvm.masked.store.v2i16.p0(, ptr, i32, ) define void @masked_store_nxv2i32( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv2i32: @@ -92,7 +87,6 @@ define void @masked_store_nxv2i32( %val, ptr %a, %val, ptr %a, i32 4, %mask) ret void } -declare void @llvm.masked.store.v2i32.p0(, ptr, i32, ) define void @masked_store_nxv4i8( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv4i8: @@ -103,7 +97,6 @@ define void @masked_store_nxv4i8( %val, ptr %a, %val, ptr %a, i32 1, %mask) ret void } -declare void @llvm.masked.store.v4i8.p0(, ptr, i32, ) define void @masked_store_nxv4i16( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv4i16: @@ -114,7 +107,6 @@ define void @masked_store_nxv4i16( %val, ptr %a, %val, ptr %a, i32 2, %mask) ret void } -declare void @llvm.masked.store.v4i16.p0(, ptr, i32, ) define void @masked_store_nxv4i32( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv4i32: @@ -125,7 +117,6 @@ define void @masked_store_nxv4i32( %val, ptr %a, %val, ptr %a, i32 4, %mask) ret void } -declare void @llvm.masked.store.v4i32.p0(, ptr, i32, ) define void @masked_store_nxv8i8( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv8i8: @@ -136,7 +127,6 @@ define void @masked_store_nxv8i8( %val, ptr %a, %val, ptr %a, i32 1, %mask) ret void } -declare void @llvm.masked.store.v8i8.p0(, ptr, i32, ) define void @masked_store_nxv8i16( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv8i16: @@ -147,7 +137,6 @@ define void @masked_store_nxv8i16( %val, ptr %a, %val, ptr 
%a, i32 2, %mask) ret void } -declare void @llvm.masked.store.v8i16.p0(, ptr, i32, ) define void @masked_store_nxv8i32( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv8i32: @@ -158,7 +147,6 @@ define void @masked_store_nxv8i32( %val, ptr %a, %val, ptr %a, i32 4, %mask) ret void } -declare void @llvm.masked.store.v8i32.p0(, ptr, i32, ) define void @masked_store_nxv16i8( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv16i8: @@ -169,7 +157,6 @@ define void @masked_store_nxv16i8( %val, ptr %a, %val, ptr %a, i32 1, %mask) ret void } -declare void @llvm.masked.store.v16i8.p0(, ptr, i32, ) define void @masked_store_nxv16i16( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv16i16: @@ -180,7 +167,6 @@ define void @masked_store_nxv16i16( %val, ptr %a, %val, ptr %a, i32 2, %mask) ret void } -declare void @llvm.masked.store.v16i16.p0(, ptr, i32, ) define void @masked_store_nxv16i32( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv16i32: @@ -191,7 +177,6 @@ define void @masked_store_nxv16i32( %val, ptr %a, %val, ptr %a, i32 4, %mask) ret void } -declare void @llvm.masked.store.v16i32.p0(, ptr, i32, ) define void @masked_store_nxv32i8( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv32i8: @@ -202,7 +187,6 @@ define void @masked_store_nxv32i8( %val, ptr %a, %val, ptr %a, i32 1, %mask) ret void } -declare void @llvm.masked.store.v32i8.p0(, ptr, i32, ) define void @masked_store_nxv32i16( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv32i16: @@ -213,7 +197,6 @@ define void @masked_store_nxv32i16( %val, ptr %a, %val, ptr %a, i32 2, %mask) ret void } -declare void @llvm.masked.store.v32i16.p0(, ptr, i32, ) define void @masked_store_nxv64i8( %val, ptr %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv64i8: @@ -224,7 +207,6 @@ define void @masked_store_nxv64i8( %val, ptr %a, %val, ptr %a, i32 4, %mask) ret void } -declare void @llvm.masked.store.v64i8.p0(, ptr, i32, ) define void 
@masked_store_zero_mask( %val, ptr %a) nounwind { ; CHECK-LABEL: masked_store_zero_mask: diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll index 42d0bc57c6b5c..08e25246a6092 100644 --- a/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll +++ b/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vle.mask.nxv1i64( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_nxv1i64_nxv1i64(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -27,13 +20,6 @@ entry: ret %a } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv1i64( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(ptr %0, %1, iXLen %2, ptr %3) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64: ; RV32: # %bb.0: # %entry @@ -63,14 +49,6 @@ entry: ret %b } -declare @llvm.riscv.vlse.mask.nxv1i64( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv1i64_nxv1i64(ptr %0, iXLen %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -88,14 +66,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1iXLen(ptr %0, %1, %2, iXLen %3) nounwind { entry: %a = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen( @@ -108,13 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -132,14 +95,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8( - , - , - , - , - 
iXLen, - iXLen); - define @intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -158,14 +113,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -183,14 +130,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -208,13 +147,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv1i64.nxv1i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf2_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -232,13 +164,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv1i64.nxv1i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf4_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -255,12 +180,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv1i64.nxv1i8( - , - , - , - iXLen, - iXLen); define @intrinsic_vzext_mask_vf8_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv1i64: @@ -279,14 +198,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -304,14 +215,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8( 
%0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -329,14 +232,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -354,14 +249,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -379,14 +266,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -404,14 +283,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -430,13 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -454,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -479,14 +336,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8( - , 
- , - , - , - iXLen, - iXLen); - define @intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -504,15 +353,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -531,15 +371,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -558,15 +389,6 @@ entry: ret %a } -declare @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -585,15 +407,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8( - , - , - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -612,13 +425,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -636,13 +442,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -661,13 +460,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -685,13 +477,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -709,13 +494,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.mask.nxv1f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -733,13 +511,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -758,13 +529,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -782,13 +546,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -807,12 +564,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfsqrt.mask.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -829,13 +580,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.mask.nxv1f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -852,12 +596,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.mask.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -874,14 +612,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -899,14 +629,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -924,12 +646,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv1i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfclass_mask_v_nxv1i16_nxv1f16( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -949,12 +665,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -971,12 +681,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -993,12 +697,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1016,12 +714,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1039,12 +731,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1062,12 +748,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1085,12 +765,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1108,12 +782,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -1131,13 +799,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv1i8( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1155,14 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1181,14 +834,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv1f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -1207,14 +852,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv1i8.iXLen( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1233,14 +870,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1259,11 +888,6 @@ entry: ret %a } -declare @llvm.riscv.vid.mask.nxv1i8( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv1i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1279,12 +903,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv1i8( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv1i8_nxv1i1( %0, %1, 
iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i8_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -1303,12 +921,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbf.mask.nxv1i1( - , - , - , - iXLen); - define @intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -1327,18 +939,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv1f16( - , - , - , - , - iXLen); - -declare @llvm.riscv.vmfeq.nxv1f16( - , - , - iXLen); - define @intrinsic_vmfeq_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1361,13 +961,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv1i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmseq_mask_vx_nxv1i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vmseq_mask_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1397,13 +990,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv1i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmsge_mask_vx_nxv1i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vmsge_mask_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1434,12 +1020,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbf.mask.nxv64i1( - , - , - , - iXLen); - define @intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll index 2e8b6c5fcca22..a1be60e689f20 100644 --- a/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vle.mask.nxv1i64( - , - ptr, - , - iXLen, - iXLen) define 
@intrinsic_vle_mask_nxv1i64_nxv1i64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -26,12 +20,6 @@ entry: ret %a } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv1i64( - , - ptr, - , - iXLen, - iXLen) define @intrinsic_vleff_mask_v_nxv1i64_nxv1i64( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64: ; RV32: # %bb.0: # %entry @@ -61,13 +49,6 @@ entry: ret %b } -declare @llvm.riscv.vlse.mask.nxv1i64( - , - ptr, - iXLen, - , - iXLen, - iXLen) define @intrinsic_vlse_mask_v_nxv1i64_nxv1i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -85,13 +66,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen( - , - ptr, - , - , - iXLen, - iXLen) define @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1iXLen( %0, ptr %1, %2, %3, iXLen %4) nounwind { entry: %a = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen( @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen) define @intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -127,13 +95,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -151,13 +112,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen) define @intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -175,13 +129,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv1i8.i8( - , - , - i8, - 
, - iXLen, - iXLen) define @intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -199,12 +146,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv1i64.nxv1i32( - , - , - , - iXLen, - iXLen) define @intrinsic_vzext_mask_vf2_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -221,12 +162,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv1i64.nxv1i16( - , - , - , - iXLen, - iXLen) define @intrinsic_vzext_mask_vf4_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -242,12 +177,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv1i64.nxv1i8( - , - , - , - iXLen, - iXLen) define @intrinsic_vzext_mask_vf8_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -264,13 +193,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -288,13 +210,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -312,13 +227,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -336,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) 
define @intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -360,13 +261,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -384,13 +278,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -408,12 +295,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen) define @intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -431,12 +312,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8( - , - , - , - , - iXLen, iXLen) define @intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -454,13 +329,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -478,14 +346,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen, - iXLen) define @intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -504,14 +364,6 @@ 
entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen, - iXLen) define @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -530,14 +382,6 @@ entry: ret %a } -declare @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen, - iXLen) define @intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -556,14 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8( - , - , - , - , - iXLen, - iXLen, - iXLen) define @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -582,12 +418,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); define @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -605,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -628,12 +452,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -651,12 +469,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen) define 
@intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -674,12 +486,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.mask.nxv1f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen) define @intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -697,12 +503,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -720,12 +520,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -743,12 +537,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -765,11 +553,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.mask.nxv1f16( - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -786,12 +569,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.mask.nxv1f16( - , - , - , - iXLen, - iXLen) define @intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ 
-808,11 +585,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.mask.nxv1f16( - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -829,13 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -853,13 +618,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -877,11 +635,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv1i16( - , - , - , - iXLen, iXLen) define @intrinsic_vfclass_mask_v_nxv1i16_nxv1f16( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -902,11 +655,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16( - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -923,11 +671,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16( - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -944,11 +687,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16( - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -965,11 +703,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8( - , - , - , - iXLen, iXLen) define @intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -986,11 +719,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16( - , - , - , - iXLen, iXLen) define @intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1007,11 +735,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16( - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1028,11 +751,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32( - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1049,11 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32( - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -1070,12 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv1i8( - , - , - iXLen, - , - iXLen, iXLen) define @intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1093,13 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen) define 
@intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1117,13 +817,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv1f16.f16( - , - , - half, - , - iXLen, - iXLen) define @intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -1141,13 +834,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv1i8.iXLen( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1165,13 +851,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1189,11 +868,6 @@ entry: ret %a } -declare @llvm.riscv.vid.mask.nxv1i8( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1209,12 +883,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv1i8( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv1i8_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i8_nxv1i1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll index ca9b6245a8570..c11cc4f2b498c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll +++ b/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s 
--check-prefixes=CHECK,RV64 -declare @llvm.riscv.vle.mask.nxv1i64( - , - ptr, - , - iXLen, - iXLen) define @intrinsic_vle_mask_nxv1i64_nxv1i64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -26,12 +20,6 @@ entry: ret %a } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv1i64( - , - ptr, - , - iXLen, - iXLen) define @intrinsic_vleff_mask_v_nxv1i64_nxv1i64( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64: ; RV32: # %bb.0: # %entry @@ -61,13 +49,6 @@ entry: ret %b } -declare @llvm.riscv.vlse.mask.nxv1i64( - , - ptr, - iXLen, - , - iXLen, - iXLen) define @intrinsic_vlse_mask_v_nxv1i64_nxv1i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -85,13 +66,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen( - , - ptr, - , - , - iXLen, - iXLen) define @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1iXLen( %0, ptr %1, %2, %3, iXLen %4) nounwind { entry: %a = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen( @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen) define @intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -127,13 +95,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -151,13 +112,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen) define @intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # 
%bb.0: # %entry @@ -175,13 +129,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen) define @intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -199,12 +146,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv1i64.nxv1i32( - , - , - , - iXLen, - iXLen) define @intrinsic_vzext_mask_vf2_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -221,12 +162,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv1i64.nxv1i16( - , - , - , - iXLen, - iXLen) define @intrinsic_vzext_mask_vf4_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -242,12 +177,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv1i64.nxv1i8( - , - , - , - iXLen, - iXLen) define @intrinsic_vzext_mask_vf8_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -264,13 +193,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -288,13 +210,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -312,13 +227,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ 
-336,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -360,13 +261,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -384,13 +278,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -408,12 +295,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen) define @intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -431,12 +312,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8( - , - , - , - , - iXLen, iXLen) define @intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -454,13 +329,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -478,14 +346,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen, - iXLen) define @intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -504,14 +364,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen, - iXLen) define @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -530,14 +382,6 @@ entry: ret %a } -declare @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen, - iXLen) define @intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -556,14 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8( - , - , - , - , - iXLen, - iXLen, - iXLen) define @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -582,12 +418,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); define @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -605,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -628,12 +452,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -651,12 +469,6 @@ entry: 
ret %a } -declare @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -674,12 +486,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.mask.nxv1f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen) define @intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -697,12 +503,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -720,12 +520,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -743,12 +537,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -765,11 +553,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.mask.nxv1f16( - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -786,12 +569,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.mask.nxv1f16( - , - , - , - iXLen, - iXLen) define @intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16( %0, %1, %2, 
iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -808,11 +585,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.mask.nxv1f16( - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -829,13 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -853,13 +618,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -877,11 +635,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv1i16( - , - , - , - iXLen, iXLen) define @intrinsic_vfclass_mask_v_nxv1i16_nxv1f16( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -902,11 +655,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16( - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -923,11 +671,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16( - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -944,11 +687,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16( - , - , - , - iXLen, iXLen, iXLen) define 
@intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -965,11 +703,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8( - , - , - , - iXLen, iXLen) define @intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -986,11 +719,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16( - , - , - , - iXLen, iXLen) define @intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1007,11 +735,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16( - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1028,11 +751,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32( - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1049,11 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32( - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -1070,12 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv1i8( - , - , - iXLen, - , - iXLen, iXLen) define @intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1093,13 +800,6 @@ entry: ret %a } 
-declare @llvm.riscv.vslide1up.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen) define @intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1117,13 +817,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv1f16.f16( - , - , - half, - , - iXLen, - iXLen) define @intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -1141,13 +834,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv1i8.iXLen( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1165,13 +851,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1189,11 +868,6 @@ entry: ret %a } -declare @llvm.riscv.vid.mask.nxv1i8( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1209,12 +883,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv1i8( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv1i8_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i8_nxv1i1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll index df9e84c66cefa..e2b2d7b4a6a34 100644 --- a/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc 
-mtriple=riscv64 -mattr=+v,+zvfh\ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vle.mask.nxv1i64( - , - ptr, - , - iXLen, - iXLen) define @intrinsic_vle_mask_nxv1i64_nxv1i64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -26,12 +20,6 @@ entry: ret %a } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv1i64( - , - ptr, - , - iXLen, - iXLen) define @intrinsic_vleff_mask_v_nxv1i64_nxv1i64( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64: ; RV32: # %bb.0: # %entry @@ -61,13 +49,6 @@ entry: ret %b } -declare @llvm.riscv.vlse.mask.nxv1i64( - , - ptr, - iXLen, - , - iXLen, - iXLen) define @intrinsic_vlse_mask_v_nxv1i64_nxv1i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -85,13 +66,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen( - , - ptr, - , - , - iXLen, - iXLen) define @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1iXLen( %0, ptr %1, %2, %3, iXLen %4) nounwind { entry: %a = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen( @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen) define @intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -127,13 +95,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -151,13 +112,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen) define @intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, 
%3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -175,13 +129,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen) define @intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -199,12 +146,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv1i64.nxv1i32( - , - , - , - iXLen, - iXLen) define @intrinsic_vzext_mask_vf2_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -221,12 +162,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv1i64.nxv1i16( - , - , - , - iXLen, - iXLen) define @intrinsic_vzext_mask_vf4_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -242,12 +177,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv1i64.nxv1i8( - , - , - , - iXLen, - iXLen) define @intrinsic_vzext_mask_vf8_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -264,13 +193,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -288,13 +210,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -312,13 +227,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -336,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -360,13 +261,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -384,13 +278,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -408,12 +295,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen) define @intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -431,12 +312,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8( - , - , - , - , - iXLen, iXLen) define @intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -454,13 +329,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -478,14 +346,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen, - 
iXLen) define @intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -504,14 +364,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen, - iXLen) define @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -530,14 +382,6 @@ entry: ret %a } -declare @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen, - iXLen) define @intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -556,14 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8( - , - , - , - , - iXLen, - iXLen, - iXLen) define @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -582,12 +418,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); define @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -605,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -628,12 +452,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -651,12 +469,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -674,12 +486,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.mask.nxv1f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen) define @intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -697,12 +503,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -720,12 +520,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -743,12 +537,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -765,11 +553,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.mask.nxv1f16( - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -786,12 +569,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfrsqrt7.mask.nxv1f16( - , - , - , - iXLen, - iXLen) define @intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -808,11 +585,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.mask.nxv1f16( - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -829,13 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -853,13 +618,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -877,11 +635,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv1i16( - , - , - , - iXLen, iXLen) define @intrinsic_vfclass_mask_v_nxv1i16_nxv1f16( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -902,11 +655,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16( - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -923,11 +671,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16( - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -944,11 +687,6 @@ entry: ret 
%a } -declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16( - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -965,11 +703,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8( - , - , - , - iXLen, iXLen) define @intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -986,11 +719,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16( - , - , - , - iXLen, iXLen) define @intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1007,11 +735,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16( - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1028,11 +751,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32( - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1049,11 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32( - , - , - , - iXLen, iXLen, iXLen) define @intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -1070,12 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv1i8( - , - , - iXLen, - , - iXLen, iXLen) define @intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1093,13 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen) define @intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1117,13 +817,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv1f16.f16( - , - , - half, - , - iXLen, - iXLen) define @intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -1141,13 +834,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv1i8.iXLen( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1165,13 +851,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv1i8( - , - , - , - , - iXLen, - iXLen) define @intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1189,11 +868,6 @@ entry: ret %a } -declare @llvm.riscv.vid.mask.nxv1i8( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1209,12 +883,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv1i8( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv1i8_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i8_nxv1i1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-vslide1down-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/masked-vslide1down-rv32.ll index 0e4d709836abd..d888e23d6c59c 100644 --- 
a/llvm/test/CodeGen/RISCV/rvv/masked-vslide1down-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/masked-vslide1down-rv32.ll @@ -2,14 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \ ; RUN: < %s | FileCheck %s -declare @llvm.riscv.vslide1down.mask.nxv1i64.i64( - , - , - i64, - , - i32, - i32); - define @intrinsic_vslide1down_mask_tumu_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_tumu_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry @@ -54,7 +46,6 @@ entry: ret %a } - ; Fallback vslide1 to mask undisturbed until InsertVSETVLI supports mask agnostic. define @intrinsic_vslide1down_mask_tuma_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_tuma_vx_nxv1i64_nxv1i64_i64: diff --git a/llvm/test/CodeGen/RISCV/rvv/memcpy-crash-zvl32b.ll b/llvm/test/CodeGen/RISCV/rvv/memcpy-crash-zvl32b.ll index e020fe1a0aa1a..3203ec8614153 100644 --- a/llvm/test/CodeGen/RISCV/rvv/memcpy-crash-zvl32b.ll +++ b/llvm/test/CodeGen/RISCV/rvv/memcpy-crash-zvl32b.ll @@ -14,4 +14,3 @@ entry: ret void } -declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg) #1 diff --git a/llvm/test/CodeGen/RISCV/rvv/memcpy-inline.ll b/llvm/test/CodeGen/RISCV/rvv/memcpy-inline.ll index 2553f563b7d0f..90a8c68b3e96d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/memcpy-inline.ll +++ b/llvm/test/CodeGen/RISCV/rvv/memcpy-inline.ll @@ -11,7 +11,6 @@ ; ---------------------------------------------------------------------- ; Fully unaligned cases - define void @unaligned_memcpy1(ptr nocapture %dest, ptr %src) nounwind { ; RV32-BOTH-LABEL: unaligned_memcpy1: ; RV32-BOTH: # %bb.0: # %entry @@ -645,7 +644,6 @@ entry: ret void } - ; ---------------------------------------------------------------------- ; Fully aligned cases @@ -1031,7 +1029,6 @@ entry: ; ------------------------------------------------------------------------ ; A few 
partially aligned cases - define void @memcpy16_align4(ptr nocapture %dest, ptr nocapture %src) nounwind { ; RV32-LABEL: memcpy16_align4: ; RV32: # %bb.0: # %entry @@ -1112,6 +1109,3 @@ entry: ret i32 0 } - -declare void @llvm.memcpy.inline.p0.p0.i32(ptr nocapture, ptr nocapture, i32, i1) nounwind -declare void @llvm.memcpy.inline.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind diff --git a/llvm/test/CodeGen/RISCV/rvv/memory-args.ll b/llvm/test/CodeGen/RISCV/rvv/memory-args.ll index 8190a82d7035b..d5f8cc3b6ee93 100644 --- a/llvm/test/CodeGen/RISCV/rvv/memory-args.ll +++ b/llvm/test/CodeGen/RISCV/rvv/memory-args.ll @@ -2,13 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+m,+v -O2 < %s \ ; RUN: | FileCheck %s -check-prefix=RV64IV -declare @llvm.riscv.vmacc.nxv64i8.nxv64i8( - , - , - , - i64, - i64); - define @callee( %arg0, %arg1, %arg2) { ; RV64IV-LABEL: callee: ; RV64IV: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/memset-inline.ll b/llvm/test/CodeGen/RISCV/rvv/memset-inline.ll index 2c11bd1ff5dc5..41c744e9347a0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/memset-inline.ll +++ b/llvm/test/CodeGen/RISCV/rvv/memset-inline.ll @@ -9,9 +9,6 @@ ; RUN: | FileCheck %s --check-prefixes=RV64-BOTH,RV64-FAST %struct.x = type { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 } -declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) nounwind -declare void @llvm.memset.inline.p0.i64(ptr nocapture, i8, i64, i1) nounwind - ; ///////////////////////////////////////////////////////////////////////////// define void @memset_1(ptr %a, i8 %value) nounwind { @@ -620,7 +617,6 @@ define void @aligned_memset_zero_8(ptr %a) nounwind { ret void } - define void @aligned_memset_zero_16(ptr %a) nounwind { ; RV32-BOTH-LABEL: aligned_memset_zero_16: ; RV32-BOTH: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll index 65ac424c2359a..ae3b9db74080c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll @@ -12,8 +12,6 @@ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ ; RUN: --check-prefixes=CHECK,RV64 -declare @llvm.masked.gather.nxv1i8.nxv1p0(, i32, , ) - define @mgather_nxv1i8( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv1i8: ; RV32: # %bb.0: @@ -32,8 +30,6 @@ define @mgather_nxv1i8( %ptrs, %v } -declare @llvm.masked.gather.nxv2i8.nxv2p0(, i32, , ) - define @mgather_nxv2i8( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv2i8: ; RV32: # %bb.0: @@ -180,8 +176,6 @@ define @mgather_nxv2i8_zextload_nxv2i64( %p ret %ev } -declare @llvm.masked.gather.nxv4i8.nxv4p0(, i32, , ) - define @mgather_nxv4i8( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv4i8: ; RV32: # %bb.0: @@ -234,8 +228,6 @@ define @mgather_falsemask_nxv4i8( %ptrs, %v } -declare @llvm.masked.gather.nxv8i8.nxv8p0(, i32, , ) - define @mgather_nxv8i8( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv8i8: ; RV32: # %bb.0: @@ -277,8 +269,6 @@ define @mgather_baseidx_nxv8i8(ptr %base, %i ret %v } -declare @llvm.masked.gather.nxv1i16.nxv1p0(, i32, , ) - define @mgather_nxv1i16( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv1i16: ; RV32: # %bb.0: @@ -297,8 +287,6 @@ define @mgather_nxv1i16( %ptrs, %v } -declare @llvm.masked.gather.nxv2i16.nxv2p0(, i32, , ) - define @mgather_nxv2i16( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv2i16: ; RV32: # %bb.0: @@ -403,8 +391,6 @@ define @mgather_nxv2i16_zextload_nxv2i64( % ret %ev } -declare @llvm.masked.gather.nxv4i16.nxv4p0(, i32, , ) - define @mgather_nxv4i16( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv4i16: ; RV32: # %bb.0: @@ -457,8 +443,6 @@ define @mgather_falsemask_nxv4i16( %ptrs, < ret %v } -declare @llvm.masked.gather.nxv8i16.nxv8p0(, i32, , ) - define @mgather_nxv8i16( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv8i16: ; RV32: # %bb.0: @@ -566,8 +550,6 @@ define @mgather_baseidx_nxv8i16(ptr %base, ret %v } -declare @llvm.masked.gather.nxv1i32.nxv1p0(, i32, , ) - define 
@mgather_nxv1i32( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv1i32: ; RV32: # %bb.0: @@ -586,8 +568,6 @@ define @mgather_nxv1i32( %ptrs, %v } -declare @llvm.masked.gather.nxv2i32.nxv2p0(, i32, , ) - define @mgather_nxv2i32( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv2i32: ; RV32: # %bb.0: @@ -650,8 +630,6 @@ define @mgather_nxv2i32_zextload_nxv2i64( % ret %ev } -declare @llvm.masked.gather.nxv4i32.nxv4p0(, i32, , ) - define @mgather_nxv4i32( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv4i32: ; RV32: # %bb.0: @@ -703,8 +681,6 @@ define @mgather_falsemask_nxv4i32( %ptrs, < ret %v } -declare @llvm.masked.gather.nxv8i32.nxv8p0(, i32, , ) - define @mgather_nxv8i32( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv8i32: ; RV32: # %bb.0: @@ -877,8 +853,6 @@ define @mgather_baseidx_nxv8i32(ptr %base, ret %v } -declare @llvm.masked.gather.nxv1i64.nxv1p0(, i32, , ) - define @mgather_nxv1i64( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv1i64: ; RV32: # %bb.0: @@ -897,8 +871,6 @@ define @mgather_nxv1i64( %ptrs, %v } -declare @llvm.masked.gather.nxv2i64.nxv2p0(, i32, , ) - define @mgather_nxv2i64( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv2i64: ; RV32: # %bb.0: @@ -917,8 +889,6 @@ define @mgather_nxv2i64( %ptrs, %v } -declare @llvm.masked.gather.nxv4i64.nxv4p0(, i32, , ) - define @mgather_nxv4i64( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv4i64: ; RV32: # %bb.0: @@ -964,8 +934,6 @@ define @mgather_falsemask_nxv4i64( %ptrs, < ret %v } -declare @llvm.masked.gather.nxv8i64.nxv8p0(, i32, , ) - define @mgather_nxv8i64( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv8i64: ; RV32: # %bb.0: @@ -1211,11 +1179,6 @@ define @mgather_baseidx_nxv8i64(ptr %base, ret %v } -declare @llvm.masked.gather.nxv16i64.nxv16p0(, i32, , ) - -declare @llvm.vector.insert.nxv8i64.nxv16i64(, , i64 %idx) -declare @llvm.vector.insert.nxv8p0.nxv16p0(, , i64 %idx) - define void @mgather_nxv16i64( %ptrs0, %ptrs1, %m, %passthru0, %passthru1, ptr %out) { ; RV32-LABEL: 
mgather_nxv16i64: ; RV32: # %bb.0: @@ -1262,8 +1225,6 @@ define void @mgather_nxv16i64( %ptrs0, %ptr ret void } -declare @llvm.masked.gather.nxv1bf16.nxv1p0(, i32, , ) - define @mgather_nxv1bf16( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv1bf16: ; RV32: # %bb.0: @@ -1282,8 +1243,6 @@ define @mgather_nxv1bf16( %ptrs, %v } -declare @llvm.masked.gather.nxv2bf16.nxv2p0(, i32, , ) - define @mgather_nxv2bf16( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv2bf16: ; RV32: # %bb.0: @@ -1302,8 +1261,6 @@ define @mgather_nxv2bf16( %ptrs, %v } -declare @llvm.masked.gather.nxv4bf16.nxv4p0(, i32, , ) - define @mgather_nxv4bf16( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv4bf16: ; RV32: # %bb.0: @@ -1356,8 +1313,6 @@ define @mgather_falsemask_nxv4bf16( %ptr ret %v } -declare @llvm.masked.gather.nxv8bf16.nxv8p0(, i32, , ) - define @mgather_nxv8bf16( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv8bf16: ; RV32: # %bb.0: @@ -1465,8 +1420,6 @@ define @mgather_baseidx_nxv8bf16(ptr %base, %v } -declare @llvm.masked.gather.nxv1f16.nxv1p0(, i32, , ) - define @mgather_nxv1f16( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv1f16: ; RV32: # %bb.0: @@ -1485,8 +1438,6 @@ define @mgather_nxv1f16( %ptrs, %v } -declare @llvm.masked.gather.nxv2f16.nxv2p0(, i32, , ) - define @mgather_nxv2f16( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv2f16: ; RV32: # %bb.0: @@ -1505,8 +1456,6 @@ define @mgather_nxv2f16( %ptrs, %v } -declare @llvm.masked.gather.nxv4f16.nxv4p0(, i32, , ) - define @mgather_nxv4f16( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv4f16: ; RV32: # %bb.0: @@ -1559,8 +1508,6 @@ define @mgather_falsemask_nxv4f16( %ptrs, ret %v } -declare @llvm.masked.gather.nxv8f16.nxv8p0(, i32, , ) - define @mgather_nxv8f16( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv8f16: ; RV32: # %bb.0: @@ -1668,8 +1615,6 @@ define @mgather_baseidx_nxv8f16(ptr %base, %v } -declare @llvm.masked.gather.nxv1f32.nxv1p0(, i32, , ) - define @mgather_nxv1f32( %ptrs, %m, %passthru) { ; RV32-LABEL: 
mgather_nxv1f32: ; RV32: # %bb.0: @@ -1688,8 +1633,6 @@ define @mgather_nxv1f32( %ptrs, %v } -declare @llvm.masked.gather.nxv2f32.nxv2p0(, i32, , ) - define @mgather_nxv2f32( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv2f32: ; RV32: # %bb.0: @@ -1708,8 +1651,6 @@ define @mgather_nxv2f32( %ptrs, %v } -declare @llvm.masked.gather.nxv4f32.nxv4p0(, i32, , ) - define @mgather_nxv4f32( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv4f32: ; RV32: # %bb.0: @@ -1761,8 +1702,6 @@ define @mgather_falsemask_nxv4f32( %ptrs, ret %v } -declare @llvm.masked.gather.nxv8f32.nxv8p0(, i32, , ) - define @mgather_nxv8f32( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv8f32: ; RV32: # %bb.0: @@ -1935,8 +1874,6 @@ define @mgather_baseidx_nxv8f32(ptr %base, %v } -declare @llvm.masked.gather.nxv1f64.nxv1p0(, i32, , ) - define @mgather_nxv1f64( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv1f64: ; RV32: # %bb.0: @@ -1955,8 +1892,6 @@ define @mgather_nxv1f64( %ptrs, %v } -declare @llvm.masked.gather.nxv2f64.nxv2p0(, i32, , ) - define @mgather_nxv2f64( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv2f64: ; RV32: # %bb.0: @@ -1975,8 +1910,6 @@ define @mgather_nxv2f64( %ptrs, %v } -declare @llvm.masked.gather.nxv4f64.nxv4p0(, i32, , ) - define @mgather_nxv4f64( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv4f64: ; RV32: # %bb.0: @@ -2022,8 +1955,6 @@ define @mgather_falsemask_nxv4f64( %ptrs ret %v } -declare @llvm.masked.gather.nxv8f64.nxv8p0(, i32, , ) - define @mgather_nxv8f64( %ptrs, %m, %passthru) { ; RV32-LABEL: mgather_nxv8f64: ; RV32: # %bb.0: @@ -2269,8 +2200,6 @@ define @mgather_baseidx_nxv8f64(ptr %base, %v } -declare @llvm.masked.gather.nxv16i8.nxv16p0(, i32, , ) - define @mgather_baseidx_nxv16i8(ptr %base, %idxs, %m, %passthru) { ; RV32-LABEL: mgather_baseidx_nxv16i8: ; RV32: # %bb.0: @@ -2302,8 +2231,6 @@ define @mgather_baseidx_nxv16i8(ptr %base, ret %v } -declare @llvm.masked.gather.nxv32i8.nxv32p0(, i32, , ) - define @mgather_baseidx_nxv32i8(ptr %base, %idxs, %m, 
%passthru) { ; RV32-LABEL: mgather_baseidx_nxv32i8: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/mixed-float-bf16-arith.ll b/llvm/test/CodeGen/RISCV/rvv/mixed-float-bf16-arith.ll index 489323b323110..0f3ebb9e625ad 100644 --- a/llvm/test/CodeGen/RISCV/rvv/mixed-float-bf16-arith.ll +++ b/llvm/test/CodeGen/RISCV/rvv/mixed-float-bf16-arith.ll @@ -4,24 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfadd.nxv1f16.nxv1f16( - , - , - , - iXLen, iXLen); - -declare @llvm.riscv.vadd.nxv1i32.nxv1i32( - , - , - , - iXLen); - -declare @llvm.riscv.vfadd.nxv1bf16.nxv1bf16( - , - , - , - iXLen, iXLen); - define @test_half_bf16( %0, %1, iXLen %2, %3, %4, ptr %ptr) nounwind { ; CHECK-LABEL: test_half_bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/mscatter-combine.ll b/llvm/test/CodeGen/RISCV/rvv/mscatter-combine.ll index ac26a014aaa64..00300cc09607d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/mscatter-combine.ll +++ b/llvm/test/CodeGen/RISCV/rvv/mscatter-combine.ll @@ -124,6 +124,3 @@ define void @stride_one_store(i64 %n, ptr %p) { ret void } -declare @llvm.stepvector.nxv1i64() -declare void @llvm.masked.scatter.nxv2i32.nxv2p0(, , i32, ) -declare void @llvm.masked.scatter.nxv1i64.nxv1p0(, , i32, ) diff --git a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll index c961d1a9e32e4..5eb6553aaba79 100644 --- a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll @@ -12,8 +12,6 @@ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ ; RUN: --check-prefixes=CHECK,RV64 -declare void @llvm.masked.scatter.nxv1i8.nxv1p0(, , i32, ) - define void @mscatter_nxv1i8( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv1i8: ; RV32: # %bb.0: @@ -30,8 +28,6 @@ define void @mscatter_nxv1i8( %val, %ptrs, < ret void } -declare void 
@llvm.masked.scatter.nxv2i8.nxv2p0(, , i32, ) - define void @mscatter_nxv2i8( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv2i8: ; RV32: # %bb.0: @@ -117,8 +113,6 @@ define void @mscatter_nxv2i64_truncstore_nxv2i8( %val, , , i32, ) - define void @mscatter_nxv4i8( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv4i8: ; RV32: # %bb.0: @@ -159,8 +153,6 @@ define void @mscatter_falsemask_nxv4i8( %val, , , i32, ) - define void @mscatter_nxv8i8( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv8i8: ; RV32: # %bb.0: @@ -198,8 +190,6 @@ define void @mscatter_baseidx_nxv8i8( %val, ptr %base, , , i32, ) - define void @mscatter_nxv1i16( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv1i16: ; RV32: # %bb.0: @@ -216,8 +206,6 @@ define void @mscatter_nxv1i16( %val, %ptrs, ret void } -declare void @llvm.masked.scatter.nxv2i16.nxv2p0(, , i32, ) - define void @mscatter_nxv2i16( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv2i16: ; RV32: # %bb.0: @@ -276,8 +264,6 @@ define void @mscatter_nxv2i64_truncstore_nxv2i16( %val, , , i32, ) - define void @mscatter_nxv4i16( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv4i16: ; RV32: # %bb.0: @@ -318,8 +304,6 @@ define void @mscatter_falsemask_nxv4i16( %val, , , i32, ) - define void @mscatter_nxv8i16( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv8i16: ; RV32: # %bb.0: @@ -418,8 +402,6 @@ define void @mscatter_baseidx_nxv8i16( %val, ptr %base, , , i32, ) - define void @mscatter_nxv1i32( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv1i32: ; RV32: # %bb.0: @@ -436,8 +418,6 @@ define void @mscatter_nxv1i32( %val, %ptrs, ret void } -declare void @llvm.masked.scatter.nxv2i32.nxv2p0(, , i32, ) - define void @mscatter_nxv2i32( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv2i32: ; RV32: # %bb.0: @@ -473,8 +453,6 @@ define void @mscatter_nxv2i64_truncstore_nxv2i32( %val, , , i32, ) - define void @mscatter_nxv4i32( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv4i32: ; RV32: # %bb.0: @@ -515,8 +493,6 @@ define void @mscatter_falsemask_nxv4i32( %val, , , i32, ) - define 
void @mscatter_nxv8i32( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv8i32: ; RV32: # %bb.0: @@ -675,8 +651,6 @@ define void @mscatter_baseidx_nxv8i32( %val, ptr %base, , , i32, ) - define void @mscatter_nxv1i64( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv1i64: ; RV32: # %bb.0: @@ -693,8 +667,6 @@ define void @mscatter_nxv1i64( %val, %ptrs, ret void } -declare void @llvm.masked.scatter.nxv2i64.nxv2p0(, , i32, ) - define void @mscatter_nxv2i64( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv2i64: ; RV32: # %bb.0: @@ -711,8 +683,6 @@ define void @mscatter_nxv2i64( %val, %ptrs, ret void } -declare void @llvm.masked.scatter.nxv4i64.nxv4p0(, , i32, ) - define void @mscatter_nxv4i64( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv4i64: ; RV32: # %bb.0: @@ -753,8 +723,6 @@ define void @mscatter_falsemask_nxv4i64( %val, , , i32, ) - define void @mscatter_nxv8i64( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv8i64: ; RV32: # %bb.0: @@ -980,8 +948,6 @@ define void @mscatter_baseidx_nxv8i64( %val, ptr %base, , , i32, ) - define void @mscatter_nxv1bf16( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv1bf16: ; RV32: # %bb.0: @@ -998,8 +964,6 @@ define void @mscatter_nxv1bf16( %val, %p ret void } -declare void @llvm.masked.scatter.nxv2bf16.nxv2p0(, , i32, ) - define void @mscatter_nxv2bf16( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv2bf16: ; RV32: # %bb.0: @@ -1016,8 +980,6 @@ define void @mscatter_nxv2bf16( %val, %p ret void } -declare void @llvm.masked.scatter.nxv4bf16.nxv4p0(, , i32, ) - define void @mscatter_nxv4bf16( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv4bf16: ; RV32: # %bb.0: @@ -1058,8 +1020,6 @@ define void @mscatter_falsemask_nxv4bf16( %val, , , i32, ) - define void @mscatter_nxv8bf16( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv8bf16: ; RV32: # %bb.0: @@ -1158,8 +1118,6 @@ define void @mscatter_baseidx_nxv8bf16( %val, ptr %base, , , i32, ) - define void @mscatter_nxv1f16( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv1f16: ; RV32: # %bb.0: @@ -1176,8 +1134,6 @@ 
define void @mscatter_nxv1f16( %val, %ptrs ret void } -declare void @llvm.masked.scatter.nxv2f16.nxv2p0(, , i32, ) - define void @mscatter_nxv2f16( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv2f16: ; RV32: # %bb.0: @@ -1194,8 +1150,6 @@ define void @mscatter_nxv2f16( %val, %ptrs ret void } -declare void @llvm.masked.scatter.nxv4f16.nxv4p0(, , i32, ) - define void @mscatter_nxv4f16( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv4f16: ; RV32: # %bb.0: @@ -1236,8 +1190,6 @@ define void @mscatter_falsemask_nxv4f16( %val, , , i32, ) - define void @mscatter_nxv8f16( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv8f16: ; RV32: # %bb.0: @@ -1336,8 +1288,6 @@ define void @mscatter_baseidx_nxv8f16( %val, ptr %base, , , i32, ) - define void @mscatter_nxv1f32( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv1f32: ; RV32: # %bb.0: @@ -1354,8 +1304,6 @@ define void @mscatter_nxv1f32( %val, %ptr ret void } -declare void @llvm.masked.scatter.nxv2f32.nxv2p0(, , i32, ) - define void @mscatter_nxv2f32( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv2f32: ; RV32: # %bb.0: @@ -1372,8 +1320,6 @@ define void @mscatter_nxv2f32( %val, %ptr ret void } -declare void @llvm.masked.scatter.nxv4f32.nxv4p0(, , i32, ) - define void @mscatter_nxv4f32( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv4f32: ; RV32: # %bb.0: @@ -1414,8 +1360,6 @@ define void @mscatter_falsemask_nxv4f32( %val, , , i32, ) - define void @mscatter_nxv8f32( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv8f32: ; RV32: # %bb.0: @@ -1574,8 +1518,6 @@ define void @mscatter_baseidx_nxv8f32( %val, ptr %base, , , i32, ) - define void @mscatter_nxv1f64( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv1f64: ; RV32: # %bb.0: @@ -1592,8 +1534,6 @@ define void @mscatter_nxv1f64( %val, %pt ret void } -declare void @llvm.masked.scatter.nxv2f64.nxv2p0(, , i32, ) - define void @mscatter_nxv2f64( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv2f64: ; RV32: # %bb.0: @@ -1610,8 +1550,6 @@ define void @mscatter_nxv2f64( %val, %pt ret void } -declare void 
@llvm.masked.scatter.nxv4f64.nxv4p0(, , i32, ) - define void @mscatter_nxv4f64( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv4f64: ; RV32: # %bb.0: @@ -1652,8 +1590,6 @@ define void @mscatter_falsemask_nxv4f64( %val, , , i32, ) - define void @mscatter_nxv8f64( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv8f64: ; RV32: # %bb.0: @@ -1879,11 +1815,6 @@ define void @mscatter_baseidx_nxv8f64( %val, ptr %base, , , i32, ) - -declare @llvm.vector.insert.nxv8f64.nxv16f64(, , i64) -declare @llvm.vector.insert.nxv8p0.nxv16p0(, , i64) - define void @mscatter_nxv16f64( %val0, %val1, %ptrs0, %ptrs1, %m) { ; RV32-LABEL: mscatter_nxv16f64: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/mutate-prior-vsetvli-avl.ll b/llvm/test/CodeGen/RISCV/rvv/mutate-prior-vsetvli-avl.ll index ec83c4e87cebf..11305d9efbea1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/mutate-prior-vsetvli-avl.ll +++ b/llvm/test/CodeGen/RISCV/rvv/mutate-prior-vsetvli-avl.ll @@ -3,7 +3,6 @@ @__const.test.var_45 = private unnamed_addr constant [2 x i8] c"\D1S", align 1 @__const.test.var_101 = private unnamed_addr constant [2 x i8] c"\830", align 1 -; Function Attrs: nounwind vscale_range(2,1024) define dso_local void @test(ptr nocapture noundef %var_99) { ; CHECK-LABEL: test: ; CHECK: # %bb.0: # %entry @@ -36,16 +35,3 @@ entry: ret void } -declare @llvm.riscv.vle.nxv32i8.i64(, ptr nocapture, i64) #1 -declare @llvm.riscv.vmul.nxv32i8.i8.i64(, , i8, i64) #2 -declare i64 @llvm.riscv.vsetvli.i64(i64, i64 immarg, i64 immarg) #3 -declare i8 @llvm.riscv.vmv.x.s.nxv32i8() #2 -declare @llvm.riscv.vssra.nxv32i8.nxv32i8.i64(, , , i64, i64) #3 -declare @llvm.riscv.vmsleu.nxv32i8.i8.i64(, i8, i64) #2 -declare @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64(, , , , i64) #2 -declare void @llvm.riscv.vse.nxv32i8.i64(, ptr nocapture, i64) #4 - -attributes #1 = { nofree nounwind memory(read) } -attributes #2 = { nofree nosync nounwind memory(none) } -attributes #3 = { nounwind } -attributes #4 = { nounwind memory(write) } diff --git 
a/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll b/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll index acc68491d5aee..c8cd78eda799d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll +++ b/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll @@ -2089,49 +2089,3 @@ define @reverse_nxv12i64( %a) { ret %res } -declare @llvm.vector.reverse.nxv2i1() -declare @llvm.vector.reverse.nxv4i1() -declare @llvm.vector.reverse.nxv8i1() -declare @llvm.vector.reverse.nxv16i1() -declare @llvm.vector.reverse.nxv32i1() -declare @llvm.vector.reverse.nxv64i1() -declare @llvm.vector.reverse.nxv1i8() -declare @llvm.vector.reverse.nxv2i8() -declare @llvm.vector.reverse.nxv4i8() -declare @llvm.vector.reverse.nxv8i8() -declare @llvm.vector.reverse.nxv16i8() -declare @llvm.vector.reverse.nxv32i8() -declare @llvm.vector.reverse.nxv64i8() -declare @llvm.vector.reverse.nxv1i16() -declare @llvm.vector.reverse.nxv2i16() -declare @llvm.vector.reverse.nxv4i16() -declare @llvm.vector.reverse.nxv8i16() -declare @llvm.vector.reverse.nxv16i16() -declare @llvm.vector.reverse.nxv32i16() -declare @llvm.vector.reverse.nxv1i32() -declare @llvm.vector.reverse.nxv2i32() -declare @llvm.vector.reverse.nxv4i32() -declare @llvm.vector.reverse.nxv8i32() -declare @llvm.vector.reverse.nxv16i32() -declare @llvm.vector.reverse.nxv1i64() -declare @llvm.vector.reverse.nxv2i64() -declare @llvm.vector.reverse.nxv4i64() -declare @llvm.vector.reverse.nxv8i64() -declare @llvm.vector.reverse.nxv1f16() -declare @llvm.vector.reverse.nxv2f16() -declare @llvm.vector.reverse.nxv4f16() -declare @llvm.vector.reverse.nxv8f16() -declare @llvm.vector.reverse.nxv16f16() -declare @llvm.vector.reverse.nxv32f16() -declare @llvm.vector.reverse.nxv1f32() -declare @llvm.vector.reverse.nxv2f32() -declare @llvm.vector.reverse.nxv4f32() -declare @llvm.vector.reverse.nxv8f32() -declare @llvm.vector.reverse.nxv16f32() -declare @llvm.vector.reverse.nxv1f64() -declare 
@llvm.vector.reverse.nxv2f64() -declare @llvm.vector.reverse.nxv4f64() -declare @llvm.vector.reverse.nxv8f64() -declare @llvm.vector.reverse.nxv3i64() -declare @llvm.vector.reverse.nxv6i64() -declare @llvm.vector.reverse.nxv12i64() diff --git a/llvm/test/CodeGen/RISCV/rvv/narrow-shift-extend.ll b/llvm/test/CodeGen/RISCV/rvv/narrow-shift-extend.ll index 70c2691069276..1f4390c07be61 100644 --- a/llvm/test/CodeGen/RISCV/rvv/narrow-shift-extend.ll +++ b/llvm/test/CodeGen/RISCV/rvv/narrow-shift-extend.ll @@ -1,12 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s -declare @llvm.riscv.vloxei.nxv4i32.nxv4i64( - , - ptr, - , - i64); - define @test_vloxei(ptr %ptr, %offset, i64 %vl) { ; CHECK-LABEL: test_vloxei: ; CHECK: # %bb.0: # %entry @@ -68,7 +62,6 @@ entry: } ; Test use vp.zext to extend. -declare @llvm.vp.zext.nxvi64.nxv1i8(, , i32) define @test_vloxei4(ptr %ptr, %offset, %m, i32 zeroext %vl) { ; CHECK-LABEL: test_vloxei4: ; CHECK: # %bb.0: # %entry @@ -91,11 +84,6 @@ entry: } ; Test orignal extnened type is enough narrow. 
-declare @llvm.riscv.vloxei.nxv4i32.nxv4i16( - , - ptr, - , - i64); define @test_vloxei5(ptr %ptr, %offset, i64 %vl) { ; CHECK-LABEL: test_vloxei5: ; CHECK: # %bb.0: # %entry @@ -160,14 +148,6 @@ entry: ret %res } -declare @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @test_vloxei_mask(ptr %ptr, %offset, %m, i64 %vl) { ; CHECK-LABEL: test_vloxei_mask: ; CHECK: # %bb.0: # %entry @@ -189,12 +169,6 @@ entry: ret %res } -declare @llvm.riscv.vluxei.nxv4i32.nxv4i64( - , - ptr, - , - i64); - define @test_vluxei(ptr %ptr, %offset, i64 %vl) { ; CHECK-LABEL: test_vluxei: ; CHECK: # %bb.0: # %entry @@ -215,14 +189,6 @@ entry: ret %res } -declare @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @test_vluxei_mask(ptr %ptr, %offset, %m, i64 %vl) { ; CHECK-LABEL: test_vluxei_mask: ; CHECK: # %bb.0: # %entry @@ -244,12 +210,6 @@ entry: ret %res } -declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i64( - , - ptr, - , - i64); - define void @test_vsoxei( %val, ptr %ptr, %offset, i64 %vl) { ; CHECK-LABEL: test_vsoxei: ; CHECK: # %bb.0: # %entry @@ -270,13 +230,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64( - , - ptr, - , - , - i64); - define void @test_vsoxei_mask( %val, ptr %ptr, %offset, %m, i64 %vl) { ; CHECK-LABEL: test_vsoxei_mask: ; CHECK: # %bb.0: # %entry @@ -298,12 +251,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i64( - , - ptr, - , - i64); - define void @test_vsuxei( %val, ptr %ptr, %offset, i64 %vl) { ; CHECK-LABEL: test_vsuxei: ; CHECK: # %bb.0: # %entry @@ -324,13 +271,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64( - , - ptr, - , - , - i64); - define void @test_vsuxei_mask( %val, ptr %ptr, %offset, %m, i64 %vl) { ; CHECK-LABEL: test_vsuxei_mask: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll index 64e305f130dd7..67e7f7c7fbd42 100644 --- 
a/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll @@ -12,8 +12,6 @@ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ ; RUN: --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN -declare @llvm.vp.nearbyint.nxv1bf16(, , i32) - define @vp_nearbyint_nxv1bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_nxv1bf16: ; CHECK: # %bb.0: @@ -66,8 +64,6 @@ define @vp_nearbyint_nxv1bf16_unmasked( %v } -declare @llvm.vp.nearbyint.nxv2bf16(, , i32) - define @vp_nearbyint_nxv2bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_nxv2bf16: ; CHECK: # %bb.0: @@ -120,8 +116,6 @@ define @vp_nearbyint_nxv2bf16_unmasked( %v } -declare @llvm.vp.nearbyint.nxv4bf16(, , i32) - define @vp_nearbyint_nxv4bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_nxv4bf16: ; CHECK: # %bb.0: @@ -174,8 +168,6 @@ define @vp_nearbyint_nxv4bf16_unmasked( %v } -declare @llvm.vp.nearbyint.nxv8bf16(, , i32) - define @vp_nearbyint_nxv8bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_nxv8bf16: ; CHECK: # %bb.0: @@ -228,8 +220,6 @@ define @vp_nearbyint_nxv8bf16_unmasked( %v } -declare @llvm.vp.nearbyint.nxv16bf16(, , i32) - define @vp_nearbyint_nxv16bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_nxv16bf16: ; CHECK: # %bb.0: @@ -282,8 +272,6 @@ define @vp_nearbyint_nxv16bf16_unmasked( %v } -declare @llvm.vp.nearbyint.nxv32bf16(, , i32) - define @vp_nearbyint_nxv32bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_nxv32bf16: ; CHECK: # %bb.0: @@ -402,7 +390,6 @@ define @vp_nearbyint_nxv32bf16_unmasked( @llvm.vp.nearbyint.nxv32bf16( %va, splat (i1 true), i32 %evl) ret %v } -declare @llvm.vp.nearbyint.nxv1f16(, , i32) define @vp_nearbyint_nxv1f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_nearbyint_nxv1f16: @@ -490,8 +477,6 @@ define @vp_nearbyint_nxv1f16_unmasked( %v ret %v } -declare @llvm.vp.nearbyint.nxv2f16(, , i32) - define @vp_nearbyint_nxv2f16( %va, %m, i32 zeroext %evl) { ; 
ZVFH-LABEL: vp_nearbyint_nxv2f16: ; ZVFH: # %bb.0: @@ -578,8 +563,6 @@ define @vp_nearbyint_nxv2f16_unmasked( %v ret %v } -declare @llvm.vp.nearbyint.nxv4f16(, , i32) - define @vp_nearbyint_nxv4f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_nearbyint_nxv4f16: ; ZVFH: # %bb.0: @@ -666,8 +649,6 @@ define @vp_nearbyint_nxv4f16_unmasked( %v ret %v } -declare @llvm.vp.nearbyint.nxv8f16(, , i32) - define @vp_nearbyint_nxv8f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_nearbyint_nxv8f16: ; ZVFH: # %bb.0: @@ -756,8 +737,6 @@ define @vp_nearbyint_nxv8f16_unmasked( %v ret %v } -declare @llvm.vp.nearbyint.nxv16f16(, , i32) - define @vp_nearbyint_nxv16f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_nearbyint_nxv16f16: ; ZVFH: # %bb.0: @@ -846,8 +825,6 @@ define @vp_nearbyint_nxv16f16_unmasked( ret %v } -declare @llvm.vp.nearbyint.nxv32f16(, , i32) - define @vp_nearbyint_nxv32f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_nearbyint_nxv32f16: ; ZVFH: # %bb.0: @@ -1003,8 +980,6 @@ define @vp_nearbyint_nxv32f16_unmasked( ret %v } -declare @llvm.vp.nearbyint.nxv1f32(, , i32) - define @vp_nearbyint_nxv1f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_nxv1f32: ; CHECK: # %bb.0: @@ -1045,8 +1020,6 @@ define @vp_nearbyint_nxv1f32_unmasked( ret %v } -declare @llvm.vp.nearbyint.nxv2f32(, , i32) - define @vp_nearbyint_nxv2f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_nxv2f32: ; CHECK: # %bb.0: @@ -1087,8 +1060,6 @@ define @vp_nearbyint_nxv2f32_unmasked( ret %v } -declare @llvm.vp.nearbyint.nxv4f32(, , i32) - define @vp_nearbyint_nxv4f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_nxv4f32: ; CHECK: # %bb.0: @@ -1131,8 +1102,6 @@ define @vp_nearbyint_nxv4f32_unmasked( ret %v } -declare @llvm.vp.nearbyint.nxv8f32(, , i32) - define @vp_nearbyint_nxv8f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_nxv8f32: ; CHECK: # %bb.0: @@ -1175,8 +1144,6 @@ define @vp_nearbyint_nxv8f32_unmasked( ret %v } -declare 
@llvm.vp.nearbyint.nxv16f32(, , i32) - define @vp_nearbyint_nxv16f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_nxv16f32: ; CHECK: # %bb.0: @@ -1219,8 +1186,6 @@ define @vp_nearbyint_nxv16f32_unmasked( %v } -declare @llvm.vp.nearbyint.nxv1f64(, , i32) - define @vp_nearbyint_nxv1f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_nearbyint_nxv1f64: ; RV32ZVFH: # %bb.0: @@ -1361,8 +1326,6 @@ define @vp_nearbyint_nxv1f64_unmasked( %v } -declare @llvm.vp.nearbyint.nxv2f64(, , i32) - define @vp_nearbyint_nxv2f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_nearbyint_nxv2f64: ; RV32ZVFH: # %bb.0: @@ -1511,8 +1474,6 @@ define @vp_nearbyint_nxv2f64_unmasked( %v } -declare @llvm.vp.nearbyint.nxv4f64(, , i32) - define @vp_nearbyint_nxv4f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_nearbyint_nxv4f64: ; RV32ZVFH: # %bb.0: @@ -1661,8 +1622,6 @@ define @vp_nearbyint_nxv4f64_unmasked( %v } -declare @llvm.vp.nearbyint.nxv7f64(, , i32) - define @vp_nearbyint_nxv7f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_nearbyint_nxv7f64: ; RV32ZVFH: # %bb.0: @@ -1811,8 +1770,6 @@ define @vp_nearbyint_nxv7f64_unmasked( %v } -declare @llvm.vp.nearbyint.nxv8f64(, , i32) - define @vp_nearbyint_nxv8f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_nearbyint_nxv8f64: ; RV32ZVFH: # %bb.0: @@ -1962,7 +1919,6 @@ define @vp_nearbyint_nxv8f64_unmasked( @llvm.vp.nearbyint.nxv16f64(, , i32) define @vp_nearbyint_nxv16f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_nearbyint_nxv16f64: diff --git a/llvm/test/CodeGen/RISCV/rvv/pass-fast-math-flags-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/pass-fast-math-flags-sdnode.ll index c6662e092aa5a..0654fe8bd8d66 100644 --- a/llvm/test/CodeGen/RISCV/rvv/pass-fast-math-flags-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/pass-fast-math-flags-sdnode.ll @@ -1,8 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py ; RUN: llc < %s -mtriple=riscv64 -mattr=+v -stop-after=finalize-isel | FileCheck 
%s -declare @llvm.vp.fmul.nxv1f64( %x, %y, %m, i32 %vl) - define @foo( %x, %y, %z, %m, i32 %vl) { ; CHECK-LABEL: name: foo ; CHECK: bb.0 (%ir-block.0): diff --git a/llvm/test/CodeGen/RISCV/rvv/pr63459.ll b/llvm/test/CodeGen/RISCV/rvv/pr63459.ll index 08a2e588330b5..ff964ece0bcb1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/pr63459.ll +++ b/llvm/test/CodeGen/RISCV/rvv/pr63459.ll @@ -17,4 +17,3 @@ bb: ret void } -declare void @llvm.vp.scatter.nxv2i32.nxv2p0(, , , i32) diff --git a/llvm/test/CodeGen/RISCV/rvv/reg-alloc-reserve-bp.ll b/llvm/test/CodeGen/RISCV/rvv/reg-alloc-reserve-bp.ll index ce8db766234e8..998554a68d47d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/reg-alloc-reserve-bp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/reg-alloc-reserve-bp.ll @@ -71,10 +71,5 @@ entry: ret void } -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1 - declare void @bar(i32 noundef signext, i32 noundef signext, i32 noundef signext, i32 noundef signext, i32 noundef signext, i32 noundef signext, i32 noundef signext, i32 noundef signext, ptr noundef) -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1 - -attributes #1 = { argmemonly mustprogress nofree nosync nounwind willreturn } diff --git a/llvm/test/CodeGen/RISCV/rvv/regalloc-fast-crash.ll b/llvm/test/CodeGen/RISCV/rvv/regalloc-fast-crash.ll index 06bce82efb313..8a2f6bb9b0074 100644 --- a/llvm/test/CodeGen/RISCV/rvv/regalloc-fast-crash.ll +++ b/llvm/test/CodeGen/RISCV/rvv/regalloc-fast-crash.ll @@ -4,8 +4,6 @@ ; This test previously crashed with an error "ran out of registers during register allocation" -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr, , i32, i32) - define void @test_vsseg2_mask_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %mask, i32 %vl) { ; CHECK-LABEL: test_vsseg2_mask_nxv16i16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/reproducer-pr146855.ll b/llvm/test/CodeGen/RISCV/rvv/reproducer-pr146855.ll 
index 2d64defe8c7b1..c574bd758bd2d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/reproducer-pr146855.ll +++ b/llvm/test/CodeGen/RISCV/rvv/reproducer-pr146855.ll @@ -57,16 +57,8 @@ middle.block: ; preds = %vector.body ret i32 %13 } -; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: read) -declare @llvm.masked.load.nxv4i32.p0(ptr captures(none), i32 immarg, , ) #1 - -; Function Attrs: nocallback nofree nosync nounwind speculatable willreturn memory(none) -declare i32 @llvm.vector.reduce.add.nxv4i32() #2 - ; uselistorder directives uselistorder ptr @llvm.masked.load.nxv4i32.p0, { 1, 0 } uselistorder ptr @llvm.vector.reduce.add.nxv4i32, { 1, 0 } attributes #0 = { "target-features"="+v" } -attributes #1 = { nocallback nofree nosync nounwind willreturn memory(argmem: read) } -attributes #2 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) } diff --git a/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll index 091caa6c65fd2..380287dd555c9 100644 --- a/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll @@ -12,8 +12,6 @@ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ ; RUN: --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN -declare @llvm.vp.rint.nxv1bf16(, , i32) - define @vp_rint_nxv1bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_nxv1bf16: ; CHECK: # %bb.0: @@ -62,8 +60,6 @@ define @vp_rint_nxv1bf16_unmasked( %v ret %v } -declare @llvm.vp.rint.nxv2bf16(, , i32) - define @vp_rint_nxv2bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_nxv2bf16: ; CHECK: # %bb.0: @@ -112,8 +108,6 @@ define @vp_rint_nxv2bf16_unmasked( %v ret %v } -declare @llvm.vp.rint.nxv4bf16(, , i32) - define @vp_rint_nxv4bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_nxv4bf16: ; CHECK: # %bb.0: @@ -162,8 +156,6 @@ define @vp_rint_nxv4bf16_unmasked( %v ret %v } -declare @llvm.vp.rint.nxv8bf16(, , i32) - define @vp_rint_nxv8bf16( %va, %m, i32 zeroext %evl) 
{ ; CHECK-LABEL: vp_rint_nxv8bf16: ; CHECK: # %bb.0: @@ -212,8 +204,6 @@ define @vp_rint_nxv8bf16_unmasked( %v ret %v } -declare @llvm.vp.rint.nxv16bf16(, , i32) - define @vp_rint_nxv16bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_nxv16bf16: ; CHECK: # %bb.0: @@ -262,8 +252,6 @@ define @vp_rint_nxv16bf16_unmasked( ret %v } -declare @llvm.vp.rint.nxv32bf16(, , i32) - define @vp_rint_nxv32bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_nxv32bf16: ; CHECK: # %bb.0: @@ -374,7 +362,6 @@ define @vp_rint_nxv32bf16_unmasked( %v = call @llvm.vp.rint.nxv32bf16( %va, splat (i1 true), i32 %evl) ret %v } -declare @llvm.vp.rint.nxv1f16(, , i32) define @vp_rint_nxv1f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_rint_nxv1f16: @@ -454,8 +441,6 @@ define @vp_rint_nxv1f16_unmasked( %va, i3 ret %v } -declare @llvm.vp.rint.nxv2f16(, , i32) - define @vp_rint_nxv2f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_rint_nxv2f16: ; ZVFH: # %bb.0: @@ -534,8 +519,6 @@ define @vp_rint_nxv2f16_unmasked( %va, i3 ret %v } -declare @llvm.vp.rint.nxv4f16(, , i32) - define @vp_rint_nxv4f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_rint_nxv4f16: ; ZVFH: # %bb.0: @@ -614,8 +597,6 @@ define @vp_rint_nxv4f16_unmasked( %va, i3 ret %v } -declare @llvm.vp.rint.nxv8f16(, , i32) - define @vp_rint_nxv8f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_rint_nxv8f16: ; ZVFH: # %bb.0: @@ -696,8 +677,6 @@ define @vp_rint_nxv8f16_unmasked( %va, i3 ret %v } -declare @llvm.vp.rint.nxv16f16(, , i32) - define @vp_rint_nxv16f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_rint_nxv16f16: ; ZVFH: # %bb.0: @@ -778,8 +757,6 @@ define @vp_rint_nxv16f16_unmasked( %va, ret %v } -declare @llvm.vp.rint.nxv32f16(, , i32) - define @vp_rint_nxv32f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_rint_nxv32f16: ; ZVFH: # %bb.0: @@ -923,8 +900,6 @@ define @vp_rint_nxv32f16_unmasked( %va, ret %v } -declare @llvm.vp.rint.nxv1f32(, , i32) - define @vp_rint_nxv1f32( %va, %m, i32 zeroext %evl) { ; 
CHECK-LABEL: vp_rint_nxv1f32: ; CHECK: # %bb.0: @@ -961,8 +936,6 @@ define @vp_rint_nxv1f32_unmasked( %va, ret %v } -declare @llvm.vp.rint.nxv2f32(, , i32) - define @vp_rint_nxv2f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_nxv2f32: ; CHECK: # %bb.0: @@ -999,8 +972,6 @@ define @vp_rint_nxv2f32_unmasked( %va, ret %v } -declare @llvm.vp.rint.nxv4f32(, , i32) - define @vp_rint_nxv4f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_nxv4f32: ; CHECK: # %bb.0: @@ -1039,8 +1010,6 @@ define @vp_rint_nxv4f32_unmasked( %va, ret %v } -declare @llvm.vp.rint.nxv8f32(, , i32) - define @vp_rint_nxv8f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_nxv8f32: ; CHECK: # %bb.0: @@ -1079,8 +1048,6 @@ define @vp_rint_nxv8f32_unmasked( %va, ret %v } -declare @llvm.vp.rint.nxv16f32(, , i32) - define @vp_rint_nxv16f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_nxv16f32: ; CHECK: # %bb.0: @@ -1119,8 +1086,6 @@ define @vp_rint_nxv16f32_unmasked( %v ret %v } -declare @llvm.vp.rint.nxv1f64(, , i32) - define @vp_rint_nxv1f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_rint_nxv1f64: ; RV32ZVFH: # %bb.0: @@ -1245,8 +1210,6 @@ define @vp_rint_nxv1f64_unmasked( %va ret %v } -declare @llvm.vp.rint.nxv2f64(, , i32) - define @vp_rint_nxv2f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_rint_nxv2f64: ; RV32ZVFH: # %bb.0: @@ -1379,8 +1342,6 @@ define @vp_rint_nxv2f64_unmasked( %va ret %v } -declare @llvm.vp.rint.nxv4f64(, , i32) - define @vp_rint_nxv4f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_rint_nxv4f64: ; RV32ZVFH: # %bb.0: @@ -1513,8 +1474,6 @@ define @vp_rint_nxv4f64_unmasked( %va ret %v } -declare @llvm.vp.rint.nxv7f64(, , i32) - define @vp_rint_nxv7f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_rint_nxv7f64: ; RV32ZVFH: # %bb.0: @@ -1647,8 +1606,6 @@ define @vp_rint_nxv7f64_unmasked( %va ret %v } -declare @llvm.vp.rint.nxv8f64(, , i32) - define @vp_rint_nxv8f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_rint_nxv8f64: ; 
RV32ZVFH: # %bb.0: @@ -1782,7 +1739,6 @@ define @vp_rint_nxv8f64_unmasked( %va } ; Test splitting. -declare @llvm.vp.rint.nxv16f64(, , i32) define @vp_rint_nxv16f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_rint_nxv16f64: diff --git a/llvm/test/CodeGen/RISCV/rvv/riscv-codegenprepare-asm.ll b/llvm/test/CodeGen/RISCV/rvv/riscv-codegenprepare-asm.ll index 4e5f6e0f65489..b8b377d70d3b7 100644 --- a/llvm/test/CodeGen/RISCV/rvv/riscv-codegenprepare-asm.ll +++ b/llvm/test/CodeGen/RISCV/rvv/riscv-codegenprepare-asm.ll @@ -1,9 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 ; RUN: llc < %s -mtriple=riscv64 -mattr=+v | FileCheck %s -declare i64 @llvm.vscale.i64() -declare float @llvm.vector.reduce.fadd.nxv4f32(float, ) - define float @reduce_fadd(ptr %f) { ; CHECK-LABEL: reduce_fadd: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/riscv-codegenprepare.ll b/llvm/test/CodeGen/RISCV/rvv/riscv-codegenprepare.ll index 8967fb8bf01ac..ffbcb65c40c33 100644 --- a/llvm/test/CodeGen/RISCV/rvv/riscv-codegenprepare.ll +++ b/llvm/test/CodeGen/RISCV/rvv/riscv-codegenprepare.ll @@ -1,9 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 ; RUN: opt %s -S -riscv-codegenprepare -mtriple=riscv64 -mattr=+v | FileCheck %s -declare i64 @llvm.vscale.i64() -declare float @llvm.vector.reduce.fadd.nxv4f32(float, ) - define float @reduce_fadd(ptr %f) { ; CHECK-LABEL: define float @reduce_fadd( ; CHECK-SAME: ptr [[F:%.*]]) #[[ATTR2:[0-9]+]] { diff --git a/llvm/test/CodeGen/RISCV/rvv/round-vp.ll b/llvm/test/CodeGen/RISCV/rvv/round-vp.ll index d1ea5aa76268a..37c036d38148a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/round-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/round-vp.ll @@ -12,8 +12,6 @@ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ ; RUN: --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN -declare @llvm.vp.round.nxv1bf16(, , i32) - define 
@vp_round_nxv1bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_nxv1bf16: ; CHECK: # %bb.0: @@ -66,8 +64,6 @@ define @vp_round_nxv1bf16_unmasked( % ret %v } -declare @llvm.vp.round.nxv2bf16(, , i32) - define @vp_round_nxv2bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_nxv2bf16: ; CHECK: # %bb.0: @@ -120,8 +116,6 @@ define @vp_round_nxv2bf16_unmasked( % ret %v } -declare @llvm.vp.round.nxv4bf16(, , i32) - define @vp_round_nxv4bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_nxv4bf16: ; CHECK: # %bb.0: @@ -174,8 +168,6 @@ define @vp_round_nxv4bf16_unmasked( % ret %v } -declare @llvm.vp.round.nxv8bf16(, , i32) - define @vp_round_nxv8bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_nxv8bf16: ; CHECK: # %bb.0: @@ -228,8 +220,6 @@ define @vp_round_nxv8bf16_unmasked( % ret %v } -declare @llvm.vp.round.nxv16bf16(, , i32) - define @vp_round_nxv16bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_nxv16bf16: ; CHECK: # %bb.0: @@ -282,8 +272,6 @@ define @vp_round_nxv16bf16_unmasked( %v } -declare @llvm.vp.round.nxv32bf16(, , i32) - define @vp_round_nxv32bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_nxv32bf16: ; CHECK: # %bb.0: @@ -402,7 +390,6 @@ define @vp_round_nxv32bf16_unmasked( @llvm.vp.round.nxv32bf16( %va, splat (i1 true), i32 %evl) ret %v } -declare @llvm.vp.round.nxv1f16(, , i32) define @vp_round_nxv1f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_round_nxv1f16: @@ -490,8 +477,6 @@ define @vp_round_nxv1f16_unmasked( %va, i ret %v } -declare @llvm.vp.round.nxv2f16(, , i32) - define @vp_round_nxv2f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_round_nxv2f16: ; ZVFH: # %bb.0: @@ -578,8 +563,6 @@ define @vp_round_nxv2f16_unmasked( %va, i ret %v } -declare @llvm.vp.round.nxv4f16(, , i32) - define @vp_round_nxv4f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_round_nxv4f16: ; ZVFH: # %bb.0: @@ -666,8 +649,6 @@ define @vp_round_nxv4f16_unmasked( %va, i ret %v } -declare @llvm.vp.round.nxv8f16(, , i32) - 
define @vp_round_nxv8f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_round_nxv8f16: ; ZVFH: # %bb.0: @@ -756,8 +737,6 @@ define @vp_round_nxv8f16_unmasked( %va, i ret %v } -declare @llvm.vp.round.nxv16f16(, , i32) - define @vp_round_nxv16f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_round_nxv16f16: ; ZVFH: # %bb.0: @@ -846,8 +825,6 @@ define @vp_round_nxv16f16_unmasked( %va ret %v } -declare @llvm.vp.round.nxv32f16(, , i32) - define @vp_round_nxv32f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_round_nxv32f16: ; ZVFH: # %bb.0: @@ -1003,8 +980,6 @@ define @vp_round_nxv32f16_unmasked( %va ret %v } -declare @llvm.vp.round.nxv1f32(, , i32) - define @vp_round_nxv1f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_nxv1f32: ; CHECK: # %bb.0: @@ -1045,8 +1020,6 @@ define @vp_round_nxv1f32_unmasked( %va, ret %v } -declare @llvm.vp.round.nxv2f32(, , i32) - define @vp_round_nxv2f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_nxv2f32: ; CHECK: # %bb.0: @@ -1087,8 +1060,6 @@ define @vp_round_nxv2f32_unmasked( %va, ret %v } -declare @llvm.vp.round.nxv4f32(, , i32) - define @vp_round_nxv4f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_nxv4f32: ; CHECK: # %bb.0: @@ -1131,8 +1102,6 @@ define @vp_round_nxv4f32_unmasked( %va, ret %v } -declare @llvm.vp.round.nxv8f32(, , i32) - define @vp_round_nxv8f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_nxv8f32: ; CHECK: # %bb.0: @@ -1175,8 +1144,6 @@ define @vp_round_nxv8f32_unmasked( %va, ret %v } -declare @llvm.vp.round.nxv16f32(, , i32) - define @vp_round_nxv16f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_round_nxv16f32: ; CHECK: # %bb.0: @@ -1219,8 +1186,6 @@ define @vp_round_nxv16f32_unmasked( % ret %v } -declare @llvm.vp.round.nxv1f64(, , i32) - define @vp_round_nxv1f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_round_nxv1f64: ; RV32ZVFH: # %bb.0: @@ -1361,8 +1326,6 @@ define @vp_round_nxv1f64_unmasked( %v ret %v } -declare @llvm.vp.round.nxv2f64(, , i32) - define 
@vp_round_nxv2f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_round_nxv2f64: ; RV32ZVFH: # %bb.0: @@ -1511,8 +1474,6 @@ define @vp_round_nxv2f64_unmasked( %v ret %v } -declare @llvm.vp.round.nxv4f64(, , i32) - define @vp_round_nxv4f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_round_nxv4f64: ; RV32ZVFH: # %bb.0: @@ -1661,8 +1622,6 @@ define @vp_round_nxv4f64_unmasked( %v ret %v } -declare @llvm.vp.round.nxv7f64(, , i32) - define @vp_round_nxv7f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_round_nxv7f64: ; RV32ZVFH: # %bb.0: @@ -1811,8 +1770,6 @@ define @vp_round_nxv7f64_unmasked( %v ret %v } -declare @llvm.vp.round.nxv8f64(, , i32) - define @vp_round_nxv8f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_round_nxv8f64: ; RV32ZVFH: # %bb.0: @@ -1962,7 +1919,6 @@ define @vp_round_nxv8f64_unmasked( %v } ; Test splitting. -declare @llvm.vp.round.nxv16f64(, , i32) define @vp_round_nxv16f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_round_nxv16f64: diff --git a/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll b/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll index 23d0e97c1c82b..37a9ec1c0a8aa 100644 --- a/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll @@ -12,8 +12,6 @@ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ ; RUN: --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN -declare @llvm.vp.roundeven.nxv1bf16(, , i32) - define @vp_roundeven_nxv1bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundeven_nxv1bf16: ; CHECK: # %bb.0: @@ -66,8 +64,6 @@ define @vp_roundeven_nxv1bf16_unmasked( %v } -declare @llvm.vp.roundeven.nxv2bf16(, , i32) - define @vp_roundeven_nxv2bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundeven_nxv2bf16: ; CHECK: # %bb.0: @@ -120,8 +116,6 @@ define @vp_roundeven_nxv2bf16_unmasked( %v } -declare @llvm.vp.roundeven.nxv4bf16(, , i32) - define @vp_roundeven_nxv4bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundeven_nxv4bf16: ; CHECK: # %bb.0: @@ 
-174,8 +168,6 @@ define @vp_roundeven_nxv4bf16_unmasked( %v } -declare @llvm.vp.roundeven.nxv8bf16(, , i32) - define @vp_roundeven_nxv8bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundeven_nxv8bf16: ; CHECK: # %bb.0: @@ -228,8 +220,6 @@ define @vp_roundeven_nxv8bf16_unmasked( %v } -declare @llvm.vp.roundeven.nxv16bf16(, , i32) - define @vp_roundeven_nxv16bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundeven_nxv16bf16: ; CHECK: # %bb.0: @@ -282,8 +272,6 @@ define @vp_roundeven_nxv16bf16_unmasked( %v } -declare @llvm.vp.roundeven.nxv32bf16(, , i32) - define @vp_roundeven_nxv32bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundeven_nxv32bf16: ; CHECK: # %bb.0: @@ -402,7 +390,6 @@ define @vp_roundeven_nxv32bf16_unmasked( @llvm.vp.roundeven.nxv32bf16( %va, splat (i1 true), i32 %evl) ret %v } -declare @llvm.vp.roundeven.nxv1f16(, , i32) define @vp_roundeven_nxv1f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundeven_nxv1f16: @@ -490,8 +477,6 @@ define @vp_roundeven_nxv1f16_unmasked( %v ret %v } -declare @llvm.vp.roundeven.nxv2f16(, , i32) - define @vp_roundeven_nxv2f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundeven_nxv2f16: ; ZVFH: # %bb.0: @@ -578,8 +563,6 @@ define @vp_roundeven_nxv2f16_unmasked( %v ret %v } -declare @llvm.vp.roundeven.nxv4f16(, , i32) - define @vp_roundeven_nxv4f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundeven_nxv4f16: ; ZVFH: # %bb.0: @@ -666,8 +649,6 @@ define @vp_roundeven_nxv4f16_unmasked( %v ret %v } -declare @llvm.vp.roundeven.nxv8f16(, , i32) - define @vp_roundeven_nxv8f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundeven_nxv8f16: ; ZVFH: # %bb.0: @@ -756,8 +737,6 @@ define @vp_roundeven_nxv8f16_unmasked( %v ret %v } -declare @llvm.vp.roundeven.nxv16f16(, , i32) - define @vp_roundeven_nxv16f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundeven_nxv16f16: ; ZVFH: # %bb.0: @@ -846,8 +825,6 @@ define @vp_roundeven_nxv16f16_unmasked( ret %v } -declare @llvm.vp.roundeven.nxv32f16(, , 
i32) - define @vp_roundeven_nxv32f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundeven_nxv32f16: ; ZVFH: # %bb.0: @@ -1003,8 +980,6 @@ define @vp_roundeven_nxv32f16_unmasked( ret %v } -declare @llvm.vp.roundeven.nxv1f32(, , i32) - define @vp_roundeven_nxv1f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundeven_nxv1f32: ; CHECK: # %bb.0: @@ -1045,8 +1020,6 @@ define @vp_roundeven_nxv1f32_unmasked( ret %v } -declare @llvm.vp.roundeven.nxv2f32(, , i32) - define @vp_roundeven_nxv2f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundeven_nxv2f32: ; CHECK: # %bb.0: @@ -1087,8 +1060,6 @@ define @vp_roundeven_nxv2f32_unmasked( ret %v } -declare @llvm.vp.roundeven.nxv4f32(, , i32) - define @vp_roundeven_nxv4f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundeven_nxv4f32: ; CHECK: # %bb.0: @@ -1131,8 +1102,6 @@ define @vp_roundeven_nxv4f32_unmasked( ret %v } -declare @llvm.vp.roundeven.nxv8f32(, , i32) - define @vp_roundeven_nxv8f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundeven_nxv8f32: ; CHECK: # %bb.0: @@ -1175,8 +1144,6 @@ define @vp_roundeven_nxv8f32_unmasked( ret %v } -declare @llvm.vp.roundeven.nxv16f32(, , i32) - define @vp_roundeven_nxv16f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundeven_nxv16f32: ; CHECK: # %bb.0: @@ -1219,8 +1186,6 @@ define @vp_roundeven_nxv16f32_unmasked( %v } -declare @llvm.vp.roundeven.nxv1f64(, , i32) - define @vp_roundeven_nxv1f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_roundeven_nxv1f64: ; RV32ZVFH: # %bb.0: @@ -1361,8 +1326,6 @@ define @vp_roundeven_nxv1f64_unmasked( %v } -declare @llvm.vp.roundeven.nxv2f64(, , i32) - define @vp_roundeven_nxv2f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_roundeven_nxv2f64: ; RV32ZVFH: # %bb.0: @@ -1511,8 +1474,6 @@ define @vp_roundeven_nxv2f64_unmasked( %v } -declare @llvm.vp.roundeven.nxv4f64(, , i32) - define @vp_roundeven_nxv4f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_roundeven_nxv4f64: ; RV32ZVFH: # %bb.0: @@ -1661,8 +1622,6 
@@ define @vp_roundeven_nxv4f64_unmasked( %v } -declare @llvm.vp.roundeven.nxv7f64(, , i32) - define @vp_roundeven_nxv7f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_roundeven_nxv7f64: ; RV32ZVFH: # %bb.0: @@ -1811,8 +1770,6 @@ define @vp_roundeven_nxv7f64_unmasked( %v } -declare @llvm.vp.roundeven.nxv8f64(, , i32) - define @vp_roundeven_nxv8f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_roundeven_nxv8f64: ; RV32ZVFH: # %bb.0: @@ -1962,7 +1919,6 @@ define @vp_roundeven_nxv8f64_unmasked( @llvm.vp.roundeven.nxv16f64(, , i32) define @vp_roundeven_nxv16f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_roundeven_nxv16f64: diff --git a/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll b/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll index 4d8066d12c9ad..5553b988fec97 100644 --- a/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll @@ -12,8 +12,6 @@ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ ; RUN: --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN -declare @llvm.vp.roundtozero.nxv1bf16(, , i32) - define @vp_roundtozero_nxv1bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundtozero_nxv1bf16: ; CHECK: # %bb.0: @@ -66,8 +64,6 @@ define @vp_roundtozero_nxv1bf16_unmasked( %v } -declare @llvm.vp.roundtozero.nxv2bf16(, , i32) - define @vp_roundtozero_nxv2bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundtozero_nxv2bf16: ; CHECK: # %bb.0: @@ -120,8 +116,6 @@ define @vp_roundtozero_nxv2bf16_unmasked( %v } -declare @llvm.vp.roundtozero.nxv4bf16(, , i32) - define @vp_roundtozero_nxv4bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundtozero_nxv4bf16: ; CHECK: # %bb.0: @@ -174,8 +168,6 @@ define @vp_roundtozero_nxv4bf16_unmasked( %v } -declare @llvm.vp.roundtozero.nxv8bf16(, , i32) - define @vp_roundtozero_nxv8bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundtozero_nxv8bf16: ; CHECK: # %bb.0: @@ -228,8 +220,6 @@ define @vp_roundtozero_nxv8bf16_unmasked( %v } -declare 
@llvm.vp.roundtozero.nxv16bf16(, , i32) - define @vp_roundtozero_nxv16bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundtozero_nxv16bf16: ; CHECK: # %bb.0: @@ -282,8 +272,6 @@ define @vp_roundtozero_nxv16bf16_unmasked( %v } -declare @llvm.vp.roundtozero.nxv32bf16(, , i32) - define @vp_roundtozero_nxv32bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundtozero_nxv32bf16: ; CHECK: # %bb.0: @@ -402,7 +390,6 @@ define @vp_roundtozero_nxv32bf16_unmasked( @llvm.vp.roundtozero.nxv32bf16( %va, splat (i1 true), i32 %evl) ret %v } -declare @llvm.vp.roundtozero.nxv1f16(, , i32) define @vp_roundtozero_nxv1f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundtozero_nxv1f16: @@ -490,8 +477,6 @@ define @vp_roundtozero_nxv1f16_unmasked( ret %v } -declare @llvm.vp.roundtozero.nxv2f16(, , i32) - define @vp_roundtozero_nxv2f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundtozero_nxv2f16: ; ZVFH: # %bb.0: @@ -578,8 +563,6 @@ define @vp_roundtozero_nxv2f16_unmasked( ret %v } -declare @llvm.vp.roundtozero.nxv4f16(, , i32) - define @vp_roundtozero_nxv4f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundtozero_nxv4f16: ; ZVFH: # %bb.0: @@ -666,8 +649,6 @@ define @vp_roundtozero_nxv4f16_unmasked( ret %v } -declare @llvm.vp.roundtozero.nxv8f16(, , i32) - define @vp_roundtozero_nxv8f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundtozero_nxv8f16: ; ZVFH: # %bb.0: @@ -756,8 +737,6 @@ define @vp_roundtozero_nxv8f16_unmasked( ret %v } -declare @llvm.vp.roundtozero.nxv16f16(, , i32) - define @vp_roundtozero_nxv16f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundtozero_nxv16f16: ; ZVFH: # %bb.0: @@ -846,8 +825,6 @@ define @vp_roundtozero_nxv16f16_unmasked( %v } -declare @llvm.vp.roundtozero.nxv32f16(, , i32) - define @vp_roundtozero_nxv32f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundtozero_nxv32f16: ; ZVFH: # %bb.0: @@ -1003,8 +980,6 @@ define @vp_roundtozero_nxv32f16_unmasked( %v } -declare @llvm.vp.roundtozero.nxv1f32(, , i32) - define 
@vp_roundtozero_nxv1f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundtozero_nxv1f32: ; CHECK: # %bb.0: @@ -1045,8 +1020,6 @@ define @vp_roundtozero_nxv1f32_unmasked( %v } -declare @llvm.vp.roundtozero.nxv2f32(, , i32) - define @vp_roundtozero_nxv2f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundtozero_nxv2f32: ; CHECK: # %bb.0: @@ -1087,8 +1060,6 @@ define @vp_roundtozero_nxv2f32_unmasked( %v } -declare @llvm.vp.roundtozero.nxv4f32(, , i32) - define @vp_roundtozero_nxv4f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundtozero_nxv4f32: ; CHECK: # %bb.0: @@ -1131,8 +1102,6 @@ define @vp_roundtozero_nxv4f32_unmasked( %v } -declare @llvm.vp.roundtozero.nxv8f32(, , i32) - define @vp_roundtozero_nxv8f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundtozero_nxv8f32: ; CHECK: # %bb.0: @@ -1175,8 +1144,6 @@ define @vp_roundtozero_nxv8f32_unmasked( %v } -declare @llvm.vp.roundtozero.nxv16f32(, , i32) - define @vp_roundtozero_nxv16f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_roundtozero_nxv16f32: ; CHECK: # %bb.0: @@ -1219,8 +1186,6 @@ define @vp_roundtozero_nxv16f32_unmasked( %v } -declare @llvm.vp.roundtozero.nxv1f64(, , i32) - define @vp_roundtozero_nxv1f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_roundtozero_nxv1f64: ; RV32ZVFH: # %bb.0: @@ -1361,8 +1326,6 @@ define @vp_roundtozero_nxv1f64_unmasked( %v } -declare @llvm.vp.roundtozero.nxv2f64(, , i32) - define @vp_roundtozero_nxv2f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_roundtozero_nxv2f64: ; RV32ZVFH: # %bb.0: @@ -1511,8 +1474,6 @@ define @vp_roundtozero_nxv2f64_unmasked( %v } -declare @llvm.vp.roundtozero.nxv4f64(, , i32) - define @vp_roundtozero_nxv4f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_roundtozero_nxv4f64: ; RV32ZVFH: # %bb.0: @@ -1661,8 +1622,6 @@ define @vp_roundtozero_nxv4f64_unmasked( %v } -declare @llvm.vp.roundtozero.nxv7f64(, , i32) - define @vp_roundtozero_nxv7f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_roundtozero_nxv7f64: ; 
RV32ZVFH: # %bb.0: @@ -1811,8 +1770,6 @@ define @vp_roundtozero_nxv7f64_unmasked( %v } -declare @llvm.vp.roundtozero.nxv8f64(, , i32) - define @vp_roundtozero_nxv8f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_roundtozero_nxv8f64: ; RV32ZVFH: # %bb.0: @@ -1962,7 +1919,6 @@ define @vp_roundtozero_nxv8f64_unmasked( @llvm.vp.roundtozero.nxv16f64(, , i32) define @vp_roundtozero_nxv16f64( %va, %m, i32 zeroext %evl) { ; RV32ZVFH-LABEL: vp_roundtozero_nxv16f64: diff --git a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll index 36ac7d9ec4e91..f05f6ab16c5e3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll @@ -269,5 +269,4 @@ define @foo( %a, %z } -declare @llvm.riscv.vfadd.nxv1f64.nxv1f64( %passthru, %a, %b, i32, i32 %gvl) declare i32 @puts(ptr); diff --git a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll index 83e16ded22db1..2ffb15b7af545 100644 --- a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll @@ -592,8 +592,3 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2), ptr , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2), ptr , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2), ptr , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3), ptr , i32, i32) diff --git a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll 
b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll index 9e77f488bddd0..138422c5a50e2 100644 --- a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll @@ -14,7 +14,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zcmp,+prefer-vsetvli-over-read-vlenb -O2 < %s \ ; RUN: | FileCheck --check-prefix=SPILL-O2-ZCMP-VSETVLI %s - @.str = private unnamed_addr constant [6 x i8] c"hello\00", align 1 define @foo( %a, %b, %c, i64 %gvl) nounwind @@ -262,5 +261,4 @@ define @foo( %a, %z } -declare @llvm.riscv.vfadd.nxv1f64.nxv1f64( %passthru, %a, %b, i64, i64 %gvl) declare i32 @puts(ptr); diff --git a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll index ab9ce5173123d..d39f7cc3c7d57 100644 --- a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll @@ -592,8 +592,3 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2), ptr , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2), ptr , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2), ptr , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3), ptr , i64, i64) diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-out-arguments.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-out-arguments.ll index eaa8d03ed156f..3e43bb68c79b9 100644 --- a/llvm/test/CodeGen/RISCV/rvv/rvv-out-arguments.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rvv-out-arguments.ll @@ -198,10 +198,4 @@ entry: ret i32 0 } -declare void 
@llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg) - -declare i64 @llvm.riscv.vsetvli.i64(i64, i64 immarg, i64 immarg) - -declare @llvm.riscv.vle.nxv16i32.i64(, ptr nocapture, i64) - attributes #0 = { noinline nounwind optnone "frame-pointer"="all" } diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-masked-vops.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-masked-vops.ll index 53ca205f6bf63..52e28dacc378d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-masked-vops.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-masked-vops.ll @@ -1,9 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc < %s -mtriple=riscv64 -mattr=+v | FileCheck %s -declare @llvm.riscv.vmerge.nxv2i32.nxv2i32(, , , , i64); -declare @llvm.riscv.vmerge.nxv2f32.nxv2f32(, , , , i64); - define @vpmerge_vadd( %passthru, %x, %y, %m, i64 %vl) { ; CHECK-LABEL: vpmerge_vadd: ; CHECK: # %bb.0: @@ -14,7 +11,6 @@ define @vpmerge_vadd( %passthru, @llvm.riscv.vmerge.nxv2i32.nxv2i32( %passthru, %passthru, %a, splat (i1 -1), i64 %vl) ret %b } -declare @llvm.riscv.vadd.mask.nxv2i32.nxv2i32(, , , , i64, i64) define @vpmerge_vsub( %passthru, %x, %y, %m, i64 %vl) { ; CHECK-LABEL: vpmerge_vsub: @@ -26,7 +22,6 @@ define @vpmerge_vsub( %passthru, @llvm.riscv.vmerge.nxv2i32.nxv2i32( %passthru, %passthru, %a, splat (i1 -1), i64 %vl) ret %b } -declare @llvm.riscv.vsub.mask.nxv2i32.nxv2i32(, , , , i64, i64) define @vpmerge_vfadd( %passthru, %x, %y, %m, i64 %vl) { ; CHECK-LABEL: vpmerge_vfadd: @@ -39,7 +34,6 @@ define @vpmerge_vfadd( %passthru, %b } -declare @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32(, , , , i64, i64, i64) define @vpmerge_vfsub( %passthru, %x, %y, %m, i64 %vl) { ; CHECK-LABEL: vpmerge_vfsub: @@ -51,7 +45,6 @@ define @vpmerge_vfsub( %passthru, @llvm.riscv.vmerge.nxv2f32.nxv2f32( %passthru, %passthru, %a, splat (i1 -1), i64 %vl) ret %b } -declare @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32(, , , , i64, i64, i64) define 
@vpmerge_vwadd( %passthru, %x, %y, %m, i64 %vl) { ; CHECK-LABEL: vpmerge_vwadd: @@ -63,7 +56,6 @@ define @vpmerge_vwadd( %passthru, @llvm.riscv.vmerge.nxv2i32.nxv2i32( %passthru, %passthru, %a, splat (i1 -1), i64 %vl) ret %b } -declare @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16(, , , , i64, i64) define @vpmerge_vle( %passthru, ptr %p, %m, i64 %vl) { ; CHECK-LABEL: vpmerge_vle: @@ -79,9 +71,7 @@ define @vpmerge_vle( %passthru, ptr %p, @llvm.riscv.vmerge.nxv2i32.nxv2i32( %passthru, %passthru, %a, splat (i1 -1), i64 %vl) ret %b } -declare @llvm.riscv.vle.mask.nxv2i32(, ptr, , i64, i64) -declare @llvm.riscv.vslideup.mask.nxv2i32(, , i64, , i64, i64) define @vpmerge_vslideup( %passthru, %v, i64 %x, %m, i64 %vl) { ; CHECK-LABEL: vpmerge_vslideup: ; CHECK: # %bb.0: @@ -93,7 +83,6 @@ define @vpmerge_vslideup( %passthru, %b } -declare @llvm.riscv.vslidedown.mask.nxv2i32(, , i64, , i64, i64) define @vpmerge_vslidedown( %passthru, %v, i64 %x, %m, i64 %vl) { ; CHECK-LABEL: vpmerge_vslidedown: ; CHECK: # %bb.0: @@ -105,7 +94,6 @@ define @vpmerge_vslidedown( %passthru, %b } -declare @llvm.riscv.vslide1up.mask.nxv2i32(, , i32, , i64, i64) define @vpmerge_vslide1up( %passthru, %v, i32 %x, %m, i64 %vl) { ; CHECK-LABEL: vpmerge_vslide1up: ; CHECK: # %bb.0: @@ -117,7 +105,6 @@ define @vpmerge_vslide1up( %passthru, %b } -declare @llvm.riscv.vslide1down.mask.nxv2i32(, , i32, , i64, i64) define @vpmerge_vslide1down( %passthru, %v, i32 %x, %m, i64 %vl) { ; CHECK-LABEL: vpmerge_vslide1down: ; CHECK: # %bb.0: @@ -209,8 +196,6 @@ define @vmerge_larger_vl_poison_passthru( % } ; Test VFCVT_RM -declare @llvm.floor.nxv2f32() -declare @llvm.vp.merge.nxv2i32(, , , i32) define @vmerge_vfcvt_rm( %passthru, %a, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmerge_vfcvt_rm: ; CHECK: # %bb.0: # %entry @@ -227,7 +212,6 @@ entry: } ; Test VIOTA_M -declare @llvm.riscv.viota.mask.nxv2i32(, , , i64, i64) define @vpmerge_viota( %passthru, %m, %vm, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_viota: ; CHECK: # 
%bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll index 364831f530747..acd9519bb5a8e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll @@ -1,13 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc < %s -mtriple=riscv64 -mattr=+v | FileCheck %s -declare @llvm.vp.merge.nxv2i16(, , , i32) -declare @llvm.vp.merge.nxv2i32(, , , i32) -declare @llvm.vp.merge.nxv2f32(, , , i32) -declare @llvm.vp.merge.nxv2f64(, , , i32) - ; Test binary operator with vp.merge and vp.smax. -declare @llvm.vp.add.nxv2i32(, , , i32) define @vpmerge_vpadd( %passthru, %x, %y, %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpadd: ; CHECK: # %bb.0: @@ -20,7 +14,6 @@ define @vpmerge_vpadd( %passthru, @llvm.vp.icmp.nxv2i32(, , metadata, , i32) define @vpmerge_vpadd2( %passthru, %x, %y, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpadd2: ; CHECK: # %bb.0: @@ -48,7 +41,6 @@ define @vpmerge_vpadd3( %passthru, @llvm.vp.fadd.nxv2f32(, , , i32) define @vpmerge_vpfadd( %passthru, %x, %y, %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpfadd: ; CHECK: # %bb.0: @@ -61,7 +53,6 @@ define @vpmerge_vpfadd( %passthru, @llvm.riscv.vrgatherei16.vv.nxv2i32.i64(, , , i64) define @vpmerge_vrgatherei16( %passthru, %x, %y, %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vrgatherei16: ; CHECK: # %bb.0: @@ -75,7 +66,6 @@ define @vpmerge_vrgatherei16( %passthru, @llvm.vp.fptosi.nxv2i16.nxv2f32(, , i32) define @vpmerge_vpfptosi( %passthru, %x, %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpfptosi: ; CHECK: # %bb.0: @@ -88,7 +78,6 @@ define @vpmerge_vpfptosi( %passthru, @llvm.vp.sitofp.nxv2f32.nxv2i64(, , i32) define @vpmerge_vpsitofp( %passthru, %x, %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpsitofp: ; CHECK: # %bb.0: @@ -101,7 +90,6 @@ define @vpmerge_vpsitofp( %passthru, @llvm.vp.zext.nxv2i32.nxv2i8(, , i32) define 
@vpmerge_vpzext( %passthru, %x, %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpzext: ; CHECK: # %bb.0: @@ -114,7 +102,6 @@ define @vpmerge_vpzext( %passthru, @llvm.vp.trunc.nxv2i32.nxv2i64(, , i32) define @vpmerge_vptrunc( %passthru, %x, %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vptrunc: ; CHECK: # %bb.0: @@ -127,7 +114,6 @@ define @vpmerge_vptrunc( %passthru, @llvm.vp.fpext.nxv2f64.nxv2f32(, , i32) define @vpmerge_vpfpext( %passthru, %x, %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpfpext: ; CHECK: # %bb.0: @@ -140,7 +126,6 @@ define @vpmerge_vpfpext( %passthru, < } ; Test integer truncation by vp.trunc. -declare @llvm.vp.fptrunc.nxv2f32.nxv2f64(, , i32) define @vpmerge_vpfptrunc( %passthru, %x, %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpfptrunc: ; CHECK: # %bb.0: @@ -153,7 +138,6 @@ define @vpmerge_vpfptrunc( %passthru, < } ; Test load operation by vp.load. -declare @llvm.vp.load.nxv2i32.p0(ptr, , i32) define @vpmerge_vpload( %passthru, ptr %p, %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpload: ; CHECK: # %bb.0: @@ -194,7 +178,6 @@ define void @vpmerge_vpload_store( %passthru, ptr %p, , i64 } @llvm.riscv.vleff.nxv2i32(, ptr, i64) define @vpmerge_vleff( %passthru, ptr %p, %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vleff: ; CHECK: # %bb.0: @@ -209,7 +192,6 @@ define @vpmerge_vleff( %passthru, ptr %p, < } ; Test strided load by riscv.vlse -declare @llvm.riscv.vlse.nxv2i32(, ptr, i64, i64) define @vpmerge_vlse( %passthru, ptr %p, %m, i64 %s, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vlse: ; CHECK: # %bb.0: @@ -223,7 +205,6 @@ define @vpmerge_vlse( %passthru, ptr %p, < } ; Test indexed load by riscv.vluxei -declare @llvm.riscv.vluxei.nxv2i32.nxv2i64(, ptr, , i64) define @vpmerge_vluxei( %passthru, ptr %p, %idx, %m, i64 %s, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vluxei: ; CHECK: # %bb.0: @@ -237,7 +218,6 @@ define @vpmerge_vluxei( %passthru, ptr %p, } ; Test vector index by riscv.vid -declare @llvm.riscv.vid.nxv2i32(, i64) define @vpmerge_vid( 
%passthru, %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vid: ; CHECK: # %bb.0: @@ -251,7 +231,6 @@ define @vpmerge_vid( %passthru, @llvm.riscv.viota.nxv2i32(, , i64) define @vpmerge_viota( %passthru, %m, %vm, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_viota: ; CHECK: # %bb.0: @@ -280,7 +259,6 @@ define @vpmerge_viota2( %passthru, @llvm.riscv.vfclass.nxv2i32(, , i64) define @vpmerge_vflcass( %passthru, %vf, %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vflcass: ; CHECK: # %bb.0: @@ -294,7 +272,6 @@ define @vpmerge_vflcass( %passthru, @llvm.riscv.vfsqrt.nxv2f32(, , i64, i64) define @vpmerge_vfsqrt( %passthru, %vf, %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vfsqrt: ; CHECK: # %bb.0: @@ -308,7 +285,6 @@ define @vpmerge_vfsqrt( %passthru, @llvm.riscv.vfrec7.nxv2f32(, , i64, i64) define @vpmerge_vfrec7( %passthru, %vf, %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vfrec7: ; CHECK: # %bb.0: @@ -361,8 +337,6 @@ define @vpmerge_constrained_fadd( %pass %b = call @llvm.riscv.vmerge.nxv2f32.nxv2f32( %passthru, %passthru, %a, %m, i64 %vl) strictfp ret %b } -declare @llvm.experimental.constrained.fadd.nxv2f32(, , metadata, metadata) -declare @llvm.riscv.vmerge.nxv2f32.nxv2f32(, , , , i64) ; This shouldn't be folded because we need to preserve exceptions with ; "fpexcept.strict" exception behaviour, and masking may hide them. @@ -451,11 +425,6 @@ define @vpmerge_trunc( %passthru, %b } -declare @llvm.vp.select.nxv2i16(, , , i32) -declare @llvm.vp.select.nxv2i32(, , , i32) -declare @llvm.vp.select.nxv2f32(, , , i32) -declare @llvm.vp.select.nxv2f64(, , , i32) - ; Test binary operator with vp.select and vp.smax. 
define @vpselect_vpadd( %passthru, %x, %y, %m, i32 zeroext %vl) { ; CHECK-LABEL: vpselect_vpadd: @@ -737,7 +706,6 @@ define @vpselect_vfrec7( %passthru, @llvm.riscv.vslideup.nxv2i32(, , i64, i64, i64) define @vpselect_vslideup( %passthru, %v, i64 %x, %m, i32 zeroext %vl) { ; CHECK-LABEL: vpselect_vslideup: ; CHECK: # %bb.0: @@ -750,7 +718,6 @@ define @vpselect_vslideup( %passthru, %b } -declare @llvm.riscv.vslidedown.nxv2i32(, , i64, i64, i64) define @vpselect_vslidedown( %passthru, %v, i64 %x, %m, i32 zeroext %vl) { ; CHECK-LABEL: vpselect_vslidedown: ; CHECK: # %bb.0: @@ -763,7 +730,6 @@ define @vpselect_vslidedown( %passthru, %b } -declare @llvm.riscv.vslide1up.nxv2i32.i32(, , i32, i64) define @vpselect_vslide1up( %passthru, %v, i32 %x, %m, i32 zeroext %vl) { ; CHECK-LABEL: vpselect_vslide1up: ; CHECK: # %bb.0: @@ -776,7 +742,6 @@ define @vpselect_vslide1up( %passthru, %b } -declare @llvm.riscv.vslide1down.nxv2i32.i32(, , i32, i64) define @vpselect_vslide1down( %passthru, %v, i32 %x, %m, i32 zeroext %vl) { ; CHECK-LABEL: vpselect_vslide1down: ; CHECK: # %bb.0: @@ -932,12 +897,6 @@ entry: ; Test reductions don't have a vmerge folded into them, since the mask affects ; the result. 
-declare @llvm.riscv.vredsum.nxv2i32.nxv2i32( - , - , - , - i64) - define @vredsum( %passthru, %x, %y, %m, i64 %vl) { ; CHECK-LABEL: vredsum: ; CHECK: # %bb.0: @@ -955,12 +914,6 @@ define @vredsum( %passthru, %b } -declare @llvm.riscv.vfredusum.nxv2f32.nxv2f32( - , - , - , - i64, i64) - define @vfredusum( %passthru, %x, %y, %m, i64 %vl) { ; CHECK-LABEL: vfredusum: ; CHECK: # %bb.0: @@ -1031,19 +984,8 @@ define @unfoldable_vredsum_allones_mask_diff_vl( %b } -declare @llvm.riscv.vle.nxv32i16.i64(, ptr nocapture, i64) -declare @llvm.riscv.vssubu.mask.nxv32i8.i8.i64(, , i8, , i64, i64 immarg) -declare @llvm.riscv.vmseq.nxv32i8.nxv32i8.i64(, , i64) -declare @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64(, , , , i64) -declare void @llvm.riscv.vse.nxv32i16.i64(, ptr nocapture, i64) -declare @llvm.riscv.vaaddu.nxv1i16.i16.i64(, , i16, i64 immarg, i64) -declare @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64(, , , , i64) - ; Tests for folding vmerge into its ops when their VLs differ -declare @llvm.riscv.vadd.nxv2i32.nxv2i32(, , , i64) -declare @llvm.riscv.vmerge.nxv2i32.nxv2i32(, , , , i64) - ; Can fold with VL=2 define @vmerge_smaller_vl_same_passthru( %passthru, %x, %y, %m) { ; CHECK-LABEL: vmerge_smaller_vl_same_passthru: @@ -1195,7 +1137,6 @@ define @true_mask_vmerge_implicit_passthru( ret %b } - define @unfoldable_mismatched_sew( %passthru, %x, %y, %mask, i64 %avl) { ; CHECK-LABEL: unfoldable_mismatched_sew: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i32.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i32.ll index 838cd82156875..c1a2b11902315 100644 --- a/llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i32.ll @@ -50,4 +50,3 @@ entry: ret i32 %1 } -declare i32 @llvm.vscale.i32() diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i64.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i64.ll index d3f3087e06cf5..6b8822d998344 100644 --- a/llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i64.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i64.ll @@ -10,7 +10,6 @@ ; RUN: llc -mtriple riscv64 -mattr=+m,+v,+zvl256b -riscv-v-vector-bits-max=256 < %s \ ; RUN: | FileCheck %s -check-prefixes=RV64-VLEN256EXACT - define i64 @vscale_zero() nounwind { ; RV64-LABEL: vscale_zero: ; RV64: # %bb.0: # %entry @@ -200,5 +199,3 @@ entry: ret i64 %1 } - -declare i64 @llvm.vscale.i64() diff --git a/llvm/test/CodeGen/RISCV/rvv/saddo-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/saddo-sdnode.ll index 84f31e32a9b6b..bc0e05914a821 100644 --- a/llvm/test/CodeGen/RISCV/rvv/saddo-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/saddo-sdnode.ll @@ -1,8 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s -declare { , } @llvm.sadd.with.overflow.nxv2i32(, ) - define @saddo_nvx2i32( %x, %y) { ; CHECK-LABEL: saddo_nvx2i32: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll index 32892bca84747..634e58198def3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll @@ -12,8 +12,6 @@ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ ; RUN: --check-prefixes=CHECK,CHECK64,ZVFHMIN -declare @llvm.vp.fcmp.nxv1bf16(, , metadata, , i32) - define @fcmp_oeq_vv_nxv1bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: fcmp_oeq_vv_nxv1bf16: ; CHECK: # %bb.0: @@ -656,8 +654,6 @@ define @fcmp_uno_vf_swap_nxv1bf16( %va, b ret %v } -declare @llvm.vp.fcmp.nxv3bf16(, , metadata, , i32) - define @fcmp_oeq_vv_nxv3bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: fcmp_oeq_vv_nxv3bf16: ; CHECK: # %bb.0: @@ -672,8 +668,6 @@ define @fcmp_oeq_vv_nxv3bf16( %va, %v } -declare @llvm.vp.fcmp.nxv8bf16(, , metadata, , i32) - define @fcmp_oeq_vv_nxv8bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: fcmp_oeq_vv_nxv8bf16: ; CHECK: # %bb.0: @@ -1342,8 +1336,6 @@ define 
@fcmp_uno_vf_swap_nxv8bf16( %va, b ret %v } -declare @llvm.vp.fcmp.nxv64bf16(, , metadata, , i32) - define @fcmp_oeq_vv_nxv64bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: fcmp_oeq_vv_nxv64bf16: ; CHECK: # %bb.0: @@ -1554,8 +1546,6 @@ define @fcmp_oeq_vv_nxv64bf16( %va, %v } -declare @llvm.vp.fcmp.nxv1f16(, , metadata, , i32) - define @fcmp_oeq_vv_nxv1f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: fcmp_oeq_vv_nxv1f16: ; ZVFH: # %bb.0: @@ -2490,8 +2480,6 @@ define @fcmp_uno_vf_swap_nxv1f16( %va, half ret %v } -declare @llvm.vp.fcmp.nxv3f16(, , metadata, , i32) - define @fcmp_oeq_vv_nxv3f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: fcmp_oeq_vv_nxv3f16: ; ZVFH: # %bb.0: @@ -2512,8 +2500,6 @@ define @fcmp_oeq_vv_nxv3f16( %va, %v } -declare @llvm.vp.fcmp.nxv8f16(, , metadata, , i32) - define @fcmp_oeq_vv_nxv8f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: fcmp_oeq_vv_nxv8f16: ; ZVFH: # %bb.0: @@ -3492,8 +3478,6 @@ define @fcmp_uno_vf_swap_nxv8f16( %va, half ret %v } -declare @llvm.vp.fcmp.nxv64f16(, , metadata, , i32) - define @fcmp_oeq_vv_nxv64f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: fcmp_oeq_vv_nxv64f16: ; ZVFH: # %bb.0: @@ -3750,8 +3734,6 @@ define @fcmp_oeq_vv_nxv64f16( %va, %v } -declare @llvm.vp.fcmp.nxv1f64(, , metadata, , i32) - define @fcmp_oeq_vv_nxv1f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: fcmp_oeq_vv_nxv1f64: ; CHECK: # %bb.0: @@ -4268,8 +4250,6 @@ define @fcmp_uno_vf_swap_nxv1f64( %va, do ret %v } -declare @llvm.vp.fcmp.nxv3f64(, , metadata, , i32) - define @fcmp_oeq_vv_nxv3f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: fcmp_oeq_vv_nxv3f64: ; CHECK: # %bb.0: @@ -4281,8 +4261,6 @@ define @fcmp_oeq_vv_nxv3f64( %va, %v } -declare @llvm.vp.fcmp.nxv8f64(, , metadata, , i32) - define @fcmp_oeq_vv_nxv8f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: fcmp_oeq_vv_nxv8f64: ; CHECK: # %bb.0: @@ -4817,8 +4795,6 @@ define @fcmp_uno_vf_swap_nxv8f64( %va, do ret %v } -declare @llvm.vp.fcmp.nxv32f64(, , 
metadata, , i32) - define @fcmp_oeq_vv_nxv32f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK32-LABEL: fcmp_oeq_vv_nxv32f64: ; CHECK32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp-mask.ll index 5fde258fb442b..11e0691305e63 100644 --- a/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp-mask.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK -declare @llvm.vp.icmp.nxv1i1(, , metadata, , i32) - define @icmp_eq_vv_nxv1i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_nxv1i1: ; CHECK: # %bb.0: @@ -16,8 +14,6 @@ define @icmp_eq_vv_nxv1i1( %va, %v } -declare @llvm.vp.icmp.nxv2i1(, , metadata, , i32) - define @icmp_eq_vv_nxv2i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_nxv2i1: ; CHECK: # %bb.0: @@ -28,8 +24,6 @@ define @icmp_eq_vv_nxv2i1( %va, %v } -declare @llvm.vp.icmp.nxv4i1(, , metadata, , i32) - define @icmp_eq_vv_nxv4i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_nxv4i1: ; CHECK: # %bb.0: @@ -40,8 +34,6 @@ define @icmp_eq_vv_nxv4i1( %va, %v } -declare @llvm.vp.icmp.nxv8i1(, , metadata, , i32) - define @icmp_eq_vv_nxv8i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_nxv8i1: ; CHECK: # %bb.0: @@ -52,8 +44,6 @@ define @icmp_eq_vv_nxv8i1( %va, %v } -declare @llvm.vp.icmp.nxv16i1(, , metadata, , i32) - define @icmp_eq_vv_nxv16i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_nxv16i1: ; CHECK: # %bb.0: @@ -64,8 +54,6 @@ define @icmp_eq_vv_nxv16i1( %va, %v } -declare @llvm.vp.icmp.nxv32i1(, , metadata, , i32) - define @icmp_eq_vv_nxv32i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_nxv32i1: ; CHECK: # %bb.0: @@ -76,8 +64,6 @@ define @icmp_eq_vv_nxv32i1( %va, %v } -declare @llvm.vp.icmp.nxv64i1(, , metadata, , i32) - define @icmp_eq_vv_nxv64i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: 
icmp_eq_vv_nxv64i1: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll index 13c63d9c80a9a..c1de57bf850ac 100644 --- a/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll @@ -7,8 +7,6 @@ ; FIXME: We're missing canonicalizations of ISD::VP_SETCC equivalent to those ; for ISD::SETCC, e.g., splats aren't moved to the RHS. -declare @llvm.vp.icmp.nxv1i8(, , metadata, , i32) - define @icmp_eq_vv_nxv1i8( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_nxv1i8: ; CHECK: # %bb.0: @@ -498,8 +496,6 @@ define @icmp_sle_vi_swap_nxv1i8( %va, %v } -declare @llvm.vp.icmp.nxv3i8(, , metadata, , i32) - define @icmp_eq_vv_nxv3i8( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_nxv3i8: ; CHECK: # %bb.0: @@ -534,8 +530,6 @@ define @icmp_eq_vx_swap_nxv3i8( %va, i8 %b, < ret %v } -declare @llvm.vp.icmp.nxv8i7(, , metadata, , i32) - define @icmp_eq_vv_nxv8i7( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_nxv8i7: ; CHECK: # %bb.0: @@ -581,8 +575,6 @@ define @icmp_eq_vx_swap_nxv8i7( %va, i7 %b, < ret %v } -declare @llvm.vp.icmp.nxv8i8(, , metadata, , i32) - define @icmp_eq_vv_nxv8i8( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_nxv8i8: ; CHECK: # %bb.0: @@ -1072,8 +1064,6 @@ define @icmp_sle_vi_swap_nxv8i8( %va, %v } -declare @llvm.vp.icmp.nxv128i8(, , metadata, , i32) - define @icmp_eq_vv_nxv128i8( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_nxv128i8: ; CHECK: # %bb.0: @@ -1181,8 +1171,6 @@ define @icmp_eq_vx_swap_nxv128i8( %va, i8 ret %v } -declare @llvm.vp.icmp.nxv1i32(, , metadata, , i32) - define @icmp_eq_vv_nxv1i32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_nxv1i32: ; CHECK: # %bb.0: @@ -1672,8 +1660,6 @@ define @icmp_sle_vi_swap_nxv1i32( %va, %v } -declare @llvm.vp.icmp.nxv8i32(, , metadata, , i32) - define @icmp_eq_vv_nxv8i32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: 
icmp_eq_vv_nxv8i32: ; CHECK: # %bb.0: @@ -2208,8 +2194,6 @@ define @icmp_sle_vi_swap_nxv8i32( %va, %v } -declare @llvm.vp.icmp.nxv32i32(, , metadata, , i32) - define @icmp_eq_vv_nxv32i32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_nxv32i32: ; CHECK: # %bb.0: @@ -2324,8 +2308,6 @@ define @icmp_eq_vx_swap_nxv32i32( %va, i32 ret %v } -declare @llvm.vp.icmp.nxv1i64(, , metadata, , i32) - define @icmp_eq_vv_nxv1i64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_nxv1i64: ; CHECK: # %bb.0: @@ -3067,8 +3049,6 @@ define @icmp_sle_vi_swap_nxv1i64( %va, %v } -declare @llvm.vp.icmp.nxv8i64(, , metadata, , i32) - define @icmp_eq_vv_nxv8i64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-integer.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-integer.ll index 058f83e3f1f31..89c23688a6350 100644 --- a/llvm/test/CodeGen/RISCV/rvv/setcc-integer.ll +++ b/llvm/test/CodeGen/RISCV/rvv/setcc-integer.ll @@ -405,8 +405,6 @@ define @icmp_ult_vi_nxv8i8_4( %va) { ret %vc } -declare @llvm.riscv.vmv.v.x.nxv8i8.iXLen(, i8, iXLen); - ; Test that we don't optimize ult x, 0 -> ule x, -1 define @icmp_ult_vi_nxv8i8_5( %va, iXLen %vl) { ; CHECK-LABEL: icmp_ult_vi_nxv8i8_5: diff --git a/llvm/test/CodeGen/RISCV/rvv/sf_vfnrclip_x_f_qf.ll b/llvm/test/CodeGen/RISCV/rvv/sf_vfnrclip_x_f_qf.ll index 472915939ffc4..75fb468e77b99 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sf_vfnrclip_x_f_qf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sf_vfnrclip_x_f_qf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+xsfvfnrclipxfqf \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.sf.vfnrclip.x.f.qf.nxv1i8.nxv1f32.iXLen( - , - , - float, - iXLen, iXLen); - define @intrinsic_sf_vfnrclip_x_f_qf_nxv1i8_nxv1f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_nxv1i8_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -27,13 +21,6 @@ entry: ret 
%a } -declare @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv1i8.nxv1f32.iXLen( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_sf_vfnrclip_x_f_qf_mask_nxv1i8_nxv1f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_mask_nxv1i8_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -53,12 +40,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vfnrclip.x.f.qf.nxv2i8.nxv2f32.iXLen( - , - , - float, - iXLen, iXLen); - define @intrinsic_sf_vfnrclip_x_f_qf_nxv2i8_nxv2f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_nxv2i8_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv2i8.nxv2f32.iXLen( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_sf_vfnrclip_x_f_qf_mask_nxv2i8_nxv2f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_mask_nxv2i8_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vfnrclip.x.f.qf.nxv4i8.nxv4f32.iXLen( - , - , - float, - iXLen, iXLen); - define @intrinsic_sf_vfnrclip_x_f_qf_nxv4i8_nxv4f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_nxv4i8_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -129,13 +97,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv4i8.nxv4f32.iXLen( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_sf_vfnrclip_x_f_qf_mask_nxv4i8_nxv4f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_mask_nxv4i8_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -155,12 +116,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vfnrclip.x.f.qf.nxv8i8.nxv8f32.iXLen( - , - , - float, - iXLen, iXLen); - define @intrinsic_sf_vfnrclip_x_f_qf_nxv8i8_nxv8f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_nxv8i8_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -180,13 +135,6 @@ entry: ret %a } -declare 
@llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv8i8.nxv8f32.iXLen( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_sf_vfnrclip_x_f_qf_mask_nxv8i8_nxv8f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_mask_nxv8i8_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -206,12 +154,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vfnrclip.x.f.qf.nxv16i8.nxv16f32.iXLen( - , - , - float, - iXLen, iXLen); - define @intrinsic_sf_vfnrclip_x_f_qf_nxv16i8_nxv16f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_nxv16i8_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -231,13 +173,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv16i8.nxv16f32.iXLen( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_sf_vfnrclip_x_f_qf_mask_nxv16i8_nxv16f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_mask_nxv16i8_nxv16f32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sf_vfnrclip_xu_f_qf.ll b/llvm/test/CodeGen/RISCV/rvv/sf_vfnrclip_xu_f_qf.ll index b98a7aba7be16..0658647451858 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sf_vfnrclip_xu_f_qf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sf_vfnrclip_xu_f_qf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+xsfvfnrclipxfqf \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv1i8.nxv1f32.iXLen( - , - , - float, - iXLen, iXLen); - define @intrinsic_sf_vfnrclip_xu_f_qf_nxv1i8_nxv1f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_nxv1i8_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -27,13 +21,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.iXLen( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv1i8_nxv1f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv1i8_nxv1f32: ; 
CHECK: # %bb.0: # %entry @@ -53,12 +40,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv2i8.nxv2f32.iXLen( - , - , - float, - iXLen, iXLen); - define @intrinsic_sf_vfnrclip_xu_f_qf_nxv2i8_nxv2f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_nxv2i8_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.iXLen( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv2i8_nxv2f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv2i8_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv4i8.nxv4f32.iXLen( - , - , - float, - iXLen, iXLen); - define @intrinsic_sf_vfnrclip_xu_f_qf_nxv4i8_nxv4f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_nxv4i8_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -129,13 +97,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.iXLen( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv4i8_nxv4f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv4i8_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -155,12 +116,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv8i8.nxv8f32.iXLen( - , - , - float, - iXLen, iXLen); - define @intrinsic_sf_vfnrclip_xu_f_qf_nxv8i8_nxv8f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_nxv8i8_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -180,13 +135,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.iXLen( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv8i8_nxv8f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv8i8_nxv8f32: ; CHECK: # %bb.0: # %entry 
@@ -206,12 +154,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv16i8.nxv16f32.iXLen( - , - , - float, - iXLen, iXLen); - define @intrinsic_sf_vfnrclip_xu_f_qf_nxv16i8_nxv16f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_nxv16i8_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -231,13 +173,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32.iXLen( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv16i8_nxv16f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv16i8_nxv16f32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sf_vfwmacc_4x4x4.ll b/llvm/test/CodeGen/RISCV/rvv/sf_vfwmacc_4x4x4.ll index 8ec7126422913..785d4e6b6026f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sf_vfwmacc_4x4x4.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sf_vfwmacc_4x4x4.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfbfmin,+xsfvfwmaccqqq \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.sf.vfwmacc.4x4x4.nxv1f32.nxv4bf16.nxv1bf16.iXLen( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwmacc_4x4x4_tu_f32mf2( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_4x4x4_tu_f32mf2: ; CHECK: # %bb.0: # %entry @@ -42,12 +36,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vfwmacc.4x4x4.nxv2f32.nxv4bf16.nxv2bf16.iXLen( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwmacc_4x4x4_tu_f32m1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_4x4x4_tu_f32m1: ; CHECK: # %bb.0: # %entry @@ -80,12 +68,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vfwmacc.4x4x4.nxv4f32.nxv4bf16.nxv4bf16.iXLen( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwmacc_4x4x4_tu_f32m2( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_4x4x4_tu_f32m2: ; CHECK: # %bb.0: # %entry @@ -118,12 +100,6 @@ entry: ret %a } -declare 
@llvm.riscv.sf.vfwmacc.4x4x4.nxv8f32.nxv4bf16.nxv8bf16.iXLen( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwmacc_4x4x4_tu_f32m4( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_4x4x4_tu_f32m4: ; CHECK: # %bb.0: # %entry @@ -156,12 +132,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vfwmacc.4x4x4.nxv16f32.nxv4bf16.nxv16bf16.iXLen( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwmacc_4x4x4_tu_f32m8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_4x4x4_tu_f32m8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sf_vqmacc_2x8x2.ll b/llvm/test/CodeGen/RISCV/rvv/sf_vqmacc_2x8x2.ll index 25256f7914931..56af93877dbff 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sf_vqmacc_2x8x2.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sf_vqmacc_2x8x2.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+xsfvqmaccdod \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.sf.vqmacc.2x8x2.nxv2i32.nxv8i8.nxv8i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmacc_2x8x2_tu_i32m1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmacc_2x8x2_tu_i32m1: ; CHECK: # %bb.0: # %entry @@ -42,12 +36,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vqmacc.2x8x2.nxv4i32.nxv8i8.nxv16i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmacc_2x8x2_tu_i32m2( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmacc_2x8x2_tu_i32m2: ; CHECK: # %bb.0: # %entry @@ -80,12 +68,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vqmacc.2x8x2.nxv8i32.nxv8i8.nxv32i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmacc_2x8x2_tu_i32m4( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmacc_2x8x2_tu_i32m4: ; CHECK: # %bb.0: # %entry @@ -118,12 +100,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vqmacc.2x8x2.nxv16i32.nxv8i8.nxv64i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmacc_2x8x2_tu_i32m8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmacc_2x8x2_tu_i32m8: 
; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sf_vqmacc_4x8x4.ll b/llvm/test/CodeGen/RISCV/rvv/sf_vqmacc_4x8x4.ll index eebc51619480b..9c36b2b8a71d1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sf_vqmacc_4x8x4.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sf_vqmacc_4x8x4.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+xsfvqmaccqoq \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.sf.vqmacc.4x8x4.nxv2i32.nxv8i8.nxv8i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmacc_4x8x4_tu_i32m1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmacc_4x8x4_tu_i32m1: ; CHECK: # %bb.0: # %entry @@ -42,12 +36,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vqmacc.4x8x4.nxv4i32.nxv8i8.nxv16i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmacc_4x8x4_tu_i32m2( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmacc_4x8x4_tu_i32m2: ; CHECK: # %bb.0: # %entry @@ -80,12 +68,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vqmacc.4x8x4.nxv8i32.nxv8i8.nxv32i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmacc_4x8x4_tu_i32m4( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmacc_4x8x4_tu_i32m4: ; CHECK: # %bb.0: # %entry @@ -118,12 +100,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vqmacc.4x8x4.nxv16i32.nxv8i8.nxv64i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmacc_4x8x4_tu_i32m8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmacc_4x8x4_tu_i32m8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccsu_2x8x2.ll b/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccsu_2x8x2.ll index 8d61901107931..3c499d4111356 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccsu_2x8x2.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccsu_2x8x2.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+xsfvqmaccdod \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare 
@llvm.riscv.sf.vqmaccsu.2x8x2.nxv2i32.nxv8i8.nxv8i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmaccsu_2x8x2_tu_i32m1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmaccsu_2x8x2_tu_i32m1: ; CHECK: # %bb.0: # %entry @@ -42,12 +36,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vqmaccsu.2x8x2.nxv4i32.nxv8i8.nxv16i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmaccsu_2x8x2_tu_i32m2( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmaccsu_2x8x2_tu_i32m2: ; CHECK: # %bb.0: # %entry @@ -80,12 +68,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vqmaccsu.2x8x2.nxv8i32.nxv8i8.nxv32i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmaccsu_2x8x2_tu_i32m4( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmaccsu_2x8x2_tu_i32m4: ; CHECK: # %bb.0: # %entry @@ -118,12 +100,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vqmaccsu.2x8x2.nxv16i32.nxv8i8.nxv64i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmaccsu_2x8x2_tu_i32m8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmaccsu_2x8x2_tu_i32m8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccsu_4x8x4.ll b/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccsu_4x8x4.ll index 0d7052356e558..8264b876d245d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccsu_4x8x4.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccsu_4x8x4.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+xsfvqmaccqoq \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.sf.vqmaccsu.4x8x4.nxv2i32.nxv8i8.nxv8i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmaccsu_4x8x4_tu_i32m1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmaccsu_4x8x4_tu_i32m1: ; CHECK: # %bb.0: # %entry @@ -42,12 +36,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vqmaccsu.4x8x4.nxv4i32.nxv8i8.nxv16i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmaccsu_4x8x4_tu_i32m2( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vqmaccsu_4x8x4_tu_i32m2: ; CHECK: # %bb.0: # %entry @@ -80,12 +68,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vqmaccsu.4x8x4.nxv8i32.nxv8i8.nxv32i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmaccsu_4x8x4_tu_i32m4( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmaccsu_4x8x4_tu_i32m4: ; CHECK: # %bb.0: # %entry @@ -118,12 +100,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vqmaccsu.4x8x4.nxv16i32.nxv8i8.nxv64i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmaccsu_4x8x4_tu_i32m8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmaccsu_4x8x4_tu_i32m8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccu_2x8x2.ll b/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccu_2x8x2.ll index 6667a89052e9c..875e9b4426fa6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccu_2x8x2.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccu_2x8x2.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+xsfvqmaccdod \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.sf.vqmaccu.2x8x2.nxv2i32.nxv8i8.nxv8i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmaccu_2x8x2_tu_i32m1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmaccu_2x8x2_tu_i32m1: ; CHECK: # %bb.0: # %entry @@ -42,12 +36,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vqmaccu.2x8x2.nxv4i32.nxv8i8.nxv16i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmaccu_2x8x2_tu_i32m2( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmaccu_2x8x2_tu_i32m2: ; CHECK: # %bb.0: # %entry @@ -80,12 +68,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vqmaccu.2x8x2.nxv8i32.nxv8i8.nxv32i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmaccu_2x8x2_tu_i32m4( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmaccu_2x8x2_tu_i32m4: ; CHECK: # %bb.0: # %entry @@ -118,12 +100,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vqmaccu.2x8x2.nxv16i32.nxv8i8.nxv64i8( - , - , - , - iXLen, iXLen); - define 
@intrinsic_vqmaccu_2x8x2_tu_i32m8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmaccu_2x8x2_tu_i32m8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccu_4x8x4.ll b/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccu_4x8x4.ll index 3332390f71e01..a6bc5900c6caf 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccu_4x8x4.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccu_4x8x4.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+xsfvqmaccqoq \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.sf.vqmaccu.4x8x4.nxv2i32.nxv8i8.nxv8i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmaccu_4x8x4_tu_i32m1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmaccu_4x8x4_tu_i32m1: ; CHECK: # %bb.0: # %entry @@ -42,12 +36,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vqmaccu.4x8x4.nxv4i32.nxv8i8.nxv16i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmaccu_4x8x4_tu_i32m2( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmaccu_4x8x4_tu_i32m2: ; CHECK: # %bb.0: # %entry @@ -80,12 +68,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vqmaccu.4x8x4.nxv8i32.nxv8i8.nxv32i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmaccu_4x8x4_tu_i32m4( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmaccu_4x8x4_tu_i32m4: ; CHECK: # %bb.0: # %entry @@ -118,12 +100,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vqmaccu.4x8x4.nxv16i32.nxv8i8.nxv64i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmaccu_4x8x4_tu_i32m8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmaccu_4x8x4_tu_i32m8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccus_2x8x2.ll b/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccus_2x8x2.ll index 82a2a2e0fc835..d0ffec843cd01 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccus_2x8x2.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccus_2x8x2.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 
-mattr=+v,+xsfvqmaccdod \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.sf.vqmaccus.2x8x2.nxv2i32.nxv8i8.nxv8i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmaccus_2x8x2_tu_i32m1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmaccus_2x8x2_tu_i32m1: ; CHECK: # %bb.0: # %entry @@ -42,12 +36,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vqmaccus.2x8x2.nxv4i32.nxv8i8.nxv16i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmaccus_2x8x2_tu_i32m2( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmaccus_2x8x2_tu_i32m2: ; CHECK: # %bb.0: # %entry @@ -80,12 +68,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vqmaccus.2x8x2.nxv8i32.nxv8i8.nxv32i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmaccus_2x8x2_tu_i32m4( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmaccus_2x8x2_tu_i32m4: ; CHECK: # %bb.0: # %entry @@ -118,12 +100,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vqmaccus.2x8x2.nxv16i32.nxv8i8.nxv64i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmaccus_2x8x2_tu_i32m8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmaccus_2x8x2_tu_i32m8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccus_4x8x4.ll b/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccus_4x8x4.ll index 74fb66f5bf351..69f04a5cc9833 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccus_4x8x4.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccus_4x8x4.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+xsfvqmaccqoq \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.sf.vqmaccus.4x8x4.nxv2i32.nxv8i8.nxv8i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmaccus_4x8x4_tu_i32m1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmaccus_4x8x4_tu_i32m1: ; CHECK: # %bb.0: # %entry @@ -42,12 +36,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vqmaccus.4x8x4.nxv4i32.nxv8i8.nxv16i8( - , - , - , - iXLen, iXLen); - define 
@intrinsic_vqmaccus_4x8x4_tu_i32m2( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmaccus_4x8x4_tu_i32m2: ; CHECK: # %bb.0: # %entry @@ -80,12 +68,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vqmaccus.4x8x4.nxv8i32.nxv8i8.nxv32i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmaccus_4x8x4_tu_i32m4( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmaccus_4x8x4_tu_i32m4: ; CHECK: # %bb.0: # %entry @@ -118,12 +100,6 @@ entry: ret %a } -declare @llvm.riscv.sf.vqmaccus.4x8x4.nxv16i32.nxv8i8.nxv64i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vqmaccus_4x8x4_tu_i32m8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqmaccus_4x8x4_tu_i32m8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive-O0-ATM-ATK.ll b/llvm/test/CodeGen/RISCV/rvv/sifive-O0-ATM-ATK.ll index d9a49a1b6b6ea..1bd966ef7e481 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive-O0-ATM-ATK.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sifive-O0-ATM-ATK.ll @@ -14,5 +14,3 @@ entry: ret void } -; Function Attrs: nocallback nofree nosync nounwind willreturn -declare void @llvm.riscv.sf.vtzero.t.i64(i64 immarg, i64, i64, i64 immarg, i64 immarg) #0 diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e4m3_e4m3.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e4m3_e4m3.ll index 9b9a849cd7262..c57fce7e919ef 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e4m3_e4m3.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e4m3_e4m3.ll @@ -4,8 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xsfmm32a8f \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare void @llvm.riscv.sf.mm.e4m3.e4m3.iXLen.nxv64i8(iXLen, , , iXLen, iXLen, iXLen, iXLen) - define void @test_sf_mm_e4m3_e4m3_w4_u8m8_u8m8(iXLen %mtd, %v1, %v2, iXLen %tm, iXLen %tn, iXLen %tk) { ; CHECK-LABEL: test_sf_mm_e4m3_e4m3_w4_u8m8_u8m8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e4m3_e5m2.ll 
b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e4m3_e5m2.ll index b63974f04a66e..0131ff6edbb18 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e4m3_e5m2.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e4m3_e5m2.ll @@ -4,8 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xsfmm32a8f \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare void @llvm.riscv.sf.mm.e4m3.e5m2.iXLen.nxv64i8(iXLen, , , iXLen, iXLen, iXLen, iXLen) - define void @test_sf_mm_e4m3_e5m2_w4_u8m8_u8m8(iXLen %mtd, %v1, %v2, iXLen %tm, iXLen %tn, iXLen %tk) { ; CHECK-LABEL: test_sf_mm_e4m3_e5m2_w4_u8m8_u8m8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e5m2_e4m3.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e5m2_e4m3.ll index 62d629b1b1f1d..e35fd46958b9d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e5m2_e4m3.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e5m2_e4m3.ll @@ -4,8 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xsfmm32a8f \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare void @llvm.riscv.sf.mm.e5m2.e4m3.iXLen.nxv64i8(iXLen, , , iXLen, iXLen, iXLen, iXLen) - define void @test_sf_mm_e5m2_e5m2_w4_u8m8_u8m8(iXLen %mtd, %v1, %v2, iXLen %tm, iXLen %tn, iXLen %tk) { ; CHECK-LABEL: test_sf_mm_e5m2_e5m2_w4_u8m8_u8m8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e5m2_e5m2.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e5m2_e5m2.ll index 7a90c97bcf0be..468a1d59e29a8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e5m2_e5m2.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e5m2_e5m2.ll @@ -4,8 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xsfmm32a8f \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare void @llvm.riscv.sf.mm.e5m2.e5m2.iXLen.nxv64i8(iXLen, , , iXLen, iXLen, iXLen, iXLen) - define void @test_sf_mm_e4m3_e5m2_w4_u8m8_u8m8(iXLen %mtd, %v1, %v2, iXLen %tm, iXLen 
%tn, iXLen %tk) { ; CHECK-LABEL: test_sf_mm_e4m3_e5m2_w4_u8m8_u8m8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_f_f.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_f_f.ll index 29451c60b9248..dcf35c50bc83a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_f_f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_f_f.ll @@ -6,8 +6,6 @@ ; RUN: -mattr=+zvfh -mattr=+xsfmm32a32f -mattr=+xsfmm64a64f \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare void @llvm.riscv.sf.mm.f.f.iXLen.nxv32f16(iXLen, , , iXLen, iXLen, iXLen, iXLen) - define void @test_sf_mm_f_f_w2_f16m8(iXLen %mtd, %v1, %v2, iXLen %tm, iXLen %tn, iXLen %tk) { ; CHECK-LABEL: test_sf_mm_f_f_w2_f16m8: ; CHECK: # %bb.0: # %entry @@ -21,8 +19,6 @@ define void @test_sf_mm_f_f_w2_f16m8(iXLen %mtd, %v1, , , iXLen, iXLen, iXLen, iXLen) - define void @test_sf_mm_f_f_w1_f32m8(iXLen %mtd, %v1, %v2, iXLen %tm, iXLen %tn, iXLen %tk) { ; CHECK-LABEL: test_sf_mm_f_f_w1_f32m8: ; CHECK: # %bb.0: # %entry @@ -36,8 +32,6 @@ define void @test_sf_mm_f_f_w1_f32m8(iXLen %mtd, %v1, , , iXLen, iXLen, iXLen, iXLen) - define void @test_sf_mm_f_f_w1_f64m8(iXLen %mtd, %v1, %v2, iXLen %tm, iXLen %tn, iXLen %tk) { ; CHECK-LABEL: test_sf_mm_f_f_w1_f64m8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_s_s.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_s_s.ll index 6a4b29ff0e786..0e698b2ca3d3a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_s_s.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_s_s.ll @@ -4,8 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xsfmm32a8i \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare void @llvm.riscv.sf.mm.s.s.iXLen.nxv64i8.nxv64i8(iXLen, , , iXLen, iXLen, iXLen, iXLen) - define void @test_sf_mm_s_s_w4_i8m8_i8m8(iXLen %mtd, %v1, %v2, iXLen %tm, iXLen %tn, iXLen %tk) { ; CHECK-LABEL: test_sf_mm_s_s_w4_i8m8_i8m8: ; CHECK: # %bb.0: # %entry diff --git 
a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_s_u.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_s_u.ll index 79239b01cd1d4..71093f381dd63 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_s_u.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_s_u.ll @@ -4,8 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xsfmm32a8i \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare void @llvm.riscv.sf.mm.s.u.iXLen.nxv64i8.nxv64i8(iXLen, , , iXLen, iXLen, iXLen, iXLen) - define void @test_sf_mm_s_u_w4_i8m8_i8m8(iXLen %mtd, %v1, %v2, iXLen %tm, iXLen %tn, iXLen %tk) { ; CHECK-LABEL: test_sf_mm_s_u_w4_i8m8_i8m8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_u_s.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_u_s.ll index b0d039bb194a4..1ca83010fa14c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_u_s.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_u_s.ll @@ -4,8 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xsfmm32a8i \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare void @llvm.riscv.sf.mm.u.s.iXLen.nxv64i8.nxv64i8(iXLen, , , iXLen, iXLen, iXLen, iXLen) - define void @test_sf_mm_u_s_w4_i8m8_i8m8(iXLen %mtd, %v1, %v2, iXLen %tm, iXLen %tn, iXLen %tk) { ; CHECK-LABEL: test_sf_mm_u_s_w4_i8m8_i8m8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_u_u.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_u_u.ll index 913c277655e43..9bfc246f5325f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_u_u.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_u_u.ll @@ -4,8 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xsfmm32a8i \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare void @llvm.riscv.sf.mm.u.u.iXLen.nxv64i8.nxv64i8(iXLen, , , iXLen, iXLen, iXLen, iXLen) - define void @test_sf_mm_u_u_w4_i8m8_i8m8(iXLen %mtd, %v1, %v2, iXLen %tm, iXLen %tn, iXLen %tk) { ; CHECK-LABEL: 
test_sf_mm_u_u_w4_i8m8_i8m8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte16.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte16.ll index 8048dec110a5f..8df930e4ce3dc 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte16.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte16.ll @@ -9,8 +9,6 @@ ; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare void @llvm.riscv.sf.vlte16.iXLen(iXLen, ptr, iXLen) - define dso_local void @test_sf_vlte16(iXLen %tss, ptr %base, iXLen %vl) { ; CHECK-LABEL: test_sf_vlte16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte32.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte32.ll index a526dc8471b1a..3e1ac2688d569 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte32.ll @@ -9,8 +9,6 @@ ; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare void @llvm.riscv.sf.vlte32.iXLen(iXLen, ptr, iXLen) - define dso_local void @test_sf_vlte32(iXLen %tss, ptr %base, iXLen %vl) { ; CHECK-LABEL: test_sf_vlte32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte64.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte64.ll index ed0c48ac467e6..315839794af8c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte64.ll @@ -9,8 +9,6 @@ ; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare void @llvm.riscv.sf.vlte64.iXLen(iXLen, ptr, iXLen) - define dso_local void @test_sf_vlte64(iXLen %tss, ptr %base, iXLen %vl) { ; CHECK-LABEL: test_sf_vlte64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte8.ll 
b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte8.ll index 67b3ed2ec55ab..7451ef286ffc8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte8.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte8.ll @@ -9,8 +9,6 @@ ; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare void @llvm.riscv.sf.vlte8.iXLen(iXLen, ptr, iXLen) - define dso_local void @test_sf_vlte8(iXLen %tss, ptr %base, iXLen %vl) { ; CHECK-LABEL: test_sf_vlte8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vsettk.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vsettk.ll index 4da37fad1b536..b8b2c0c3a709b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vsettk.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vsettk.ll @@ -9,8 +9,6 @@ ; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare iXLen @llvm.riscv.sf.vsettk.iXLen(iXLen, iXLen, iXLen) - define iXLen @test_sf_vsettk(iXLen %tk) { ; CHECK-LABEL: test_sf_vsettk: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vsettm.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vsettm.ll index 143c26cc8cff1..5d5970726d643 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vsettm.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vsettm.ll @@ -9,8 +9,6 @@ ; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare iXLen @llvm.riscv.sf.vsettm.iXLen(iXLen, iXLen, iXLen) - define iXLen @test_sf_vsettm(iXLen %tm) { ; CHECK-LABEL: test_sf_vsettm: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vsettnt.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vsettnt.ll index 48fa1bc8f6cbe..07584899fbcac 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vsettnt.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vsettnt.ll @@ -9,8 +9,6 @@ ; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare iXLen @llvm.riscv.sf.vsettnt.iXLen(iXLen, iXLen, iXLen) - define iXLen @test_sf_vsettnt_e8w1(iXLen %tn) { ; CHECK-LABEL: test_sf_vsettnt_e8w1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste16.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste16.ll index 7a76151e01cc5..9e76b8b3172f5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste16.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste16.ll @@ -9,8 +9,6 @@ ; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare void @llvm.riscv.sf.vste16.iXLen(iXLen, ptr, iXLen) - define dso_local void @test_sf_vste16(iXLen %tss, ptr %base, iXLen %vl) { ; CHECK-LABEL: test_sf_vste16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste32.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste32.ll index 8ff6e6af3b02d..086bb347fd45a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste32.ll @@ -9,8 +9,6 @@ ; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare void @llvm.riscv.sf.vste32.iXLen(iXLen, ptr, iXLen) - define dso_local void @test_sf_vste32(iXLen %tss, ptr %base, iXLen %vl) { ; CHECK-LABEL: test_sf_vste32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste64.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste64.ll index 53990e4dd2483..cff4f78fa8817 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste64.ll @@ -9,8 +9,6 @@ ; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i 
-mattr=+xsfmm64a64f \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare void @llvm.riscv.sf.vste64.iXLen(iXLen, ptr, iXLen) - define dso_local void @test_sf_vste64(iXLen %tss, ptr %base, iXLen %vl) { ; CHECK-LABEL: test_sf_vste64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste8.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste8.ll index 09b72594ac7c6..3e080bec0878b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste8.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste8.ll @@ -9,8 +9,6 @@ ; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare void @llvm.riscv.sf.vste8.iXLen(iXLen, ptr, iXLen) - define dso_local void @test_sf_vste8(iXLen %tss, ptr %base, iXLen %vl) { ; CHECK-LABEL: test_sf_vste8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtdiscard.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtdiscard.ll index 394eb60f73743..b98b7216bdb8d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtdiscard.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtdiscard.ll @@ -9,8 +9,6 @@ ; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare void @llvm.riscv.sf.vtdiscard() - define dso_local void @test_sf_vtdiscard() { ; CHECK-LABEL: test_sf_vtdiscard: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtmv_t_v.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtmv_t_v.ll index 66c9d26c209f0..f551e48a5ef10 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtmv_t_v.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtmv_t_v.ll @@ -9,8 +9,6 @@ ; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare void @llvm.riscv.sf.vtmv.t.v.nxv32bf16.iXLen(iXLen, , iXLen) - 
define void @test_sf_vtmv_t_v_bf16m8(iXLen %tss, %src, iXLen %vl) { ; CHECK-LABEL: test_sf_vtmv_t_v_bf16m8: ; CHECK: # %bb.0: # %entry @@ -22,8 +20,6 @@ define void @test_sf_vtmv_t_v_bf16m8(iXLen %tss, %src, iX ret void } -declare void @llvm.riscv.sf.vtmv.t.v.nxv32f16.iXLen(iXLen, , iXLen) - define void @test_sf_vtmv_t_v_f16(iXLen %tss, %src, iXLen %vl) { ; CHECK-LABEL: test_sf_vtmv_t_v_f16: ; CHECK: # %bb.0: # %entry @@ -35,8 +31,6 @@ define void @test_sf_vtmv_t_v_f16(iXLen %tss, %src, iXLen % ret void } -declare void @llvm.riscv.sf.vtmv.t.v.nxv16f32.iXLen(iXLen, , iXLen) - define void @test_sf_vtmv_t_v_f32(iXLen %tss, %src, iXLen %vl) { ; CHECK-LABEL: test_sf_vtmv_t_v_f32: ; CHECK: # %bb.0: # %entry @@ -48,8 +42,6 @@ define void @test_sf_vtmv_t_v_f32(iXLen %tss, %src, iXLen ret void } -declare void @llvm.riscv.sf.vtmv.t.v.nxv8f64.iXLen(iXLen, , iXLen) - define void @test_sf_vtmv_t_v_f64(iXLen %tss, %src, iXLen %vl) { ; CHECK-LABEL: test_sf_vtmv_t_v_f64: ; CHECK: # %bb.0: # %entry @@ -61,8 +53,6 @@ define void @test_sf_vtmv_t_v_f64(iXLen %tss, %src, iXLen ret void } -declare void @llvm.riscv.sf.vtmv.t.v.nxv64i8.iXLen(iXLen, , iXLen) - define void @test_sf_vtmv_t_v_i8(iXLen %tss, %src, iXLen %vl) { ; CHECK-LABEL: test_sf_vtmv_t_v_i8: ; CHECK: # %bb.0: # %entry @@ -74,8 +64,6 @@ define void @test_sf_vtmv_t_v_i8(iXLen %tss, %src, iXLen %vl) ret void } -declare void @llvm.riscv.sf.vtmv.t.v.nxv32i16.iXLen(iXLen, , iXLen) - define void @test_sf_vtmv_t_v_i16(iXLen %tss, %src, iXLen %vl) { ; CHECK-LABEL: test_sf_vtmv_t_v_i16: ; CHECK: # %bb.0: # %entry @@ -87,8 +75,6 @@ define void @test_sf_vtmv_t_v_i16(iXLen %tss, %src, iXLen %v ret void } -declare void @llvm.riscv.sf.vtmv.t.v.nxv16i32.iXLen(iXLen, , iXLen) - define void @test_sf_vtmv_t_v_i32(iXLen %tss, %src, iXLen %vl) { ; CHECK-LABEL: test_sf_vtmv_t_v_i32: ; CHECK: # %bb.0: # %entry @@ -100,8 +86,6 @@ define void @test_sf_vtmv_t_v_i32(iXLen %tss, %src, iXLen %v ret void } -declare void 
@llvm.riscv.sf.vtmv.t.v.nxv8i64.iXLen(iXLen, , iXLen) - define void @test_sf_vtmv_t_v_i64(iXLen %tss, %src, iXLen %vl) { ; CHECK-LABEL: test_sf_vtmv_t_v_i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtmv_v_t.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtmv_v_t.ll index 0dcc2ab5b9a0d..33445b59cca1c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtmv_v_t.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtmv_v_t.ll @@ -9,8 +9,6 @@ ; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.sf.vtmv.v.t.nxv32bf16.iXLen(iXLen, iXLen) - define @test_sf_vtmv_v_t_bf16m8(iXLen %tss, iXLen %vl) { ; CHECK-LABEL: test_sf_vtmv_v_t_bf16m8: ; CHECK: # %bb.0: # %entry @@ -22,8 +20,6 @@ define @test_sf_vtmv_v_t_bf16m8(iXLen %tss, iXLen %vl) { ret %0 } -declare @llvm.riscv.sf.vtmv.v.t.nxv32f16.iXLen(iXLen, iXLen) - define @test_sf_vtmv_v_t_f16(iXLen %tss, iXLen %vl) { ; CHECK-LABEL: test_sf_vtmv_v_t_f16: ; CHECK: # %bb.0: # %entry @@ -35,8 +31,6 @@ define @test_sf_vtmv_v_t_f16(iXLen %tss, iXLen %vl) { ret %0 } -declare @llvm.riscv.sf.vtmv.v.t.nxv16f32.iXLen(iXLen, iXLen) - define @test_sf_vtmv_v_t_f32(iXLen %tss, iXLen %vl) { ; CHECK-LABEL: test_sf_vtmv_v_t_f32: ; CHECK: # %bb.0: # %entry @@ -48,8 +42,6 @@ define @test_sf_vtmv_v_t_f32(iXLen %tss, iXLen %vl) { ret %0 } -declare @llvm.riscv.sf.vtmv.v.t.nxv8f64.iXLen(iXLen, iXLen) - define @test_sf_vtmv_v_t_f64(iXLen %tss, iXLen %vl) { ; CHECK-LABEL: test_sf_vtmv_v_t_f64: ; CHECK: # %bb.0: # %entry @@ -61,8 +53,6 @@ define @test_sf_vtmv_v_t_f64(iXLen %tss, iXLen %vl) { ret %0 } -declare @llvm.riscv.sf.vtmv.v.t.nxv64i8.iXLen(iXLen, iXLen) - define @test_sf_vtmv_v_t_i8(iXLen %tss, iXLen %vl) { ; CHECK-LABEL: test_sf_vtmv_v_t_i8: ; CHECK: # %bb.0: # %entry @@ -74,8 +64,6 @@ define @test_sf_vtmv_v_t_i8(iXLen %tss, iXLen %vl) { ret %0 } -declare @llvm.riscv.sf.vtmv.v.t.nxv32i16.iXLen(iXLen, 
iXLen) - define @test_sf_vtmv_v_t_i16(iXLen %tss, iXLen %vl) { ; CHECK-LABEL: test_sf_vtmv_v_t_i16: ; CHECK: # %bb.0: # %entry @@ -87,8 +75,6 @@ define @test_sf_vtmv_v_t_i16(iXLen %tss, iXLen %vl) { ret %0 } -declare @llvm.riscv.sf.vtmv.v.t.nxv16i32.iXLen(iXLen, iXLen) - define @test_sf_vtmv_v_t_i32(iXLen %tss, iXLen %vl) { ; CHECK-LABEL: test_sf_vtmv_v_t_i32: ; CHECK: # %bb.0: # %entry @@ -100,8 +86,6 @@ define @test_sf_vtmv_v_t_i32(iXLen %tss, iXLen %vl) { ret %0 } -declare @llvm.riscv.sf.vtmv.v.t.nxv8i64.iXLen(iXLen, iXLen) - define @test_sf_vtmv_v_t_i64(iXLen %tss, iXLen %vl) { ; CHECK-LABEL: test_sf_vtmv_v_t_i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtzero_t.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtzero_t.ll index bbccb026f161b..3ae5ec09be4dd 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtzero_t.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtzero_t.ll @@ -9,7 +9,6 @@ ; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare void @llvm.riscv.sf.vtzero.t.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen) define void @test_sf_vtzero_t(iXLen %tm, iXLen %tn) { ; CHECK-LABEL: test_sf_vtzero_t: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands-i1.ll b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands-i1.ll index 350c888a2c7d6..8608179fb09e0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands-i1.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands-i1.ll @@ -4,8 +4,6 @@ ; Make sure we don't unnecessrily sink i1 vector splats. 
-declare <8 x i1> @llvm.vp.and.v4i1(<8 x i1>, <8 x i1>, <8 x i1>, i32) - define void @sink_splat_vp_and_i1(ptr nocapture %a, i1 zeroext %x, <8 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_and_i1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll index 19a184148c0b6..519312766feeb 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll @@ -2270,11 +2270,6 @@ for.body: ; preds = %for.body.preheader, br i1 %cmp.not, label %for.cond.cleanup, label %for.body } -declare i64 @llvm.vscale.i64() -declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>) -declare @llvm.fma.nxv2f32(, , ) -declare float @llvm.fma.f32(float, float, float) - define void @sink_splat_icmp(ptr nocapture %x, i32 signext %y) { ; CHECK-LABEL: sink_splat_icmp: ; CHECK: # %bb.0: # %entry @@ -2309,7 +2304,6 @@ vector.body: ; preds = %vector.body, %entry for.cond.cleanup: ; preds = %vector.body ret void } -declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32, <4 x i1>) define void @sink_splat_fcmp(ptr nocapture %x, float %y) { ; CHECK-LABEL: sink_splat_fcmp: @@ -2345,7 +2339,6 @@ vector.body: ; preds = %vector.body, %entry for.cond.cleanup: ; preds = %vector.body ret void } -declare void @llvm.masked.store.v4f32.p0(<4 x float>, ptr, i32, <4 x i1>) define void @sink_splat_udiv(ptr nocapture %a, i32 signext %x) { ; CHECK-LABEL: sink_splat_udiv: @@ -2847,8 +2840,6 @@ for.body: ; preds = %for.body.preheader, br i1 %cmp.not, label %for.cond.cleanup, label %for.body } -declare <4 x i32> @llvm.smin.v4i32(<4 x i32>, <4 x i32>) - define void @sink_splat_min(ptr nocapture %a, i32 signext %x) { ; CHECK-LABEL: sink_splat_min: ; CHECK: # %bb.0: # %entry @@ -2917,8 +2908,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i32> @llvm.smax.v4i32(<4 x i32>, <4 x i32>) - define void @sink_splat_max(ptr 
nocapture %a, i32 signext %x) { ; CHECK-LABEL: sink_splat_max: ; CHECK: # %bb.0: # %entry @@ -2987,8 +2976,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i32> @llvm.umin.v4i32(<4 x i32>, <4 x i32>) - define void @sink_splat_umin(ptr nocapture %a, i32 signext %x) { ; CHECK-LABEL: sink_splat_umin: ; CHECK: # %bb.0: # %entry @@ -3057,8 +3044,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i32> @llvm.umax.v4i32(<4 x i32>, <4 x i32>) - define void @sink_splat_umax(ptr nocapture %a, i32 signext %x) { ; CHECK-LABEL: sink_splat_umax: ; CHECK: # %bb.0: # %entry @@ -3127,8 +3112,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32>, <4 x i32>) - define void @sink_splat_sadd_sat(ptr nocapture %a, i32 signext %x) { ; CHECK-LABEL: sink_splat_sadd_sat: ; CHECK: # %bb.0: # %entry @@ -3197,8 +3180,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32>, <4 x i32>) - define void @sink_splat_ssub_sat(ptr nocapture %a, i32 signext %x) { ; CHECK-LABEL: sink_splat_ssub_sat: ; CHECK: # %bb.0: # %entry @@ -3233,8 +3214,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32>, <4 x i32>) - define void @sink_splat_uadd_sat(ptr nocapture %a, i32 signext %x) { ; CHECK-LABEL: sink_splat_uadd_sat: ; CHECK: # %bb.0: # %entry @@ -3303,8 +3282,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i32> @llvm.usub.sat.v4i32(<4 x i32>, <4 x i32>) - define void @sink_splat_usub_sat(ptr nocapture %a, i32 signext %x) { ; CHECK-LABEL: sink_splat_usub_sat: ; CHECK: # %bb.0: # %entry @@ -3339,8 +3316,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i32> @llvm.vp.mul.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define void @sink_splat_vp_mul(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_mul: ; CHECK: # %bb.0: # %entry @@ 
-3377,8 +3352,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i32> @llvm.vp.add.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define void @sink_splat_vp_add(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_add: ; CHECK: # %bb.0: # %entry @@ -3451,8 +3424,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i32> @llvm.vp.sub.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define void @sink_splat_vp_sub(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_sub: ; CHECK: # %bb.0: # %entry @@ -3525,8 +3496,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i32> @llvm.vp.shl.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define void @sink_splat_vp_shl(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_shl: ; CHECK: # %bb.0: # %entry @@ -3563,8 +3532,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i32> @llvm.vp.lshr.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define void @sink_splat_vp_lshr(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_lshr: ; CHECK: # %bb.0: # %entry @@ -3601,8 +3568,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i32> @llvm.vp.ashr.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define void @sink_splat_vp_ashr(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_ashr: ; CHECK: # %bb.0: # %entry @@ -3639,8 +3604,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x float> @llvm.vp.fmul.v4i32(<4 x float>, <4 x float>, <4 x i1>, i32) - define void @sink_splat_vp_fmul(ptr nocapture %a, float %x, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_fmul: ; CHECK: # %bb.0: # %entry @@ -3677,8 +3640,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x float> @llvm.vp.fdiv.v4i32(<4 x float>, <4 x 
float>, <4 x i1>, i32) - define void @sink_splat_vp_fdiv(ptr nocapture %a, float %x, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_fdiv: ; CHECK: # %bb.0: # %entry @@ -3751,8 +3712,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x float> @llvm.vp.fadd.v4i32(<4 x float>, <4 x float>, <4 x i1>, i32) - define void @sink_splat_vp_fadd(ptr nocapture %a, float %x, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_fadd: ; CHECK: # %bb.0: # %entry @@ -3789,8 +3748,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x float> @llvm.vp.fsub.v4i32(<4 x float>, <4 x float>, <4 x i1>, i32) - define void @sink_splat_vp_fsub(ptr nocapture %a, float %x, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_fsub: ; CHECK: # %bb.0: # %entry @@ -3827,8 +3784,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x float> @llvm.vp.frsub.v4i32(<4 x float>, <4 x float>, <4 x i1>, i32) - define void @sink_splat_vp_frsub(ptr nocapture %a, float %x, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_frsub: ; CHECK: # %bb.0: # %entry @@ -3865,8 +3820,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i32> @llvm.vp.udiv.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define void @sink_splat_vp_udiv(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_udiv: ; CHECK: # %bb.0: # %entry @@ -3903,8 +3856,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i32> @llvm.vp.sdiv.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define void @sink_splat_vp_sdiv(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_sdiv: ; CHECK: # %bb.0: # %entry @@ -3941,8 +3892,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i32> @llvm.vp.urem.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define void @sink_splat_vp_urem(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) { ; 
CHECK-LABEL: sink_splat_vp_urem: ; CHECK: # %bb.0: # %entry @@ -3979,8 +3928,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i32> @llvm.vp.srem.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define void @sink_splat_vp_srem(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_srem: ; CHECK: # %bb.0: # %entry @@ -4056,8 +4003,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x float> @llvm.vp.fma.v4f32(<4 x float>, <4 x float>, <4 x float>, <4 x i1>, i32) - define void @sink_splat_vp_fma(ptr noalias nocapture %a, ptr nocapture readonly %b, float %x, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_fma: ; CHECK: # %bb.0: # %entry @@ -4138,7 +4083,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } - define void @sink_splat_mul_lmul2(ptr nocapture %a, i64 signext %x) { ; CHECK-LABEL: sink_splat_mul_lmul2: ; CHECK: # %bb.0: # %entry @@ -4860,8 +4804,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i1> @llvm.vp.icmp.v4i32(<4 x i32>, <4 x i32>, metadata, <4 x i1>, i32) - define void @sink_splat_vp_icmp(ptr nocapture %x, i32 signext %y, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_icmp: ; CHECK: # %bb.0: # %entry @@ -4901,8 +4843,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i1> @llvm.vp.fcmp.v4f32(<4 x float>, <4 x float>, metadata, <4 x i1>, i32) - define void @sink_splat_vp_fcmp(ptr nocapture %x, float %y, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_fcmp: ; CHECK: # %bb.0: # %entry @@ -4942,8 +4882,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i32> @llvm.vp.smin.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define void @sink_splat_vp_min(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_min: ; CHECK: # %bb.0: # %entry @@ -5016,8 +4954,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i32> 
@llvm.vp.smax.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define void @sink_splat_vp_max(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_max: ; CHECK: # %bb.0: # %entry @@ -5126,8 +5062,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i32> @llvm.vp.umax.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define void @sink_splat_vp_umax(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_umax: ; CHECK: # %bb.0: # %entry @@ -5200,8 +5134,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i32> @llvm.vp.sadd.sat.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define void @sink_splat_vp_sadd_sat(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_sadd_sat: ; CHECK: # %bb.0: # %entry @@ -5274,8 +5206,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i32> @llvm.vp.ssub.sat.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define void @sink_splat_vp_ssub_sat(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_ssub_sat: ; CHECK: # %bb.0: # %entry @@ -5312,8 +5242,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i32> @llvm.vp.uadd.sat.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define void @sink_splat_vp_uadd_sat(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_uadd_sat: ; CHECK: # %bb.0: # %entry @@ -5386,8 +5314,6 @@ for.cond.cleanup: ; preds = %vector.body ret void } -declare <4 x i32> @llvm.vp.usub.sat.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define void @sink_splat_vp_usub_sat(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_usub_sat: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/smulo-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/smulo-sdnode.ll index e0e8a80037733..b5861fe7afaa8 100644 --- 
a/llvm/test/CodeGen/RISCV/rvv/smulo-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/smulo-sdnode.ll @@ -1,8 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s -declare { , } @llvm.smul.with.overflow.nxv1i8(, ) - define @smulo_nxv1i8( %x, %y) { ; CHECK-LABEL: smulo_nxv1i8: ; CHECK: # %bb.0: @@ -20,8 +18,6 @@ define @smulo_nxv1i8( %x, % ret %d } -declare { , } @llvm.smul.with.overflow.nxv2i8(, ) - define @smulo_nxv2i8( %x, %y) { ; CHECK-LABEL: smulo_nxv2i8: ; CHECK: # %bb.0: @@ -39,8 +35,6 @@ define @smulo_nxv2i8( %x, % ret %d } -declare { , } @llvm.smul.with.overflow.nxv4i8(, ) - define @smulo_nxv4i8( %x, %y) { ; CHECK-LABEL: smulo_nxv4i8: ; CHECK: # %bb.0: @@ -58,8 +52,6 @@ define @smulo_nxv4i8( %x, % ret %d } -declare { , } @llvm.smul.with.overflow.nxv8i8(, ) - define @smulo_nxv8i8( %x, %y) { ; CHECK-LABEL: smulo_nxv8i8: ; CHECK: # %bb.0: @@ -77,8 +69,6 @@ define @smulo_nxv8i8( %x, % ret %d } -declare { , } @llvm.smul.with.overflow.nxv16i8(, ) - define @smulo_nxv16i8( %x, %y) { ; CHECK-LABEL: smulo_nxv16i8: ; CHECK: # %bb.0: @@ -96,8 +86,6 @@ define @smulo_nxv16i8( %x, %d } -declare { , } @llvm.smul.with.overflow.nxv32i8(, ) - define @smulo_nxv32i8( %x, %y) { ; CHECK-LABEL: smulo_nxv32i8: ; CHECK: # %bb.0: @@ -115,8 +103,6 @@ define @smulo_nxv32i8( %x, %d } -declare { , } @llvm.smul.with.overflow.nxv64i8(, ) - define @smulo_nxv64i8( %x, %y) { ; CHECK-LABEL: smulo_nxv64i8: ; CHECK: # %bb.0: @@ -134,8 +120,6 @@ define @smulo_nxv64i8( %x, %d } -declare { , } @llvm.smul.with.overflow.nxv1i16(, ) - define @smulo_nxv1i16( %x, %y) { ; CHECK-LABEL: smulo_nxv1i16: ; CHECK: # %bb.0: @@ -153,8 +137,6 @@ define @smulo_nxv1i16( %x, %d } -declare { , } @llvm.smul.with.overflow.nxv2i16(, ) - define @smulo_nxv2i16( %x, %y) { ; CHECK-LABEL: smulo_nxv2i16: ; CHECK: # %bb.0: @@ -172,8 +154,6 @@ define @smulo_nxv2i16( %x, %d } -declare { , } 
@llvm.smul.with.overflow.nxv4i16(, ) - define @smulo_nxv4i16( %x, %y) { ; CHECK-LABEL: smulo_nxv4i16: ; CHECK: # %bb.0: @@ -191,8 +171,6 @@ define @smulo_nxv4i16( %x, %d } -declare { , } @llvm.smul.with.overflow.nxv8i16(, ) - define @smulo_nxv8i16( %x, %y) { ; CHECK-LABEL: smulo_nxv8i16: ; CHECK: # %bb.0: @@ -210,8 +188,6 @@ define @smulo_nxv8i16( %x, %d } -declare { , } @llvm.smul.with.overflow.nxv16i16(, ) - define @smulo_nxv16i16( %x, %y) { ; CHECK-LABEL: smulo_nxv16i16: ; CHECK: # %bb.0: @@ -229,8 +205,6 @@ define @smulo_nxv16i16( %x, %d } -declare { , } @llvm.smul.with.overflow.nxv32i16(, ) - define @smulo_nxv32i16( %x, %y) { ; CHECK-LABEL: smulo_nxv32i16: ; CHECK: # %bb.0: @@ -248,8 +222,6 @@ define @smulo_nxv32i16( %x, %d } -declare { , } @llvm.smul.with.overflow.nxv1i32(, ) - define @smulo_nxv1i32( %x, %y) { ; CHECK-LABEL: smulo_nxv1i32: ; CHECK: # %bb.0: @@ -267,8 +239,6 @@ define @smulo_nxv1i32( %x, %d } -declare { , } @llvm.smul.with.overflow.nxv2i32(, ) - define @smulo_nxv2i32( %x, %y) { ; CHECK-LABEL: smulo_nxv2i32: ; CHECK: # %bb.0: @@ -286,8 +256,6 @@ define @smulo_nxv2i32( %x, %d } -declare { , } @llvm.smul.with.overflow.nxv4i32(, ) - define @smulo_nxv4i32( %x, %y) { ; CHECK-LABEL: smulo_nxv4i32: ; CHECK: # %bb.0: @@ -305,8 +273,6 @@ define @smulo_nxv4i32( %x, %d } -declare { , } @llvm.smul.with.overflow.nxv8i32(, ) - define @smulo_nxv8i32( %x, %y) { ; CHECK-LABEL: smulo_nxv8i32: ; CHECK: # %bb.0: @@ -324,8 +290,6 @@ define @smulo_nxv8i32( %x, %d } -declare { , } @llvm.smul.with.overflow.nxv16i32(, ) - define @smulo_nxv16i32( %x, %y) { ; CHECK-LABEL: smulo_nxv16i32: ; CHECK: # %bb.0: @@ -343,8 +307,6 @@ define @smulo_nxv16i32( %x, %d } -declare { , } @llvm.smul.with.overflow.nxv1i64(, ) - define @smulo_nxv1i64( %x, %y) { ; CHECK-LABEL: smulo_nxv1i64: ; CHECK: # %bb.0: @@ -363,8 +325,6 @@ define @smulo_nxv1i64( %x, %d } -declare { , } @llvm.smul.with.overflow.nxv2i64(, ) - define @smulo_nxv2i64( %x, %y) { ; CHECK-LABEL: smulo_nxv2i64: ; CHECK: # 
%bb.0: @@ -383,8 +343,6 @@ define @smulo_nxv2i64( %x, %d } -declare { , } @llvm.smul.with.overflow.nxv4i64(, ) - define @smulo_nxv4i64( %x, %y) { ; CHECK-LABEL: smulo_nxv4i64: ; CHECK: # %bb.0: @@ -403,8 +361,6 @@ define @smulo_nxv4i64( %x, %d } -declare { , } @llvm.smul.with.overflow.nxv8i64(, ) - define @smulo_nxv8i64( %x, %y) { ; CHECK-LABEL: smulo_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/splat-vector-split-i64-vl-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/splat-vector-split-i64-vl-sdnode.ll index 26325328e5671..f4e77b1a4b4f8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/splat-vector-split-i64-vl-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/splat-vector-split-i64-vl-sdnode.ll @@ -1,8 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK -declare @llvm.bitreverse.nxv2i64() - define i32 @splat_vector_split_i64() { ; CHECK-LABEL: splat_vector_split_i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/splats-with-mixed-vl.ll b/llvm/test/CodeGen/RISCV/rvv/splats-with-mixed-vl.ll index fc67eec0f48a0..97ea888e6dda1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/splats-with-mixed-vl.ll +++ b/llvm/test/CodeGen/RISCV/rvv/splats-with-mixed-vl.ll @@ -226,5 +226,3 @@ define void @extract_vector_mixed3(ptr %p, ptr %p2, i32 %v) { ret void } - -declare <4 x i32> @llvm.vector.extract.v4i32.nxv1132( %vec, i64 %idx) diff --git a/llvm/test/CodeGen/RISCV/rvv/sshl_sat_vec.ll b/llvm/test/CodeGen/RISCV/rvv/sshl_sat_vec.ll index 03b090def5119..36b57865fea9b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sshl_sat_vec.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sshl_sat_vec.ll @@ -1,11 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc < %s -mtriple=riscv64 -mattr=+v | FileCheck %s -declare <2 x i64> @llvm.sshl.sat.v2i64(<2 x i64>, <2 x i64>) -declare <4 x i32> @llvm.sshl.sat.v4i32(<4 x i32>, <4 x 
i32>) -declare <8 x i16> @llvm.sshl.sat.v8i16(<8 x i16>, <8 x i16>) -declare <16 x i8> @llvm.sshl.sat.v16i8(<16 x i8>, <16 x i8>) - define <2 x i64> @vec_v2i64(<2 x i64> %x, <2 x i64> %y) nounwind { ; CHECK-LABEL: vec_v2i64: ; CHECK: # %bb.0: @@ -79,11 +74,6 @@ define <16 x i8> @vec_v16i8(<16 x i8> %x, <16 x i8> %y) nounwind { ret <16 x i8> %tmp } -declare @llvm.sshl.sat.nxv2i64(, ) -declare @llvm.sshl.sat.nxv4i32(, ) -declare @llvm.sshl.sat.nxv8i16(, ) -declare @llvm.sshl.sat.nxv16i8(, ) - define @vec_nxv2i64( %x, %y) nounwind { ; CHECK-LABEL: vec_nxv2i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/stepvector.ll b/llvm/test/CodeGen/RISCV/rvv/stepvector.ll index 95c1292e41927..a98c40b532c4e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/stepvector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/stepvector.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s -check-prefixes=CHECK,RV32 ; RUN: llc -mtriple=riscv64 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s -check-prefixes=CHECK,RV64 -declare @llvm.stepvector.nxv1i8() - define @stepvector_nxv1i8() { ; CHECK-LABEL: stepvector_nxv1i8: ; CHECK: # %bb.0: @@ -14,8 +12,6 @@ define @stepvector_nxv1i8() { ret %v } -declare @llvm.stepvector.nxv2i8() - define @stepvector_nxv2i8() { ; CHECK-LABEL: stepvector_nxv2i8: ; CHECK: # %bb.0: @@ -26,8 +22,6 @@ define @stepvector_nxv2i8() { ret %v } -declare @llvm.stepvector.nxv3i8() - define @stepvector_nxv3i8() { ; CHECK-LABEL: stepvector_nxv3i8: ; CHECK: # %bb.0: @@ -38,8 +32,6 @@ define @stepvector_nxv3i8() { ret %v } -declare @llvm.stepvector.nxv4i8() - define @stepvector_nxv4i8() { ; CHECK-LABEL: stepvector_nxv4i8: ; CHECK: # %bb.0: @@ -50,8 +42,6 @@ define @stepvector_nxv4i8() { ret %v } -declare @llvm.stepvector.nxv8i8() - define @stepvector_nxv8i8() { ; CHECK-LABEL: stepvector_nxv8i8: ; CHECK: # %bb.0: @@ -103,8 +93,6 @@ entry: ret %3 } -declare @llvm.stepvector.nxv16i8() - define @stepvector_nxv16i8() { ; CHECK-LABEL: 
stepvector_nxv16i8: ; CHECK: # %bb.0: @@ -115,8 +103,6 @@ define @stepvector_nxv16i8() { ret %v } -declare @llvm.stepvector.nxv32i8() - define @stepvector_nxv32i8() { ; CHECK-LABEL: stepvector_nxv32i8: ; CHECK: # %bb.0: @@ -127,8 +113,6 @@ define @stepvector_nxv32i8() { ret %v } -declare @llvm.stepvector.nxv64i8() - define @stepvector_nxv64i8() { ; CHECK-LABEL: stepvector_nxv64i8: ; CHECK: # %bb.0: @@ -139,8 +123,6 @@ define @stepvector_nxv64i8() { ret %v } -declare @llvm.stepvector.nxv1i16() - define @stepvector_nxv1i16() { ; CHECK-LABEL: stepvector_nxv1i16: ; CHECK: # %bb.0: @@ -151,8 +133,6 @@ define @stepvector_nxv1i16() { ret %v } -declare @llvm.stepvector.nxv2i16() - define @stepvector_nxv2i16() { ; CHECK-LABEL: stepvector_nxv2i16: ; CHECK: # %bb.0: @@ -163,8 +143,6 @@ define @stepvector_nxv2i16() { ret %v } -declare @llvm.stepvector.nxv2i15() - define @stepvector_nxv2i15() { ; CHECK-LABEL: stepvector_nxv2i15: ; CHECK: # %bb.0: @@ -175,8 +153,6 @@ define @stepvector_nxv2i15() { ret %v } -declare @llvm.stepvector.nxv3i16() - define @stepvector_nxv3i16() { ; CHECK-LABEL: stepvector_nxv3i16: ; CHECK: # %bb.0: @@ -187,8 +163,6 @@ define @stepvector_nxv3i16() { ret %v } -declare @llvm.stepvector.nxv4i16() - define @stepvector_nxv4i16() { ; CHECK-LABEL: stepvector_nxv4i16: ; CHECK: # %bb.0: @@ -199,8 +173,6 @@ define @stepvector_nxv4i16() { ret %v } -declare @llvm.stepvector.nxv8i16() - define @stepvector_nxv8i16() { ; CHECK-LABEL: stepvector_nxv8i16: ; CHECK: # %bb.0: @@ -211,8 +183,6 @@ define @stepvector_nxv8i16() { ret %v } -declare @llvm.stepvector.nxv16i16() - define @stepvector_nxv16i16() { ; CHECK-LABEL: stepvector_nxv16i16: ; CHECK: # %bb.0: @@ -264,8 +234,6 @@ entry: ret %3 } -declare @llvm.stepvector.nxv32i16() - define @stepvector_nxv32i16() { ; CHECK-LABEL: stepvector_nxv32i16: ; CHECK: # %bb.0: @@ -276,8 +244,6 @@ define @stepvector_nxv32i16() { ret %v } -declare @llvm.stepvector.nxv1i32() - define @stepvector_nxv1i32() { ; CHECK-LABEL: 
stepvector_nxv1i32: ; CHECK: # %bb.0: @@ -288,8 +254,6 @@ define @stepvector_nxv1i32() { ret %v } -declare @llvm.stepvector.nxv2i32() - define @stepvector_nxv2i32() { ; CHECK-LABEL: stepvector_nxv2i32: ; CHECK: # %bb.0: @@ -300,8 +264,6 @@ define @stepvector_nxv2i32() { ret %v } -declare @llvm.stepvector.nxv3i32() - define @stepvector_nxv3i32() { ; CHECK-LABEL: stepvector_nxv3i32: ; CHECK: # %bb.0: @@ -312,8 +274,6 @@ define @stepvector_nxv3i32() { ret %v } -declare @llvm.stepvector.nxv4i32() - define @stepvector_nxv4i32() { ; CHECK-LABEL: stepvector_nxv4i32: ; CHECK: # %bb.0: @@ -324,8 +284,6 @@ define @stepvector_nxv4i32() { ret %v } -declare @llvm.stepvector.nxv8i32() - define @stepvector_nxv8i32() { ; CHECK-LABEL: stepvector_nxv8i32: ; CHECK: # %bb.0: @@ -336,8 +294,6 @@ define @stepvector_nxv8i32() { ret %v } -declare @llvm.stepvector.nxv16i32() - define @stepvector_nxv16i32() { ; CHECK-LABEL: stepvector_nxv16i32: ; CHECK: # %bb.0: @@ -389,8 +345,6 @@ entry: ret %3 } -declare @llvm.stepvector.nxv1i64() - define @stepvector_nxv1i64() { ; CHECK-LABEL: stepvector_nxv1i64: ; CHECK: # %bb.0: @@ -401,8 +355,6 @@ define @stepvector_nxv1i64() { ret %v } -declare @llvm.stepvector.nxv2i64() - define @stepvector_nxv2i64() { ; CHECK-LABEL: stepvector_nxv2i64: ; CHECK: # %bb.0: @@ -413,8 +365,6 @@ define @stepvector_nxv2i64() { ret %v } -declare @llvm.stepvector.nxv3i64() - define @stepvector_nxv3i64() { ; CHECK-LABEL: stepvector_nxv3i64: ; CHECK: # %bb.0: @@ -425,8 +375,6 @@ define @stepvector_nxv3i64() { ret %v } -declare @llvm.stepvector.nxv4i64() - define @stepvector_nxv4i64() { ; CHECK-LABEL: stepvector_nxv4i64: ; CHECK: # %bb.0: @@ -437,8 +385,6 @@ define @stepvector_nxv4i64() { ret %v } -declare @llvm.stepvector.nxv8i64() - define @stepvector_nxv8i64() { ; CHECK-LABEL: stepvector_nxv8i64: ; CHECK: # %bb.0: @@ -525,8 +471,6 @@ entry: ret %3 } -declare @llvm.stepvector.nxv16i64() - define @stepvector_nxv16i64() { ; RV32-LABEL: stepvector_nxv16i64: ; RV32: # %bb.0: 
diff --git a/llvm/test/CodeGen/RISCV/rvv/strided-load-store.ll b/llvm/test/CodeGen/RISCV/rvv/strided-load-store.ll index d801c5187b592..6c51848d9080d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/strided-load-store.ll +++ b/llvm/test/CodeGen/RISCV/rvv/strided-load-store.ll @@ -3,8 +3,6 @@ %struct.foo = type { i32, i32, i32, i32 } -declare @llvm.stepvector.nxv1i64() - define @gather(ptr %a, i32 %len) { ; CHECK-LABEL: @gather( ; CHECK-NEXT: vector.ph: @@ -662,11 +660,6 @@ define @vector_base_vector_offset(ptr %p, ret %x } -declare i64 @llvm.vscale.i64() -declare void @llvm.masked.scatter.nxv1i64.nxv1p0(, , i32, ) -declare @llvm.masked.gather.nxv1i64.nxv1p0(, i32, , ) - - define @vp_gather(ptr %a, i32 %len) { ; CHECK-LABEL: @vp_gather( ; CHECK-NEXT: vector.ph: diff --git a/llvm/test/CodeGen/RISCV/rvv/strided-vpload-vpstore-output.ll b/llvm/test/CodeGen/RISCV/rvv/strided-vpload-vpstore-output.ll index 081afcfab8dae..f087efcc5f57b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/strided-vpload-vpstore-output.ll +++ b/llvm/test/CodeGen/RISCV/rvv/strided-vpload-vpstore-output.ll @@ -5,8 +5,6 @@ ; these instructions. MachineMemOperand handling can't currently deal with a ; negative stride that would allow memory before the pointer to be read. 
-declare @llvm.experimental.vp.strided.load.nxv1i8.p0.i8(ptr, i8, , i32) - define @strided_vpload_nxv1i8_i8(ptr %ptr, i8 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: name: strided_vpload_nxv1i8_i8 ; CHECK: bb.0 (%ir-block.0): @@ -24,8 +22,6 @@ define @strided_vpload_nxv1i8_i8(ptr %ptr, i8 signext %stride, ret %load } -declare void @llvm.experimental.vp.strided.store.nxv1i8.p0.i8(, ptr, i8, , i32) - define void @strided_vpstore_nxv1i8_i8( %val, ptr %ptr, i8 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: name: strided_vpstore_nxv1i8_i8 ; CHECK: bb.0 (%ir-block.0): diff --git a/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll b/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll index b6aa4affbb10f..6381887a1a2f9 100644 --- a/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll +++ b/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll @@ -24,8 +24,6 @@ ; RUN: -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-RV64,CHECK-NO-OPT,CHECK-NO-OPT-ZVFHMIN,CHECK-NO-OPT-RV64 -declare @llvm.experimental.vp.strided.load.nxv1i8.p0.i8(ptr, i8, , i32) - define @strided_vpload_nxv1i8_i8(ptr %ptr, i8 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv1i8_i8: ; CHECK: # %bb.0: @@ -36,8 +34,6 @@ define @strided_vpload_nxv1i8_i8(ptr %ptr, i8 signext %stride, ret %load } -declare @llvm.experimental.vp.strided.load.nxv1i8.p0.i16(ptr, i16, , i32) - define @strided_vpload_nxv1i8_i16(ptr %ptr, i16 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv1i8_i16: ; CHECK: # %bb.0: @@ -48,8 +44,6 @@ define @strided_vpload_nxv1i8_i16(ptr %ptr, i16 signext %strid ret %load } -declare @llvm.experimental.vp.strided.load.nxv1i8.p0.i64(ptr, i64, , i32) - define @strided_vpload_nxv1i8_i64(ptr %ptr, i64 signext %stride, %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_nxv1i8_i64: ; CHECK-RV32: # %bb.0: @@ -82,8 +76,6 @@ define @strided_vpload_nxv1i8_i64_allones_mask(ptr %ptr, i64 s ret %load } -declare 
@llvm.experimental.vp.strided.load.nxv1i8.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv1i8(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv1i8: ; CHECK: # %bb.0: @@ -104,8 +96,6 @@ define @strided_vpload_nxv1i8_allones_mask(ptr %ptr, i32 signe ret %load } -declare @llvm.experimental.vp.strided.load.nxv2i8.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv2i8(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv2i8: ; CHECK: # %bb.0: @@ -116,8 +106,6 @@ define @strided_vpload_nxv2i8(ptr %ptr, i32 signext %stride, < ret %load } -declare @llvm.experimental.vp.strided.load.nxv4i8.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv4i8(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv4i8: ; CHECK: # %bb.0: @@ -128,8 +116,6 @@ define @strided_vpload_nxv4i8(ptr %ptr, i32 signext %stride, < ret %load } -declare @llvm.experimental.vp.strided.load.nxv8i8.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv8i8(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv8i8: ; CHECK: # %bb.0: @@ -160,8 +146,6 @@ define @strided_vpload_nxv8i8_allones_mask(ptr %ptr, i32 signe ret %load } -declare @llvm.experimental.vp.strided.load.nxv1i16.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv1i16(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv1i16: ; CHECK: # %bb.0: @@ -172,8 +156,6 @@ define @strided_vpload_nxv1i16(ptr %ptr, i32 signext %stride, ret %load } -declare @llvm.experimental.vp.strided.load.nxv2i16.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv2i16(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv2i16: ; CHECK: # %bb.0: @@ -194,8 +176,6 @@ define @strided_vpload_nxv2i16_allones_mask(ptr %ptr, i32 sig ret %load } -declare @llvm.experimental.vp.strided.load.nxv4i16.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv4i16(ptr %ptr, i32 
signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv4i16: ; CHECK: # %bb.0: @@ -216,8 +196,6 @@ define @strided_vpload_nxv4i16_unit_stride(ptr %ptr, %load } -declare @llvm.experimental.vp.strided.load.nxv8i16.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv8i16(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv8i16: ; CHECK: # %bb.0: @@ -228,8 +206,6 @@ define @strided_vpload_nxv8i16(ptr %ptr, i32 signext %stride, ret %load } -declare @llvm.experimental.vp.strided.load.nxv1i32.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv1i32(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv1i32: ; CHECK: # %bb.0: @@ -240,8 +216,6 @@ define @strided_vpload_nxv1i32(ptr %ptr, i32 signext %stride, ret %load } -declare @llvm.experimental.vp.strided.load.nxv2i32.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv2i32(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv2i32: ; CHECK: # %bb.0: @@ -262,8 +236,6 @@ define @strided_vpload_nxv2i32_unit_stride(ptr %ptr, %load } -declare @llvm.experimental.vp.strided.load.nxv4i32.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv4i32(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv4i32: ; CHECK: # %bb.0: @@ -284,8 +256,6 @@ define @strided_vpload_nxv4i32_allones_mask(ptr %ptr, i32 sig ret %load } -declare @llvm.experimental.vp.strided.load.nxv8i32.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv8i32(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv8i32: ; CHECK: # %bb.0: @@ -296,8 +266,6 @@ define @strided_vpload_nxv8i32(ptr %ptr, i32 signext %stride, ret %load } -declare @llvm.experimental.vp.strided.load.nxv1i64.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv1i64(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv1i64: ; CHECK: # %bb.0: @@ -328,8 +296,6 @@ define 
@strided_vpload_nxv1i64_allones_mask(ptr %ptr, i32 sig ret %load } -declare @llvm.experimental.vp.strided.load.nxv2i64.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv2i64(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv2i64: ; CHECK: # %bb.0: @@ -340,8 +306,6 @@ define @strided_vpload_nxv2i64(ptr %ptr, i32 signext %stride, ret %load } -declare @llvm.experimental.vp.strided.load.nxv4i64.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv4i64(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv4i64: ; CHECK: # %bb.0: @@ -352,8 +316,6 @@ define @strided_vpload_nxv4i64(ptr %ptr, i32 signext %stride, ret %load } -declare @llvm.experimental.vp.strided.load.nxv8i64.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv8i64(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv8i64: ; CHECK: # %bb.0: @@ -364,8 +326,6 @@ define @strided_vpload_nxv8i64(ptr %ptr, i32 signext %stride, ret %load } -declare @llvm.experimental.vp.strided.load.nxv1bf16.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv1bf16(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv1bf16: ; CHECK: # %bb.0: @@ -376,8 +336,6 @@ define @strided_vpload_nxv1bf16(ptr %ptr, i32 signext %str ret %load } -declare @llvm.experimental.vp.strided.load.nxv2bf16.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv2bf16(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv2bf16: ; CHECK: # %bb.0: @@ -398,8 +356,6 @@ define @strided_vpload_nxv2bf16_allones_mask(ptr %ptr, i32 ret %load } -declare @llvm.experimental.vp.strided.load.nxv4bf16.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv4bf16(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv4bf16: ; CHECK: # %bb.0: @@ -420,8 +376,6 @@ define @strided_vpload_nxv4bf16_unit_stride(ptr %ptr, %load } -declare 
@llvm.experimental.vp.strided.load.nxv8bf16.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv8bf16(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv8bf16: ; CHECK: # %bb.0: @@ -432,8 +386,6 @@ define @strided_vpload_nxv8bf16(ptr %ptr, i32 signext %str ret %load } -declare @llvm.experimental.vp.strided.load.nxv1f16.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv1f16(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv1f16: ; CHECK: # %bb.0: @@ -444,8 +396,6 @@ define @strided_vpload_nxv1f16(ptr %ptr, i32 signext %stride ret %load } -declare @llvm.experimental.vp.strided.load.nxv2f16.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv2f16(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv2f16: ; CHECK: # %bb.0: @@ -466,8 +416,6 @@ define @strided_vpload_nxv2f16_allones_mask(ptr %ptr, i32 si ret %load } -declare @llvm.experimental.vp.strided.load.nxv4f16.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv4f16(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv4f16: ; CHECK: # %bb.0: @@ -488,8 +436,6 @@ define @strided_vpload_nxv4f16_unit_stride(ptr %ptr, %load } -declare @llvm.experimental.vp.strided.load.nxv8f16.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv8f16(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv8f16: ; CHECK: # %bb.0: @@ -500,8 +446,6 @@ define @strided_vpload_nxv8f16(ptr %ptr, i32 signext %stride ret %load } -declare @llvm.experimental.vp.strided.load.nxv1f32.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv1f32(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv1f32: ; CHECK: # %bb.0: @@ -512,8 +456,6 @@ define @strided_vpload_nxv1f32(ptr %ptr, i32 signext %strid ret %load } -declare @llvm.experimental.vp.strided.load.nxv2f32.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv2f32(ptr %ptr, i32 signext 
%stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv2f32: ; CHECK: # %bb.0: @@ -534,8 +476,6 @@ define @strided_vpload_nxv2f32_unit_stride(ptr %ptr, %load } -declare @llvm.experimental.vp.strided.load.nxv4f32.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv4f32(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv4f32: ; CHECK: # %bb.0: @@ -546,8 +486,6 @@ define @strided_vpload_nxv4f32(ptr %ptr, i32 signext %strid ret %load } -declare @llvm.experimental.vp.strided.load.nxv8f32.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv8f32(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv8f32: ; CHECK: # %bb.0: @@ -568,8 +506,6 @@ define @strided_vpload_nxv8f32_allones_mask(ptr %ptr, i32 s ret %load } -declare @llvm.experimental.vp.strided.load.nxv1f64.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv1f64(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv1f64: ; CHECK: # %bb.0: @@ -590,8 +526,6 @@ define @strided_vpload_nxv1f64_unit_stride(ptr %ptr, %load } -declare @llvm.experimental.vp.strided.load.nxv2f64.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv2f64(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv2f64: ; CHECK: # %bb.0: @@ -602,8 +536,6 @@ define @strided_vpload_nxv2f64(ptr %ptr, i32 signext %stri ret %load } -declare @llvm.experimental.vp.strided.load.nxv4f64.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv4f64(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv4f64: ; CHECK: # %bb.0: @@ -624,8 +556,6 @@ define @strided_vpload_nxv4f64_allones_mask(ptr %ptr, i32 ret %load } -declare @llvm.experimental.vp.strided.load.nxv8f64.p0.i32(ptr, i32, , i32) - define @strided_vpload_nxv8f64(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpload_nxv8f64: ; CHECK: # %bb.0: @@ -657,8 +587,6 @@ define 
@strided_vpload_nxv3f64_allones_mask(ptr %ptr, i32 ret %v } -declare @llvm.experimental.vp.strided.load.nxv3f64.p0.i32(ptr, i32, , i32) - ; Splitting define @strided_load_nxv16f64(ptr %ptr, i64 %stride, %mask, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_load_nxv16f64: @@ -758,8 +686,6 @@ define @strided_load_nxv16f64_allones_mask(ptr %ptr, i64 ret %v } -declare @llvm.experimental.vp.strided.load.nxv16f64.p0.i64(ptr, i64, , i32) - ; Widening + splitting (with HiIsEmpty == true) ; NOTE: We can't return as that introduces a vector ; store that can't yet be legalized through widening. In order to test purely @@ -867,10 +793,6 @@ define @strided_load_nxv17f64(ptr %ptr, i64 %stride, %lo } -declare @llvm.experimental.vp.strided.load.nxv17f64.p0.i64(ptr, i64, , i32) -declare @llvm.experimental.vector.extract.nxv1f64( %vec, i64 %idx) -declare @llvm.experimental.vector.extract.nxv16f64( %vec, i64 %idx) - ; Test unmasked integer zero strided define @zero_strided_unmasked_vpload_nxv1i8_i8(ptr %ptr) { ; CHECK-OPT-LABEL: zero_strided_unmasked_vpload_nxv1i8_i8: diff --git a/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll index 2791b262cafd1..2ec89888af077 100644 --- a/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll +++ b/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll @@ -12,8 +12,6 @@ ; RUN: -verify-machineinstrs < %s | FileCheck %s \ ; RUN: -check-prefixes=CHECK,CHECK-RV64 -declare void @llvm.experimental.vp.strided.store.nxv1i8.p0.i8(, ptr, i8, , i32) - define void @strided_vpstore_nxv1i8_i8( %val, ptr %ptr, i8 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv1i8_i8: ; CHECK: # %bb.0: @@ -24,8 +22,6 @@ define void @strided_vpstore_nxv1i8_i8( %val, ptr %ptr, i8 sign ret void } -declare void @llvm.experimental.vp.strided.store.nxv1i8.p0.i16(, ptr, i16, , i32) - define void @strided_vpstore_nxv1i8_i16( %val, ptr %ptr, i16 signext %stride, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv1i8_i16: 
; CHECK: # %bb.0: @@ -36,8 +32,6 @@ define void @strided_vpstore_nxv1i8_i16( %val, ptr %ptr, i16 si ret void } -declare void @llvm.experimental.vp.strided.store.nxv1i8.p0.i64(, ptr, i64, , i32) - define void @strided_vpstore_nxv1i8_i64( %val, ptr %ptr, i64 signext %stride, %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_nxv1i8_i64: ; CHECK-RV32: # %bb.0: @@ -54,8 +48,6 @@ define void @strided_vpstore_nxv1i8_i64( %val, ptr %ptr, i64 si ret void } -declare void @llvm.experimental.vp.strided.store.nxv1i8.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv1i8( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv1i8: ; CHECK: # %bb.0: @@ -66,8 +58,6 @@ define void @strided_vpstore_nxv1i8( %val, ptr %ptr, i32 signex ret void } -declare void @llvm.experimental.vp.strided.store.nxv2i8.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv2i8( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv2i8: ; CHECK: # %bb.0: @@ -78,8 +68,6 @@ define void @strided_vpstore_nxv2i8( %val, ptr %ptr, i32 signex ret void } -declare void @llvm.experimental.vp.strided.store.nxv4i8.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv4i8( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv4i8: ; CHECK: # %bb.0: @@ -90,8 +78,6 @@ define void @strided_vpstore_nxv4i8( %val, ptr %ptr, i32 signex ret void } -declare void @llvm.experimental.vp.strided.store.nxv8i8.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv8i8( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv8i8: ; CHECK: # %bb.0: @@ -112,8 +98,6 @@ define void @strided_vpstore_nxv8i8_unit_stride( %val, ptr %ptr ret void } -declare void @llvm.experimental.vp.strided.store.nxv1i16.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv1i16( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: 
strided_vpstore_nxv1i16: ; CHECK: # %bb.0: @@ -124,8 +108,6 @@ define void @strided_vpstore_nxv1i16( %val, ptr %ptr, i32 sign ret void } -declare void @llvm.experimental.vp.strided.store.nxv2i16.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv2i16( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv2i16: ; CHECK: # %bb.0: @@ -136,8 +118,6 @@ define void @strided_vpstore_nxv2i16( %val, ptr %ptr, i32 sign ret void } -declare void @llvm.experimental.vp.strided.store.nxv4i16.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv4i16( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv4i16: ; CHECK: # %bb.0: @@ -158,8 +138,6 @@ define void @strided_vpstore_nxv4i16_unit_stride( %val, ptr %p ret void } -declare void @llvm.experimental.vp.strided.store.nxv8i16.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv8i16( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv8i16: ; CHECK: # %bb.0: @@ -170,8 +148,6 @@ define void @strided_vpstore_nxv8i16( %val, ptr %ptr, i32 sign ret void } -declare void @llvm.experimental.vp.strided.store.nxv1i32.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv1i32( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv1i32: ; CHECK: # %bb.0: @@ -182,8 +158,6 @@ define void @strided_vpstore_nxv1i32( %val, ptr %ptr, i32 sign ret void } -declare void @llvm.experimental.vp.strided.store.nxv2i32.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv2i32( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv2i32: ; CHECK: # %bb.0: @@ -194,8 +168,6 @@ define void @strided_vpstore_nxv2i32( %val, ptr %ptr, i32 sign ret void } -declare void @llvm.experimental.vp.strided.store.nxv4i32.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv4i32( %val, ptr %ptr, i32 signext %strided, %m, i32 
zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv4i32: ; CHECK: # %bb.0: @@ -216,8 +188,6 @@ define void @strided_vpstore_nxv4i32_unit_stride( %val, ptr %p ret void } -declare void @llvm.experimental.vp.strided.store.nxv8i32.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv8i32( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv8i32: ; CHECK: # %bb.0: @@ -228,8 +198,6 @@ define void @strided_vpstore_nxv8i32( %val, ptr %ptr, i32 sign ret void } -declare void @llvm.experimental.vp.strided.store.nxv1i64.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv1i64( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv1i64: ; CHECK: # %bb.0: @@ -250,8 +218,6 @@ define void @strided_vpstore_nxv1i64_unit_stride( %val, ptr %p ret void } -declare void @llvm.experimental.vp.strided.store.nxv2i64.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv2i64( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv2i64: ; CHECK: # %bb.0: @@ -262,8 +228,6 @@ define void @strided_vpstore_nxv2i64( %val, ptr %ptr, i32 sign ret void } -declare void @llvm.experimental.vp.strided.store.nxv4i64.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv4i64( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv4i64: ; CHECK: # %bb.0: @@ -274,8 +238,6 @@ define void @strided_vpstore_nxv4i64( %val, ptr %ptr, i32 sign ret void } -declare void @llvm.experimental.vp.strided.store.nxv8i64.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv8i64( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv8i64: ; CHECK: # %bb.0: @@ -286,8 +248,6 @@ define void @strided_vpstore_nxv8i64( %val, ptr %ptr, i32 sign ret void } -declare void @llvm.experimental.vp.strided.store.nxv1bf16.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv1bf16( %val, ptr %ptr, 
i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv1bf16: ; CHECK: # %bb.0: @@ -298,8 +258,6 @@ define void @strided_vpstore_nxv1bf16( %val, ptr %ptr, i32 ret void } -declare void @llvm.experimental.vp.strided.store.nxv2bf16.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv2bf16( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv2bf16: ; CHECK: # %bb.0: @@ -310,8 +268,6 @@ define void @strided_vpstore_nxv2bf16( %val, ptr %ptr, i32 ret void } -declare void @llvm.experimental.vp.strided.store.nxv4bf16.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv4bf16( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv4bf16: ; CHECK: # %bb.0: @@ -332,8 +288,6 @@ define void @strided_vpstore_nxv4bf16_unit_stride( %val, pt ret void } -declare void @llvm.experimental.vp.strided.store.nxv8bf16.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv8bf16( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv8bf16: ; CHECK: # %bb.0: @@ -344,8 +298,6 @@ define void @strided_vpstore_nxv8bf16( %val, ptr %ptr, i32 ret void } -declare void @llvm.experimental.vp.strided.store.nxv1f16.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv1f16( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv1f16: ; CHECK: # %bb.0: @@ -356,8 +308,6 @@ define void @strided_vpstore_nxv1f16( %val, ptr %ptr, i32 sig ret void } -declare void @llvm.experimental.vp.strided.store.nxv2f16.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv2f16( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv2f16: ; CHECK: # %bb.0: @@ -368,8 +318,6 @@ define void @strided_vpstore_nxv2f16( %val, ptr %ptr, i32 sig ret void } -declare void @llvm.experimental.vp.strided.store.nxv4f16.p0.i32(, ptr, i32, , i32) - define void 
@strided_vpstore_nxv4f16( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv4f16: ; CHECK: # %bb.0: @@ -390,8 +338,6 @@ define void @strided_vpstore_nxv4f16_unit_stride( %val, ptr % ret void } -declare void @llvm.experimental.vp.strided.store.nxv8f16.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv8f16( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv8f16: ; CHECK: # %bb.0: @@ -402,8 +348,6 @@ define void @strided_vpstore_nxv8f16( %val, ptr %ptr, i32 sig ret void } -declare void @llvm.experimental.vp.strided.store.nxv1f32.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv1f32( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv1f32: ; CHECK: # %bb.0: @@ -414,8 +358,6 @@ define void @strided_vpstore_nxv1f32( %val, ptr %ptr, i32 si ret void } -declare void @llvm.experimental.vp.strided.store.nxv2f32.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv2f32( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv2f32: ; CHECK: # %bb.0: @@ -426,8 +368,6 @@ define void @strided_vpstore_nxv2f32( %val, ptr %ptr, i32 si ret void } -declare void @llvm.experimental.vp.strided.store.nxv4f32.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv4f32( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv4f32: ; CHECK: # %bb.0: @@ -448,8 +388,6 @@ define void @strided_vpstore_nxv4f32_unit_stride( %val, ptr ret void } -declare void @llvm.experimental.vp.strided.store.nxv8f32.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv8f32( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv8f32: ; CHECK: # %bb.0: @@ -460,8 +398,6 @@ define void @strided_vpstore_nxv8f32( %val, ptr %ptr, i32 si ret void } -declare void @llvm.experimental.vp.strided.store.nxv1f64.p0.i32(, ptr, i32, , 
i32) - define void @strided_vpstore_nxv1f64( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv1f64: ; CHECK: # %bb.0: @@ -482,8 +418,6 @@ define void @strided_vpstore_nxv1f64_unit_stride( %val, ptr ret void } -declare void @llvm.experimental.vp.strided.store.nxv2f64.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv2f64( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv2f64: ; CHECK: # %bb.0: @@ -494,8 +428,6 @@ define void @strided_vpstore_nxv2f64( %val, ptr %ptr, i32 s ret void } -declare void @llvm.experimental.vp.strided.store.nxv4f64.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv4f64( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv4f64: ; CHECK: # %bb.0: @@ -506,8 +438,6 @@ define void @strided_vpstore_nxv4f64( %val, ptr %ptr, i32 s ret void } -declare void @llvm.experimental.vp.strided.store.nxv8f64.p0.i32(, ptr, i32, , i32) - define void @strided_vpstore_nxv8f64( %val, ptr %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-LABEL: strided_vpstore_nxv8f64: ; CHECK: # %bb.0: @@ -549,8 +479,6 @@ define void @strided_vpstore_nxv3f32_allones_mask( %v, ptr % ret void } -declare void @llvm.experimental.vp.strided.store.nxv3f32.p0.i32(, ptr , i32, , i32) - ; Splitting define void @strided_store_nxv16f64( %v, ptr %ptr, i32 signext %stride, %mask, i32 zeroext %evl) { ; CHECK-LABEL: strided_store_nxv16f64: @@ -603,8 +531,6 @@ define void @strided_store_nxv16f64_allones_mask( %v, ptr ret void } -declare void @llvm.experimental.vp.strided.store.nxv16f64.p0.i32(, ptr, i32, , i32) - ; Widening + splitting (with HiIsEmpty == true) define void @strided_store_nxv17f64( %v, ptr %ptr, i32 signext %stride, %mask, i32 zeroext %evl) { ; CHECK-LABEL: strided_store_nxv17f64: @@ -658,4 +584,3 @@ define void @strided_store_nxv17f64( %v, ptr %ptr, i32 sig ret void } -declare void 
@llvm.experimental.vp.strided.store.nxv17f64.p0.i32(, ptr, i32, , i32) diff --git a/llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir b/llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir index a2cdd473163df..adfa39b71fd4a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir +++ b/llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir @@ -19,11 +19,7 @@ ret %load } - ; Function Attrs: argmemonly nofree nosync nounwind readonly willreturn - declare @llvm.masked.load.nxv8i64.p0(ptr, i32 immarg, , ) #1 - attributes #0 = { nounwind "target-features"="+v" } - attributes #1 = { argmemonly nofree nosync nounwind readonly willreturn "target-features"="+v" } ... --- diff --git a/llvm/test/CodeGen/RISCV/rvv/trunc-sat-clip-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/trunc-sat-clip-sdnode.ll index 3ed437eeed2ff..4d642d913a64e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/trunc-sat-clip-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/trunc-sat-clip-sdnode.ll @@ -1,17 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s -declare @llvm.smax.v4i16(, ) -declare @llvm.smin.v4i16(, ) -declare @llvm.smax.v4i32(, ) -declare @llvm.smin.v4i32(, ) -declare @llvm.smax.v4i64(, ) -declare @llvm.smin.v4i64(, ) - -declare @llvm.umin.v4i16(, ) -declare @llvm.umin.v4i32(, ) -declare @llvm.umin.v4i64(, ) - define void @trunc_sat_i8i16_maxmin(ptr %x, ptr %y) { ; CHECK-LABEL: trunc_sat_i8i16_maxmin: ; CHECK: # %bb.0: @@ -134,7 +123,6 @@ define void @trunc_sat_u8u16_minmax(ptr %x, ptr %y) { ret void } - define void @trunc_sat_i16i32_notopt(ptr %x, ptr %y) { ; CHECK-LABEL: trunc_sat_i16i32_notopt: ; CHECK: # %bb.0: @@ -261,7 +249,6 @@ define void @trunc_sat_u16u32_minmax(ptr %x, ptr %y) { ret void } - define void @trunc_sat_i32i64_notopt(ptr %x, ptr %y) { ; CHECK-LABEL: trunc_sat_i32i64_notopt: ; CHECK: # %bb.0: @@ -317,7 +304,6 @@ define void 
@trunc_sat_i32i64_minmax(ptr %x, ptr %y) { ret void } - define void @trunc_sat_u32u64_notopt(ptr %x, ptr %y) { ; CHECK-LABEL: trunc_sat_u32u64_notopt: ; CHECK: # %bb.0: @@ -352,7 +338,6 @@ define void @trunc_sat_u32u64_min(ptr %x, ptr %y) { ret void } - define void @trunc_sat_u32u64_maxmin(ptr %x, ptr %y) { ; CHECK-LABEL: trunc_sat_u32u64_maxmin: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/umulo-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/umulo-sdnode.ll index 68e0c0089d0c7..4ac59b412b45c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/umulo-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/umulo-sdnode.ll @@ -1,8 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s -declare { , } @llvm.umul.with.overflow.nxv1i8(, ) - define @umulo_nxv1i8( %x, %y) { ; CHECK-LABEL: umulo_nxv1i8: ; CHECK: # %bb.0: @@ -19,8 +17,6 @@ define @umulo_nxv1i8( %x, % ret %d } -declare { , } @llvm.umul.with.overflow.nxv2i8(, ) - define @umulo_nxv2i8( %x, %y) { ; CHECK-LABEL: umulo_nxv2i8: ; CHECK: # %bb.0: @@ -37,8 +33,6 @@ define @umulo_nxv2i8( %x, % ret %d } -declare { , } @llvm.umul.with.overflow.nxv4i8(, ) - define @umulo_nxv4i8( %x, %y) { ; CHECK-LABEL: umulo_nxv4i8: ; CHECK: # %bb.0: @@ -55,8 +49,6 @@ define @umulo_nxv4i8( %x, % ret %d } -declare { , } @llvm.umul.with.overflow.nxv8i8(, ) - define @umulo_nxv8i8( %x, %y) { ; CHECK-LABEL: umulo_nxv8i8: ; CHECK: # %bb.0: @@ -73,8 +65,6 @@ define @umulo_nxv8i8( %x, % ret %d } -declare { , } @llvm.umul.with.overflow.nxv16i8(, ) - define @umulo_nxv16i8( %x, %y) { ; CHECK-LABEL: umulo_nxv16i8: ; CHECK: # %bb.0: @@ -91,8 +81,6 @@ define @umulo_nxv16i8( %x, %d } -declare { , } @llvm.umul.with.overflow.nxv32i8(, ) - define @umulo_nxv32i8( %x, %y) { ; CHECK-LABEL: umulo_nxv32i8: ; CHECK: # %bb.0: @@ -109,8 +97,6 @@ define @umulo_nxv32i8( %x, %d } -declare { , } @llvm.umul.with.overflow.nxv64i8(, ) - define @umulo_nxv64i8( %x, %y) { ; 
CHECK-LABEL: umulo_nxv64i8: ; CHECK: # %bb.0: @@ -127,8 +113,6 @@ define @umulo_nxv64i8( %x, %d } -declare { , } @llvm.umul.with.overflow.nxv1i16(, ) - define @umulo_nxv1i16( %x, %y) { ; CHECK-LABEL: umulo_nxv1i16: ; CHECK: # %bb.0: @@ -145,8 +129,6 @@ define @umulo_nxv1i16( %x, %d } -declare { , } @llvm.umul.with.overflow.nxv2i16(, ) - define @umulo_nxv2i16( %x, %y) { ; CHECK-LABEL: umulo_nxv2i16: ; CHECK: # %bb.0: @@ -163,8 +145,6 @@ define @umulo_nxv2i16( %x, %d } -declare { , } @llvm.umul.with.overflow.nxv4i16(, ) - define @umulo_nxv4i16( %x, %y) { ; CHECK-LABEL: umulo_nxv4i16: ; CHECK: # %bb.0: @@ -181,8 +161,6 @@ define @umulo_nxv4i16( %x, %d } -declare { , } @llvm.umul.with.overflow.nxv8i16(, ) - define @umulo_nxv8i16( %x, %y) { ; CHECK-LABEL: umulo_nxv8i16: ; CHECK: # %bb.0: @@ -199,8 +177,6 @@ define @umulo_nxv8i16( %x, %d } -declare { , } @llvm.umul.with.overflow.nxv16i16(, ) - define @umulo_nxv16i16( %x, %y) { ; CHECK-LABEL: umulo_nxv16i16: ; CHECK: # %bb.0: @@ -217,8 +193,6 @@ define @umulo_nxv16i16( %x, %d } -declare { , } @llvm.umul.with.overflow.nxv32i16(, ) - define @umulo_nxv32i16( %x, %y) { ; CHECK-LABEL: umulo_nxv32i16: ; CHECK: # %bb.0: @@ -235,8 +209,6 @@ define @umulo_nxv32i16( %x, %d } -declare { , } @llvm.umul.with.overflow.nxv1i32(, ) - define @umulo_nxv1i32( %x, %y) { ; CHECK-LABEL: umulo_nxv1i32: ; CHECK: # %bb.0: @@ -253,8 +225,6 @@ define @umulo_nxv1i32( %x, %d } -declare { , } @llvm.umul.with.overflow.nxv2i32(, ) - define @umulo_nxv2i32( %x, %y) { ; CHECK-LABEL: umulo_nxv2i32: ; CHECK: # %bb.0: @@ -271,8 +241,6 @@ define @umulo_nxv2i32( %x, %d } -declare { , } @llvm.umul.with.overflow.nxv4i32(, ) - define @umulo_nxv4i32( %x, %y) { ; CHECK-LABEL: umulo_nxv4i32: ; CHECK: # %bb.0: @@ -289,8 +257,6 @@ define @umulo_nxv4i32( %x, %d } -declare { , } @llvm.umul.with.overflow.nxv8i32(, ) - define @umulo_nxv8i32( %x, %y) { ; CHECK-LABEL: umulo_nxv8i32: ; CHECK: # %bb.0: @@ -307,8 +273,6 @@ define @umulo_nxv8i32( %x, %d } -declare { , } 
@llvm.umul.with.overflow.nxv16i32(, ) - define @umulo_nxv16i32( %x, %y) { ; CHECK-LABEL: umulo_nxv16i32: ; CHECK: # %bb.0: @@ -325,8 +289,6 @@ define @umulo_nxv16i32( %x, %d } -declare { , } @llvm.umul.with.overflow.nxv1i64(, ) - define @umulo_nxv1i64( %x, %y) { ; CHECK-LABEL: umulo_nxv1i64: ; CHECK: # %bb.0: @@ -343,8 +305,6 @@ define @umulo_nxv1i64( %x, %d } -declare { , } @llvm.umul.with.overflow.nxv2i64(, ) - define @umulo_nxv2i64( %x, %y) { ; CHECK-LABEL: umulo_nxv2i64: ; CHECK: # %bb.0: @@ -361,8 +321,6 @@ define @umulo_nxv2i64( %x, %d } -declare { , } @llvm.umul.with.overflow.nxv4i64(, ) - define @umulo_nxv4i64( %x, %y) { ; CHECK-LABEL: umulo_nxv4i64: ; CHECK: # %bb.0: @@ -379,8 +337,6 @@ define @umulo_nxv4i64( %x, %d } -declare { , } @llvm.umul.with.overflow.nxv8i64(, ) - define @umulo_nxv8i64( %x, %y) { ; CHECK-LABEL: umulo_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.ll b/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.ll index 41d0b63285752..183cfdfdae626 100644 --- a/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.ll +++ b/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.ll @@ -146,15 +146,6 @@ loopIR3.i.i: ; preds = %loopIR3.i.i, %loopI br label %loopIR3.i.i } -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) -declare @llvm.riscv.vrgather.vx.nxv2f32.i64(, , i64, i64) #2 -declare void @llvm.riscv.vse.nxv2f32.i64(, ptr nocapture, i64) -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) -declare @llvm.stepvector.nxv1i16() -declare @llvm.vector.insert.nxv8i16.nxv1i16(, , i64 immarg) -declare @llvm.riscv.vrgatherei16.vv.nxv8i8.i64(, , , i64) - - define void @repeat_shuffle(<2 x double> %v, ptr noalias %q) { ; CHECK-LABEL: repeat_shuffle: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.mir b/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.mir index 75e0539843ac5..d79539b819ac3 100644 --- 
a/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.mir +++ b/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.mir @@ -13,10 +13,7 @@ ret %0 } - declare @llvm.riscv.vrgather.vx.nxv2f32.i64(, , i64, i64) #1 - attributes #0 = { "target-features"="+v" } - attributes #1 = { nocallback nofree nosync nounwind willreturn memory(none) "target-features"="+v" } ... --- diff --git a/llvm/test/CodeGen/RISCV/rvv/undef-vp-ops.ll b/llvm/test/CodeGen/RISCV/rvv/undef-vp-ops.ll index de0c4e6b84c1f..8a66131f70954 100644 --- a/llvm/test/CodeGen/RISCV/rvv/undef-vp-ops.ll +++ b/llvm/test/CodeGen/RISCV/rvv/undef-vp-ops.ll @@ -6,8 +6,6 @@ ; Test that we can remove trivially-poison VP operations of various kinds. -declare <4 x i32> @llvm.vp.load.v4i32.p0(ptr, <4 x i1>, i32) - define <4 x i32> @vload_v4i32_zero_evl(ptr %ptr, <4 x i1> %m) { ; CHECK-LABEL: vload_v4i32_zero_evl: ; CHECK: # %bb.0: @@ -24,8 +22,6 @@ define <4 x i32> @vload_v4i32_false_mask(ptr %ptr, i32 %evl) { ret <4 x i32> %v } -declare <4 x i32> @llvm.vp.gather.v4i32.v4p0(<4 x ptr>, <4 x i1>, i32) - define <4 x i32> @vgather_v4i32_v4i32_zero_evl(<4 x ptr> %ptrs, <4 x i1> %m) { ; CHECK-LABEL: vgather_v4i32_v4i32_zero_evl: ; CHECK: # %bb.0: @@ -42,8 +38,6 @@ define <4 x i32> @vgather_v4i32_v4i32_false_mask(<4 x ptr> %ptrs, i32 %evl) { ret <4 x i32> %v } -declare void @llvm.vp.store.v4i32.p0(<4 x i32>, ptr, <4 x i1>, i32) - define void @vstore_v4i32_zero_evl(<4 x i32> %val, ptr %ptr, <4 x i1> %m) { ; CHECK-LABEL: vstore_v4i32_zero_evl: ; CHECK: # %bb.0: @@ -60,8 +54,6 @@ define void @vstore_v4i32_false_mask(<4 x i32> %val, ptr %ptr, i32 %evl) { ret void } -declare void @llvm.vp.scatter.v4i32.v4p0(<4 x i32>, <4 x ptr>, <4 x i1>, i32) - define void @vscatter_v4i32_zero_evl(<4 x i32> %val, <4 x ptr> %ptrs, <4 x i1> %m) { ; CHECK-LABEL: vscatter_v4i32_zero_evl: ; CHECK: # %bb.0: @@ -78,8 +70,6 @@ define void @vscatter_v4i32_false_mask(<4 x i32> %val, <4 x ptr> %ptrs, i32 %evl ret void } -declare <4 x i32> 
@llvm.vp.add.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vadd_v4i32_zero_evl(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m) { ; CHECK-LABEL: vadd_v4i32_zero_evl: ; CHECK: # %bb.0: @@ -96,8 +86,6 @@ define <4 x i32> @vadd_v4i32_false_mask(<4 x i32> %va, <4 x i32> %vb, i32 %evl) ret <4 x i32> %s } -declare <4 x i32> @llvm.vp.and.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vand_v4i32_zero_evl(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m) { ; CHECK-LABEL: vand_v4i32_zero_evl: ; CHECK: # %bb.0: @@ -114,8 +102,6 @@ define <4 x i32> @vand_v4i32_false_mask(<4 x i32> %va, <4 x i32> %vb, i32 %evl) ret <4 x i32> %s } -declare <4 x i32> @llvm.vp.lshr.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vlshr_v4i32_zero_evl(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m) { ; CHECK-LABEL: vlshr_v4i32_zero_evl: ; CHECK: # %bb.0: @@ -132,8 +118,6 @@ define <4 x i32> @vlshr_v4i32_false_mask(<4 x i32> %va, <4 x i32> %vb, i32 %evl) ret <4 x i32> %s } -declare <4 x i32> @llvm.vp.mul.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vmul_v4i32_zero_evl(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m) { ; CHECK-LABEL: vmul_v4i32_zero_evl: ; CHECK: # %bb.0: @@ -150,8 +134,6 @@ define <4 x i32> @vmul_v4i32_false_mask(<4 x i32> %va, <4 x i32> %vb, i32 %evl) ret <4 x i32> %s } -declare <4 x i32> @llvm.vp.or.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vor_v4i32_zero_evl(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m) { ; CHECK-LABEL: vor_v4i32_zero_evl: ; CHECK: # %bb.0: @@ -168,8 +150,6 @@ define <4 x i32> @vor_v4i32_false_mask(<4 x i32> %va, <4 x i32> %vb, i32 %evl) { ret <4 x i32> %s } -declare <4 x i32> @llvm.vp.sdiv.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vsdiv_v4i32_zero_evl(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m) { ; CHECK-LABEL: vsdiv_v4i32_zero_evl: ; CHECK: # %bb.0: @@ -186,8 +166,6 @@ define <4 x i32> @vsdiv_v4i32_false_mask(<4 x i32> %va, <4 x i32> %vb, i32 %evl) ret <4 x i32> %s } -declare <4 x 
i32> @llvm.vp.srem.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vsrem_v4i32_zero_evl(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m) { ; CHECK-LABEL: vsrem_v4i32_zero_evl: ; CHECK: # %bb.0: @@ -204,8 +182,6 @@ define <4 x i32> @vsrem_v4i32_false_mask(<4 x i32> %va, <4 x i32> %vb, i32 %evl) ret <4 x i32> %s } -declare <4 x i32> @llvm.vp.sub.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vsub_v4i32_zero_evl(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m) { ; CHECK-LABEL: vsub_v4i32_zero_evl: ; CHECK: # %bb.0: @@ -222,8 +198,6 @@ define <4 x i32> @vsub_v4i32_false_mask(<4 x i32> %va, <4 x i32> %vb, i32 %evl) ret <4 x i32> %s } -declare <4 x i32> @llvm.vp.udiv.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vudiv_v4i32_zero_evl(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m) { ; CHECK-LABEL: vudiv_v4i32_zero_evl: ; CHECK: # %bb.0: @@ -240,8 +214,6 @@ define <4 x i32> @vudiv_v4i32_false_mask(<4 x i32> %va, <4 x i32> %vb, i32 %evl) ret <4 x i32> %s } -declare <4 x i32> @llvm.vp.urem.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vurem_v4i32_zero_evl(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m) { ; CHECK-LABEL: vurem_v4i32_zero_evl: ; CHECK: # %bb.0: @@ -258,8 +230,6 @@ define <4 x i32> @vurem_v4i32_false_mask(<4 x i32> %va, <4 x i32> %vb, i32 %evl) ret <4 x i32> %s } -declare <4 x i32> @llvm.vp.xor.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - define <4 x i32> @vxor_v4i32_zero_evl(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m) { ; CHECK-LABEL: vxor_v4i32_zero_evl: ; CHECK: # %bb.0: @@ -276,8 +246,6 @@ define <4 x i32> @vxor_v4i32_false_mask(<4 x i32> %va, <4 x i32> %vb, i32 %evl) ret <4 x i32> %s } -declare <4 x float> @llvm.vp.fadd.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32) - define <4 x float> @vfadd_v4f32_zero_evl(<4 x float> %va, <4 x float> %vb, <4 x i1> %m) { ; CHECK-LABEL: vfadd_v4f32_zero_evl: ; CHECK: # %bb.0: @@ -294,8 +262,6 @@ define <4 x float> @vfadd_v4f32_false_mask(<4 x float> %va, <4 x float> %vb, i32 ret <4 
x float> %s } -declare <4 x float> @llvm.vp.fsub.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32) - define <4 x float> @vfsub_v4f32_zero_evl(<4 x float> %va, <4 x float> %vb, <4 x i1> %m) { ; CHECK-LABEL: vfsub_v4f32_zero_evl: ; CHECK: # %bb.0: @@ -312,8 +278,6 @@ define <4 x float> @vfsub_v4f32_false_mask(<4 x float> %va, <4 x float> %vb, i32 ret <4 x float> %s } -declare <4 x float> @llvm.vp.fmul.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32) - define <4 x float> @vfmul_v4f32_zero_evl(<4 x float> %va, <4 x float> %vb, <4 x i1> %m) { ; CHECK-LABEL: vfmul_v4f32_zero_evl: ; CHECK: # %bb.0: @@ -330,8 +294,6 @@ define <4 x float> @vfmul_v4f32_false_mask(<4 x float> %va, <4 x float> %vb, i32 ret <4 x float> %s } -declare <4 x float> @llvm.vp.fdiv.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32) - define <4 x float> @vfdiv_v4f32_zero_evl(<4 x float> %va, <4 x float> %vb, <4 x i1> %m) { ; CHECK-LABEL: vfdiv_v4f32_zero_evl: ; CHECK: # %bb.0: @@ -348,8 +310,6 @@ define <4 x float> @vfdiv_v4f32_false_mask(<4 x float> %va, <4 x float> %vb, i32 ret <4 x float> %s } -declare <4 x float> @llvm.vp.frem.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32) - define <4 x float> @vfrem_v4f32_zero_evl(<4 x float> %va, <4 x float> %vb, <4 x i1> %m) { ; CHECK-LABEL: vfrem_v4f32_zero_evl: ; CHECK: # %bb.0: @@ -366,8 +326,6 @@ define <4 x float> @vfrem_v4f32_false_mask(<4 x float> %va, <4 x float> %vb, i32 ret <4 x float> %s } -declare i32 @llvm.vp.reduce.add.v4i32(i32, <4 x i32>, <4 x i1>, i32) - define i32 @vreduce_add_v4i32_zero_evl(i32 %start, <4 x i32> %val, <4 x i1> %m) { ; CHECK-LABEL: vreduce_add_v4i32_zero_evl: ; CHECK: # %bb.0: @@ -384,8 +342,6 @@ define i32 @vreduce_add_v4i32_false_mask(i32 %start, <4 x i32> %val, i32 %evl) { ret i32 %s } -declare i32 @llvm.vp.reduce.mul.v4i32(i32, <4 x i32>, <4 x i1>, i32) - define i32 @vreduce_mul_v4i32_zero_evl(i32 %start, <4 x i32> %val, <4 x i1> %m) { ; CHECK-LABEL: vreduce_mul_v4i32_zero_evl: ; CHECK: # %bb.0: @@ -402,8 +358,6 @@ define i32 
@vreduce_mul_v4i32_false_mask(i32 %start, <4 x i32> %val, i32 %evl) { ret i32 %s } -declare i32 @llvm.vp.reduce.and.v4i32(i32, <4 x i32>, <4 x i1>, i32) - define i32 @vreduce_and_v4i32_zero_evl(i32 %start, <4 x i32> %val, <4 x i1> %m) { ; CHECK-LABEL: vreduce_and_v4i32_zero_evl: ; CHECK: # %bb.0: @@ -420,8 +374,6 @@ define i32 @vreduce_and_v4i32_false_mask(i32 %start, <4 x i32> %val, i32 %evl) { ret i32 %s } -declare i32 @llvm.vp.reduce.or.v4i32(i32, <4 x i32>, <4 x i1>, i32) - define i32 @vreduce_or_v4i32_zero_evl(i32 %start, <4 x i32> %val, <4 x i1> %m) { ; CHECK-LABEL: vreduce_or_v4i32_zero_evl: ; CHECK: # %bb.0: @@ -438,8 +390,6 @@ define i32 @vreduce_or_v4i32_false_mask(i32 %start, <4 x i32> %val, i32 %evl) { ret i32 %s } -declare i32 @llvm.vp.reduce.xor.v4i32(i32, <4 x i32>, <4 x i1>, i32) - define i32 @vreduce_xor_v4i32_zero_evl(i32 %start, <4 x i32> %val, <4 x i1> %m) { ; CHECK-LABEL: vreduce_xor_v4i32_zero_evl: ; CHECK: # %bb.0: @@ -456,8 +406,6 @@ define i32 @vreduce_xor_v4i32_false_mask(i32 %start, <4 x i32> %val, i32 %evl) { ret i32 %s } -declare i32 @llvm.vp.reduce.smax.v4i32(i32, <4 x i32>, <4 x i1>, i32) - define i32 @vreduce_smax_v4i32_zero_evl(i32 %start, <4 x i32> %val, <4 x i1> %m) { ; CHECK-LABEL: vreduce_smax_v4i32_zero_evl: ; CHECK: # %bb.0: @@ -474,8 +422,6 @@ define i32 @vreduce_smax_v4i32_false_mask(i32 %start, <4 x i32> %val, i32 %evl) ret i32 %s } -declare i32 @llvm.vp.reduce.smin.v4i32(i32, <4 x i32>, <4 x i1>, i32) - define i32 @vreduce_smin_v4i32_zero_evl(i32 %start, <4 x i32> %val, <4 x i1> %m) { ; CHECK-LABEL: vreduce_smin_v4i32_zero_evl: ; CHECK: # %bb.0: @@ -492,8 +438,6 @@ define i32 @vreduce_smin_v4i32_false_mask(i32 %start, <4 x i32> %val, i32 %evl) ret i32 %s } -declare i32 @llvm.vp.reduce.umax.v4i32(i32, <4 x i32>, <4 x i1>, i32) - define i32 @vreduce_umax_v4i32_zero_evl(i32 %start, <4 x i32> %val, <4 x i1> %m) { ; CHECK-LABEL: vreduce_umax_v4i32_zero_evl: ; CHECK: # %bb.0: @@ -510,8 +454,6 @@ define i32 
@vreduce_umax_v4i32_false_mask(i32 %start, <4 x i32> %val, i32 %evl) ret i32 %s } -declare i32 @llvm.vp.reduce.umin.v4i32(i32, <4 x i32>, <4 x i1>, i32) - define i32 @vreduce_umin_v4i32_zero_evl(i32 %start, <4 x i32> %val, <4 x i1> %m) { ; CHECK-LABEL: vreduce_umin_v4i32_zero_evl: ; CHECK: # %bb.0: @@ -528,8 +470,6 @@ define i32 @vreduce_umin_v4i32_false_mask(i32 %start, <4 x i32> %val, i32 %evl) ret i32 %s } -declare float @llvm.vp.reduce.fadd.v4f32(float, <4 x float>, <4 x i1>, i32) - define float @vreduce_seq_fadd_v4f32_zero_evl(float %start, <4 x float> %val, <4 x i1> %m) { ; CHECK-LABEL: vreduce_seq_fadd_v4f32_zero_evl: ; CHECK: # %bb.0: @@ -562,8 +502,6 @@ define float @vreduce_fadd_v4f32_false_mask(float %start, <4 x float> %val, i32 ret float %s } -declare float @llvm.vp.reduce.fmul.v4f32(float, <4 x float>, <4 x i1>, i32) - define float @vreduce_seq_fmul_v4f32_zero_evl(float %start, <4 x float> %val, <4 x i1> %m) { ; CHECK-LABEL: vreduce_seq_fmul_v4f32_zero_evl: ; CHECK: # %bb.0: @@ -596,8 +534,6 @@ define float @vreduce_fmul_v4f32_false_mask(float %start, <4 x float> %val, i32 ret float %s } -declare float @llvm.vp.reduce.fmin.v4f32(float, <4 x float>, <4 x i1>, i32) - define float @vreduce_fmin_v4f32_zero_evl(float %start, <4 x float> %val, <4 x i1> %m) { ; CHECK-LABEL: vreduce_fmin_v4f32_zero_evl: ; CHECK: # %bb.0: @@ -614,8 +550,6 @@ define float @vreduce_fmin_v4f32_false_mask(float %start, <4 x float> %val, i32 ret float %s } -declare float @llvm.vp.reduce.fmax.v4f32(float, <4 x float>, <4 x i1>, i32) - define float @vreduce_fmax_v4f32_zero_evl(float %start, <4 x float> %val, <4 x i1> %m) { ; CHECK-LABEL: vreduce_fmax_v4f32_zero_evl: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/unmasked-ta.ll b/llvm/test/CodeGen/RISCV/rvv/unmasked-ta.ll index 8bed3c23078e8..a42baea6961cb 100644 --- a/llvm/test/CodeGen/RISCV/rvv/unmasked-ta.ll +++ b/llvm/test/CodeGen/RISCV/rvv/unmasked-ta.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc 
-mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vfmacc.nxv1f32.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -26,12 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv1f32.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -48,12 +36,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv1f32.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -70,12 +52,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv1f32.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -92,12 +68,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv1f32.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -114,12 +84,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv1f32.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -136,12 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv1f32.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -158,12 +116,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv1f32.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -180,12 +132,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.nxv1f32.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -202,12 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv1f32.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -224,12 +164,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -246,12 +180,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -268,13 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv1i64.i64( - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64: ; RV32: # %bb.0: # %entry @@ -303,13 +224,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmadd.nxv1i64.i64( - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64: ; RV32: # %bb.0: # %entry @@ -338,13 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv1i64.i64( - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64: ; RV32: # %bb.0: # %entry @@ -373,13 +280,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv1i64.i64( - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64: ; RV32: # %bb.0: # %entry @@ -408,13 +308,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv1i16.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -431,13 +324,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -454,13 +340,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv1i16.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -477,13 +356,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.nxv1i16.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -500,12 +372,6 @@ entry: ret %a } -declare 
@llvm.riscv.vredsum.nxv8i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vredsum_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -522,12 +388,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.nxv8i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -544,12 +404,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.nxv8i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vredor_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -566,12 +420,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.nxv8i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vredxor_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -588,12 +436,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.nxv8i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vredminu_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -610,12 +452,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.nxv8i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vredmin_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -632,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.nxv8i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vredmaxu_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -654,12 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.nxv8i8.nxv1i8( - , - , - , - iXLen); - define 
@intrinsic_vredmax_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -676,12 +500,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.nxv4i16.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vwredsumu_vs_nxv4i16_nxv1i8_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv1i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -698,12 +516,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.nxv4i16.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vwredsum_vs_nxv4i16_nxv1i8_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv1i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -720,12 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.nxv4f16.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -742,12 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.nxv4f16.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -764,12 +564,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.nxv4f16.nxv1f16( - , - , - , - iXLen); - define @intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -786,12 +580,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.nxv4f16.nxv1f16( - , - , - , - iXLen); - define @intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -808,12 +596,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredosum.nxv2f32.nxv1f16( - , - , - , - iXLen, iXLen); - define 
@intrinsic_vfwredosum_vs_nxv2f32_nxv1f16_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv1f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -829,11 +611,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredusum.nxv2f32.nxv1f16( - , - , - , - iXLen, iXLen); define @intrinsic_vfwredusum_vs_nxv2f32_nxv1f16_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv1f16_nxv2f32: @@ -851,13 +628,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv1i8( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv1i8_nxv1i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -875,13 +645,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv1i8( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv1i8_nxv1i8( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -899,8 +662,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.s.x.nxv1i64(, i64, iXLen); - define @intrinsic_vmv.s.x_x_nxv1i64(i64 %0, iXLen %1) nounwind { ; RV32-LABEL: intrinsic_vmv.s.x_x_nxv1i64: ; RV32: # %bb.0: # %entry @@ -923,8 +684,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.s.f.nxv1f16(, half, iXLen) - define @intrinsic_vfmv.s.f_f_nxv1f16(half %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -936,12 +695,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vcompress_um_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vcompress_um_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll index 1735a0f5a1f2b..ab43ac7cf03a6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc 
-mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vle.nxv1i8( - , - ptr, - iXLen); - define @intrinsic_vle_v_tu_nxv1i8_nxv1i8( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vle_v_tu_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vlse( - , - ptr, - iXLen, - iXLen); - - define @intrinsic_vlse_v_tu( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_tu: ; CHECK: # %bb.0: # %entry @@ -47,11 +35,6 @@ entry: ret %a } -declare { , iXLen } @llvm.riscv.vleff( - , - ptr, - iXLen); - define @intrinsic_vleff_v_tu( %0, ptr %1, iXLen %2, ptr %3) nounwind { ; RV32-LABEL: intrinsic_vleff_v_tu: ; RV32: # %bb.0: # %entry @@ -79,12 +62,6 @@ entry: ret %b } -declare @llvm.riscv.vloxei.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_tu_nxv1i8_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_tu_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -101,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.rm.nxv1i8.nxv1i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -124,12 +95,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.rm.nxv1i8.nxv1i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -147,12 +112,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -168,11 +127,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv1i8.nxv1i8( - , - , - , - iXLen); define 
@intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8: @@ -190,12 +144,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.rm.nxv1i8.nxv1i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -213,12 +161,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.rm.nxv1i8.nxv1i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -236,12 +178,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -258,12 +194,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -280,12 +210,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv1f16.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -302,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv1f16.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfdiv_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -324,12 +242,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv1f16.nxv1f16( - , - , - , - iXLen); - define @intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -346,12 +258,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv1f16.nxv1f16( - , - , - , - iXLen); - define @intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -368,12 +274,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv1f16.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -390,12 +290,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.nxv1f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -412,12 +306,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv1f16.nxv1f16( - , - , - , - iXLen); - define @intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -434,12 +322,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16( - , - , - , - iXLen); - define @intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -456,12 +338,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16( - , - , - , - iXLen); - define @intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -478,12 +354,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.nxv1f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -500,12 +370,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.nxv1f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -522,12 +386,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.nxv1f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -544,12 +402,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -566,12 +418,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -588,12 +434,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -611,12 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -633,12 +467,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16( 
%0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -655,12 +483,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -677,12 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv1f16.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -699,13 +515,6 @@ entry: ret %a } - -declare @llvm.riscv.vslide1down.nxv1i64( - , - , - i64, - iXLen); - define @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -732,12 +541,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.nxv1i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -764,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -786,12 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -808,12 +599,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -830,12 +615,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -852,12 +631,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -874,12 +647,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -896,12 +663,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vmulhsu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -918,12 +679,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vmulhu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -940,13 +695,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -964,13 +712,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ 
-988,12 +729,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vnsra_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1010,12 +745,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vnsrl_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1032,12 +761,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1054,12 +777,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1076,17 +793,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv1i8.nxv1i8( - , - , - , - iXLen); -declare @llvm.riscv.vrgather.vv.nxv1i8.i32( - , - , - , - iXLen); - define @intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1103,12 +809,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv1i8( - , - , - iXLen, - iXLen); - define @intrinsic_vrgather_vx_nxv1i8_nxv1i8( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1125,12 +825,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1147,12 +841,6 @@ entry: 
ret %a } -declare @llvm.riscv.vrsub.nxv1i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vrsub_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vrsub_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1182,12 +870,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv1i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1217,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1239,12 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1261,13 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv1i8.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1285,13 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv1i64.i64( - , - , - i64, - iXLen, - iXLen); - define @intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1323,12 +979,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1344,11 +994,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv1i8.nxv1i8( - , - , - , - iXLen); define 
@intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8: @@ -1366,13 +1011,6 @@ entry: ret %a } -declare @llvm.riscv.vssra.nxv1i8.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1390,13 +1028,6 @@ entry: ret %a } -declare @llvm.riscv.vssrl.nxv1i8.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1414,12 +1045,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1436,12 +1061,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1458,12 +1077,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv1i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vssub_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vssub_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1493,12 +1106,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv1i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1528,12 +1135,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1550,12 +1151,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1572,12 +1167,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv1i16.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1594,12 +1183,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1616,12 +1199,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1638,12 +1215,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1660,12 +1231,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1682,12 +1247,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1704,12 +1263,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv1i16.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1742,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1764,12 +1311,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1786,12 +1327,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1808,11 +1343,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv1i64.nxv1i8( - , - , - iXLen); - define @intrinsic_vsext_vf8_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1828,11 +1358,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv1i64.nxv1i8( - , - , - iXLen); - define @intrinsic_vzext_vf8_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1848,11 +1373,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -1868,10 +1388,6 @@ entry: ret %a } -declare 
@llvm.riscv.vid.nxv1i8( - , - iXLen); - define @intrinsic_vid_v_nxv1i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1886,11 +1402,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.nxv1i16( - , - , - iXLen); - define @intrinsic_vfclass_v_nxv1i16_nxv1f16( ; CHECK-LABEL: intrinsic_vfclass_v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1909,11 +1420,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1929,11 +1435,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1949,11 +1450,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1969,11 +1465,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1989,11 +1480,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -2009,11 +1495,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -2029,11 +1510,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -2049,11 +1525,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2069,11 +1540,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2089,11 +1555,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32( - , - , - iXLen); - define @intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -2109,11 +1570,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -2129,11 +1585,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -2149,11 +1600,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16: ; CHECK: # 
%bb.0: # %entry @@ -2169,11 +1615,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -2189,11 +1630,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.nxv1f16( - , - , - iXLen, iXLen); - define @intrinsic_vfrec7_v_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -2209,11 +1645,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.nxv1f16( - , - , - iXLen); - define @intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -2229,11 +1660,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.nxv1f16( - , - , - iXLen, iXLen); - define @intrinsic_vfsqrt_v_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -2249,11 +1675,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16( - , - , - iXLen); - define @intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -2269,11 +1690,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2289,11 +1705,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2309,11 +1720,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16( - , - , - iXLen); - define 
@intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -2329,11 +1735,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16( - , - , - iXLen); - define @intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -2349,11 +1750,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16( - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -2369,11 +1765,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16( - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -2389,11 +1780,6 @@ entry: ret %a } -declare @llvm.riscv.viota.nxv1i8( - , - , - iXLen); - define @intrinsic_viota_m_nxv1i8_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv1i8_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -2409,13 +1795,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv1i8.nxv1i8( - , - , - , - , - iXLen); - define @intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2433,13 +1812,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv1i8.nxv1i8( - , - , - , - , - iXLen); - define @intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2457,13 +1829,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv1i8.nxv1i8( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vmerge_vvm_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2481,13 +1846,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv8i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmerge_vxm_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmerge_vxm_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2552,13 +1910,6 @@ entry: ret %a } -declare @llvm.riscv.vfmerge.nxv8f64.f64( - , - , - double, - , - iXLen); - define @intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry @@ -2576,13 +1927,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv1f16.nxv1f16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -2600,13 +1944,6 @@ entry: ret %a } -declare @llvm.riscv.vfmerge.nxv1f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vfmerge_vzm_nxv1f16_nxv1f16_f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -2624,11 +1961,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv1i8( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2644,11 +1976,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv1f32( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -2664,11 +1991,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.x.nxv1i64( - , - i64, - iXLen); - define @intrinsic_vmv.v.x_x_nxv1i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmv.v.x_x_nxv1i64: ; RV32: # %bb.0: # %entry @@ -2695,11 +2017,6 @@ entry: 
ret %a } -declare @llvm.riscv.vfmv.v.f.nxv1f32( - , - float, - iXLen); - define @intrinsic_vfmv.v.f_f_nxv1f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/ushl_sat_vec.ll b/llvm/test/CodeGen/RISCV/rvv/ushl_sat_vec.ll index ba970e62875a9..bf6952dc78b0c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/ushl_sat_vec.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ushl_sat_vec.ll @@ -1,11 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc < %s -mtriple=riscv64 -mattr=+v | FileCheck %s -declare <2 x i64> @llvm.ushl.sat.v2i64(<2 x i64>, <2 x i64>) -declare <4 x i32> @llvm.ushl.sat.v4i32(<4 x i32>, <4 x i32>) -declare <8 x i16> @llvm.ushl.sat.v8i16(<8 x i16>, <8 x i16>) -declare <16 x i8> @llvm.ushl.sat.v16i8(<16 x i8>, <16 x i8>) - define <2 x i64> @vec_v2i64(<2 x i64> %x, <2 x i64> %y) nounwind { ; CHECK-LABEL: vec_v2i64: ; CHECK: # %bb.0: @@ -58,11 +53,6 @@ define <16 x i8> @vec_v16i8(<16 x i8> %x, <16 x i8> %y) nounwind { ret <16 x i8> %tmp } -declare @llvm.ushl.sat.nxv2i64(, ) -declare @llvm.ushl.sat.nxv4i32(, ) -declare @llvm.ushl.sat.nxv8i16(, ) -declare @llvm.ushl.sat.nxv16i8(, ) - define @vec_nxv2i64( %x, %y) nounwind { ; CHECK-LABEL: vec_nxv2i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vaadd.ll b/llvm/test/CodeGen/RISCV/rvv/vaadd.ll index ba9bb84fe3608..6942169587c48 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vaadd.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaadd.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vaadd.nxv1i8.nxv1i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,13 +21,6 @@ entry: ret %a } -declare 
@llvm.riscv.vaadd.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -52,12 +39,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv2i8.nxv2i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vaadd_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,13 +56,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -100,12 +74,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv4i8.nxv4i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vaadd_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -123,13 +91,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -148,12 +109,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv8i8.nxv8i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vaadd_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -171,13 +126,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -196,12 +144,6 
@@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv16i8.nxv16i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vaadd_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -219,13 +161,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -244,12 +179,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv32i8.nxv32i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vaadd_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -267,13 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -292,12 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv64i8.nxv64i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vaadd_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -315,13 +231,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -341,12 +250,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv1i16.nxv1i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vaadd_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vaadd_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -364,13 +267,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -389,12 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv2i16.nxv2i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vaadd_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -412,13 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -437,12 +320,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv4i16.nxv4i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vaadd_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -460,13 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -485,12 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv8i16.nxv8i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vaadd_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -508,13 +372,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vaadd_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -533,12 +390,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv16i16.nxv16i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vaadd_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -556,13 +407,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -581,12 +425,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv32i16.nxv32i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vaadd_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -604,13 +442,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -630,12 +461,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv1i32.nxv1i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vaadd_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -653,13 +478,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -678,12 
+496,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv2i32.nxv2i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vaadd_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -701,13 +513,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -726,12 +531,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv4i32.nxv4i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vaadd_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -749,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -774,12 +566,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv8i32.nxv8i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vaadd_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -797,13 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -822,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv16i32.nxv16i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vaadd_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vaadd_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -845,13 +618,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -871,12 +637,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv1i64.nxv1i64( - , - , - , - iXLen, iXLen); - define @intrinsic_vaadd_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -894,13 +654,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -919,12 +672,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv2i64.nxv2i64( - , - , - , - iXLen, iXLen); - define @intrinsic_vaadd_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -942,13 +689,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -967,12 +707,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv4i64.nxv4i64( - , - , - , - iXLen, iXLen); - define @intrinsic_vaadd_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -990,13 +724,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vaadd_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1015,12 +742,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv8i64.nxv8i64( - , - , - , - iXLen, iXLen); - define @intrinsic_vaadd_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1038,13 +759,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1064,12 +778,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv1i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vaadd_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1087,13 +795,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1112,12 +813,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv2i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vaadd_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1135,13 +830,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1160,12 +848,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv4i8.i8( - , - , - i8, - iXLen, 
iXLen); - define @intrinsic_vaadd_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1183,13 +865,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1208,12 +883,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv8i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vaadd_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1231,13 +900,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1256,12 +918,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv16i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vaadd_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1279,13 +935,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1304,12 +953,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv32i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vaadd_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1327,13 +970,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - 
define @intrinsic_vaadd_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1352,12 +988,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv64i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vaadd_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1375,13 +1005,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1400,12 +1023,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv1i16.i16( - , - , - i16, - iXLen, iXLen); - define @intrinsic_vaadd_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1423,13 +1040,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1448,12 +1058,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv2i16.i16( - , - , - i16, - iXLen, iXLen); - define @intrinsic_vaadd_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1471,13 +1075,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1496,12 +1093,6 @@ entry: ret %a } -declare 
@llvm.riscv.vaadd.nxv4i16.i16( - , - , - i16, - iXLen, iXLen); - define @intrinsic_vaadd_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1519,13 +1110,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1544,12 +1128,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv8i16.i16( - , - , - i16, - iXLen, iXLen); - define @intrinsic_vaadd_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1567,13 +1145,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1592,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv16i16.i16( - , - , - i16, - iXLen, iXLen); - define @intrinsic_vaadd_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1615,13 +1180,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1640,12 +1198,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv32i16.i16( - , - , - i16, - iXLen, iXLen); - define @intrinsic_vaadd_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # 
%entry @@ -1663,13 +1215,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1688,12 +1233,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv1i32.i32( - , - , - i32, - iXLen, iXLen); - define @intrinsic_vaadd_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1711,13 +1250,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1736,12 +1268,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv2i32.i32( - , - , - i32, - iXLen, iXLen); - define @intrinsic_vaadd_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1759,13 +1285,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1784,12 +1303,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv4i32.i32( - , - , - i32, - iXLen, iXLen); - define @intrinsic_vaadd_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1807,13 +1320,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1832,12 +1338,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv8i32.i32( - , - , - i32, - iXLen, iXLen); - define @intrinsic_vaadd_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1855,13 +1355,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1880,12 +1373,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv16i32.i32( - , - , - i32, - iXLen, iXLen); - define @intrinsic_vaadd_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1903,13 +1390,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1928,12 +1408,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv1i64.i64( - , - , - i64, - iXLen, iXLen); - define @intrinsic_vaadd_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vaadd_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1964,13 +1438,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vaadd_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -2002,12 +1469,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv2i64.i64( - , - , - i64, - iXLen, iXLen); - define 
@intrinsic_vaadd_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vaadd_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2038,13 +1499,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vaadd_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2076,12 +1530,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv4i64.i64( - , - , - i64, - iXLen, iXLen); - define @intrinsic_vaadd_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vaadd_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2112,13 +1560,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vaadd_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2150,12 +1591,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.nxv8i64.i64( - , - , - i64, - iXLen, iXLen); - define @intrinsic_vaadd_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vaadd_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2186,13 +1621,6 @@ entry: ret %a } -declare @llvm.riscv.vaadd.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaadd_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vaadd_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vaaddu.ll b/llvm/test/CodeGen/RISCV/rvv/vaaddu.ll index aa3fa9a86f497..7fd02f99f618a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vaaddu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaaddu.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s 
--check-prefixes=CHECK,RV64 -declare @llvm.riscv.vaaddu.nxv1i8.nxv1i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,13 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -52,12 +39,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv2i8.nxv2i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vaaddu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,13 +56,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -100,12 +74,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv4i8.nxv4i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vaaddu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -123,13 +91,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -148,12 +109,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv8i8.nxv8i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vaaddu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # 
%entry @@ -171,13 +126,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -196,12 +144,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv16i8.nxv16i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vaaddu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -219,13 +161,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -244,12 +179,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv32i8.nxv32i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vaaddu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -267,13 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -292,12 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv64i8.nxv64i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vaaddu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -315,13 +231,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, 
iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -341,12 +250,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv1i16.nxv1i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vaaddu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -364,13 +267,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -389,12 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv2i16.nxv2i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vaaddu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -412,13 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -437,12 +320,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv4i16.nxv4i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vaaddu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -460,13 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -485,12 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv8i16.nxv8i16( - , - , - 
, - iXLen, iXLen); - define @intrinsic_vaaddu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -508,13 +372,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -533,12 +390,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv16i16.nxv16i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vaaddu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -556,13 +407,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -581,12 +425,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv32i16.nxv32i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vaaddu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -604,13 +442,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -630,12 +461,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv1i32.nxv1i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vaaddu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # 
%bb.0: # %entry @@ -653,13 +478,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -678,12 +496,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv2i32.nxv2i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vaaddu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -701,13 +513,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -726,12 +531,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv4i32.nxv4i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vaaddu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -749,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -774,12 +566,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv8i32.nxv8i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vaaddu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -797,13 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32( 
%0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -822,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv16i32.nxv16i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vaaddu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -845,13 +618,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -871,12 +637,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv1i64.nxv1i64( - , - , - , - iXLen, iXLen); - define @intrinsic_vaaddu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -894,13 +654,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -919,12 +672,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv2i64.nxv2i64( - , - , - , - iXLen, iXLen); - define @intrinsic_vaaddu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -942,13 +689,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -967,12 +707,6 @@ entry: ret %a } -declare 
@llvm.riscv.vaaddu.nxv4i64.nxv4i64( - , - , - , - iXLen, iXLen); - define @intrinsic_vaaddu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -990,13 +724,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1015,12 +742,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv8i64.nxv8i64( - , - , - , - iXLen, iXLen); - define @intrinsic_vaaddu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1038,13 +759,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1064,12 +778,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv1i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vaaddu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1087,13 +795,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1112,12 +813,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv2i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vaaddu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ 
-1135,13 +830,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1160,12 +848,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv4i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vaaddu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1183,13 +865,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1208,12 +883,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv8i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vaaddu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1231,13 +900,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1256,12 +918,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv16i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vaaddu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1279,13 +935,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv16i8_nxv16i8_i8: ; 
CHECK: # %bb.0: # %entry @@ -1304,12 +953,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv32i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vaaddu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1327,13 +970,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1352,12 +988,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv64i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vaaddu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1375,13 +1005,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1400,12 +1023,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv1i16.i16( - , - , - i16, - iXLen, iXLen); - define @intrinsic_vaaddu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1423,13 +1040,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1448,12 +1058,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv2i16.i16( - , - , - i16, - iXLen, iXLen); - define @intrinsic_vaaddu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vaaddu_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1471,13 +1075,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1496,12 +1093,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv4i16.i16( - , - , - i16, - iXLen, iXLen); - define @intrinsic_vaaddu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1519,13 +1110,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1544,12 +1128,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv8i16.i16( - , - , - i16, - iXLen, iXLen); - define @intrinsic_vaaddu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1567,13 +1145,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1592,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv16i16.i16( - , - , - i16, - iXLen, iXLen); - define @intrinsic_vaaddu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1615,13 +1180,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vaaddu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1640,12 +1198,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv32i16.i16( - , - , - i16, - iXLen, iXLen); - define @intrinsic_vaaddu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1663,13 +1215,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1688,12 +1233,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv1i32.i32( - , - , - i32, - iXLen, iXLen); - define @intrinsic_vaaddu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1711,13 +1250,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1736,12 +1268,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv2i32.i32( - , - , - i32, - iXLen, iXLen); - define @intrinsic_vaaddu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1759,13 +1285,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1784,12 +1303,6 @@ entry: ret 
%a } -declare @llvm.riscv.vaaddu.nxv4i32.i32( - , - , - i32, - iXLen, iXLen); - define @intrinsic_vaaddu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1807,13 +1320,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1832,12 +1338,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv8i32.i32( - , - , - i32, - iXLen, iXLen); - define @intrinsic_vaaddu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1855,13 +1355,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1880,12 +1373,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv16i32.i32( - , - , - i32, - iXLen, iXLen); - define @intrinsic_vaaddu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1903,13 +1390,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1928,12 +1408,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv1i64.i64( - , - , - i64, - iXLen, iXLen); - define @intrinsic_vaaddu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: 
intrinsic_vaaddu_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1964,13 +1438,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vaaddu_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -2002,12 +1469,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv2i64.i64( - , - , - i64, - iXLen, iXLen); - define @intrinsic_vaaddu_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vaaddu_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2038,13 +1499,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vaaddu_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2076,12 +1530,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv4i64.i64( - , - , - i64, - iXLen, iXLen); - define @intrinsic_vaaddu_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vaaddu_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2112,13 +1560,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, iXLen, iXLen); - define @intrinsic_vaaddu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vaaddu_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2150,12 +1591,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.nxv8i64.i64( - , - , - i64, - iXLen, iXLen); - define @intrinsic_vaaddu_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vaaddu_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2186,13 +1621,6 @@ entry: ret %a } -declare @llvm.riscv.vaaddu.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vaaddu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vaaddu_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vadc.ll b/llvm/test/CodeGen/RISCV/rvv/vadc.ll index 6c7b81450f501..711893787819b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vadc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vadc.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vadc.nxv1i8.nxv1i8( - , - , - , - , - iXLen); - define @intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -28,13 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv2i8.nxv2i8( - , - , - , - , - iXLen); - define @intrinsic_vadc_vvm_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -52,13 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv4i8.nxv4i8( - , - , - , - , - iXLen); - define @intrinsic_vadc_vvm_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -76,13 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv8i8.nxv8i8( - , - , - , - , - iXLen); - define @intrinsic_vadc_vvm_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -100,13 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv16i8.nxv16i8( - , - , - , - , - iXLen); - define @intrinsic_vadc_vvm_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -124,13 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv32i8.nxv32i8( - , - , - , - , - iXLen); - define 
@intrinsic_vadc_vvm_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -148,13 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv64i8.nxv64i8( - , - , - , - , - iXLen); - define @intrinsic_vadc_vvm_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -172,13 +123,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv1i16.nxv1i16( - , - , - , - , - iXLen); - define @intrinsic_vadc_vvm_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -196,13 +140,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv2i16.nxv2i16( - , - , - , - , - iXLen); - define @intrinsic_vadc_vvm_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -220,13 +157,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv4i16.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vadc_vvm_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -244,13 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv8i16.nxv8i16( - , - , - , - , - iXLen); - define @intrinsic_vadc_vvm_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -268,13 +191,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv16i16.nxv16i16( - , - , - , - , - iXLen); - define @intrinsic_vadc_vvm_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -292,13 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv32i16.nxv32i16( - , - , - , - , - iXLen); - define 
@intrinsic_vadc_vvm_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -316,13 +225,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv1i32.nxv1i32( - , - , - , - , - iXLen); - define @intrinsic_vadc_vvm_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -340,13 +242,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv2i32.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vadc_vvm_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -364,13 +259,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv4i32.nxv4i32( - , - , - , - , - iXLen); - define @intrinsic_vadc_vvm_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -388,13 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv8i32.nxv8i32( - , - , - , - , - iXLen); - define @intrinsic_vadc_vvm_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -412,13 +293,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv16i32.nxv16i32( - , - , - , - , - iXLen); - define @intrinsic_vadc_vvm_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -436,13 +310,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv1i64.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vadc_vvm_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -460,13 +327,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv2i64.nxv2i64( - , - , - , - , - iXLen); - define 
@intrinsic_vadc_vvm_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -484,13 +344,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv4i64.nxv4i64( - , - , - , - , - iXLen); - define @intrinsic_vadc_vvm_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -508,13 +361,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv8i64.nxv8i64( - , - , - , - , - iXLen); - define @intrinsic_vadc_vvm_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -532,13 +378,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv1i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vadc_vxm_nxv1i8_nxv1i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -556,13 +395,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv2i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vadc_vxm_nxv2i8_nxv2i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -580,13 +412,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv4i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vadc_vxm_nxv4i8_nxv4i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -604,13 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv8i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vadc_vxm_nxv8i8_nxv8i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -628,13 +446,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv16i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vadc_vxm_nxv16i8_nxv16i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vadc_vxm_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -652,13 +463,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv32i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vadc_vxm_nxv32i8_nxv32i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -676,13 +480,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv64i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vadc_vxm_nxv64i8_nxv64i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -700,13 +497,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv1i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vadc_vxm_nxv1i16_nxv1i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -724,13 +514,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv2i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vadc_vxm_nxv2i16_nxv2i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -748,13 +531,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv4i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vadc_vxm_nxv4i16_nxv4i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -772,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv8i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vadc_vxm_nxv8i16_nxv8i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -796,13 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv16i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vadc_vxm_nxv16i16_nxv16i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -820,13 +582,6 @@ 
entry: ret %a } -declare @llvm.riscv.vadc.nxv32i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vadc_vxm_nxv32i16_nxv32i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -844,13 +599,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv1i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vadc_vxm_nxv1i32_nxv1i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -868,13 +616,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv2i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vadc_vxm_nxv2i32_nxv2i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -892,13 +633,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv4i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vadc_vxm_nxv4i32_nxv4i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -916,13 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv8i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vadc_vxm_nxv8i32_nxv8i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -940,13 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv16i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vadc_vxm_nxv16i32_nxv16i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -964,13 +684,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv1i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vadc_vxm_nxv1i64_nxv1i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vadc_vxm_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1000,13 +713,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv2i64.i64( - , - , - i64, - 
, - iXLen); - define @intrinsic_vadc_vxm_nxv2i64_nxv2i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vadc_vxm_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1036,13 +742,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv4i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vadc_vxm_nxv4i64_nxv4i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vadc_vxm_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -1072,13 +771,6 @@ entry: ret %a } -declare @llvm.riscv.vadc.nxv8i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vadc_vxm_nxv8i64_nxv8i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vadc_vxm_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-vp-mask.ll index 33eaee89d77c0..7800d9309bfa2 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vadd-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vadd-vp-mask.ll @@ -4,9 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK - -declare @llvm.vp.add.nxv2i1(, , , i32) - define @vadd_vv_nxv2i1( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv2i1: ; CHECK: # %bb.0: @@ -17,8 +14,6 @@ define @vadd_vv_nxv2i1( %va, %v } -declare @llvm.vp.add.nxv4i1(, , , i32) - define @vadd_vv_nxv4i1( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv4i1: ; CHECK: # %bb.0: @@ -29,8 +24,6 @@ define @vadd_vv_nxv4i1( %va, %v } -declare @llvm.vp.add.nxv8i1(, , , i32) - define @vadd_vv_nxv8i1( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv8i1: ; CHECK: # %bb.0: @@ -41,8 +34,6 @@ define @vadd_vv_nxv8i1( %va, %v } -declare @llvm.vp.add.nxv16i1(, , , i32) - define @vadd_vv_nxv16i1( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv16i1: ; CHECK: # %bb.0: @@ -53,8 +44,6 @@ define @vadd_vv_nxv16i1( %va, %v } -declare @llvm.vp.add.nxv32i1(, , , i32) - define @vadd_vv_nxv32i1( %va, %b, %m, i32 zeroext 
%evl) { ; CHECK-LABEL: vadd_vv_nxv32i1: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll index 946c0bbd7ff6f..c64b755051898 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.add.nxv8i7(, , , i32) - define @vadd_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_nxv8i7: ; CHECK: # %bb.0: @@ -18,8 +16,6 @@ define @vadd_vx_nxv8i7( %a, i7 signext %b, %v } -declare @llvm.vp.add.nxv1i8(, , , i32) - define @vadd_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv1i8: ; CHECK: # %bb.0: @@ -96,8 +92,6 @@ define @vadd_vi_nxv1i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.add.nxv2i8(, , , i32) - define @vadd_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv2i8: ; CHECK: # %bb.0: @@ -162,8 +156,6 @@ define @vadd_vi_nxv2i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.add.nxv3i8(, , , i32) - define @vadd_vv_nxv3i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv3i8: ; CHECK: # %bb.0: @@ -228,8 +220,6 @@ define @vadd_vi_nxv3i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.add.nxv4i8(, , , i32) - define @vadd_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv4i8: ; CHECK: # %bb.0: @@ -294,8 +284,6 @@ define @vadd_vi_nxv4i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.add.nxv8i8(, , , i32) - define @vadd_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv8i8: ; CHECK: # %bb.0: @@ -360,8 +348,6 @@ define @vadd_vi_nxv8i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.add.nxv16i8(, , , i32) - define @vadd_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv16i8: ; CHECK: # %bb.0: @@ -426,8 +412,6 @@ define @vadd_vi_nxv16i8_unmasked( %va, i32 ret %v } -declare 
@llvm.vp.add.nxv32i8(, , , i32) - define @vadd_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv32i8: ; CHECK: # %bb.0: @@ -492,8 +476,6 @@ define @vadd_vi_nxv32i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.add.nxv64i8(, , , i32) - define @vadd_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv64i8: ; CHECK: # %bb.0: @@ -560,8 +542,6 @@ define @vadd_vi_nxv64i8_unmasked( %va, i32 ; Test that split-legalization works when the mask itself needs splitting. -declare @llvm.vp.add.nxv128i8(, , , i32) - define @vadd_vi_nxv128i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_nxv128i8: ; CHECK: # %bb.0: @@ -610,8 +590,6 @@ define @vadd_vi_nxv128i8_unmasked( %va, i ret %v } -declare @llvm.vp.add.nxv1i16(, , , i32) - define @vadd_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv1i16: ; CHECK: # %bb.0: @@ -676,8 +654,6 @@ define @vadd_vi_nxv1i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.add.nxv2i16(, , , i32) - define @vadd_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv2i16: ; CHECK: # %bb.0: @@ -742,8 +718,6 @@ define @vadd_vi_nxv2i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.add.nxv4i16(, , , i32) - define @vadd_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv4i16: ; CHECK: # %bb.0: @@ -808,8 +782,6 @@ define @vadd_vi_nxv4i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.add.nxv8i16(, , , i32) - define @vadd_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv8i16: ; CHECK: # %bb.0: @@ -874,8 +846,6 @@ define @vadd_vi_nxv8i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.add.nxv16i16(, , , i32) - define @vadd_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv16i16: ; CHECK: # %bb.0: @@ -940,8 +910,6 @@ define @vadd_vi_nxv16i16_unmasked( %va, i ret %v } -declare @llvm.vp.add.nxv32i16(, , , i32) - define @vadd_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv32i16: ; CHECK: # %bb.0: @@ 
-1006,8 +974,6 @@ define @vadd_vi_nxv32i16_unmasked( %va, i ret %v } -declare @llvm.vp.add.nxv1i32(, , , i32) - define @vadd_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv1i32: ; CHECK: # %bb.0: @@ -1072,8 +1038,6 @@ define @vadd_vi_nxv1i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.add.nxv2i32(, , , i32) - define @vadd_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv2i32: ; CHECK: # %bb.0: @@ -1138,8 +1102,6 @@ define @vadd_vi_nxv2i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.add.nxv4i32(, , , i32) - define @vadd_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv4i32: ; CHECK: # %bb.0: @@ -1204,8 +1166,6 @@ define @vadd_vi_nxv4i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.add.nxv8i32(, , , i32) - define @vadd_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv8i32: ; CHECK: # %bb.0: @@ -1270,8 +1230,6 @@ define @vadd_vi_nxv8i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.add.nxv16i32(, , , i32) - define @vadd_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv16i32: ; CHECK: # %bb.0: @@ -1338,8 +1296,6 @@ define @vadd_vi_nxv16i32_unmasked( %va, i ; Test that split-legalization works then the mask needs manual splitting. -declare @llvm.vp.add.nxv32i32(, , , i32) - define @vadd_vi_nxv32i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_nxv32i32: ; CHECK: # %bb.0: @@ -1391,8 +1347,6 @@ define @vadd_vi_nxv32i32_unmasked( %va, i ; Test splitting when the %evl is a constant (albeit an unknown one). 
-declare i32 @llvm.vscale.i32() - define @vadd_vi_nxv32i32_evl_nx8( %va, %m) { ; RV32-LABEL: vadd_vi_nxv32i32_evl_nx8: ; RV32: # %bb.0: @@ -1454,8 +1408,6 @@ define @vadd_vi_nxv32i32_evl_nx16( %va, < ret %v } -declare @llvm.vp.add.nxv1i64(, , , i32) - define @vadd_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv1i64: ; CHECK: # %bb.0: @@ -1548,8 +1500,6 @@ define @vadd_vi_nxv1i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.add.nxv2i64(, , , i32) - define @vadd_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1642,8 +1592,6 @@ define @vadd_vi_nxv2i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.add.nxv4i64(, , , i32) - define @vadd_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1736,8 +1684,6 @@ define @vadd_vi_nxv4i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.add.nxv8i64(, , , i32) - define @vadd_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd.ll b/llvm/test/CodeGen/RISCV/rvv/vadd.ll index bdc62a974f098..8d0259a426d04 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vadd.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vadd.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vadd.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv2i8.nxv2i8( - , 
- , - , - iXLen); - define @intrinsic_vadd_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, iXLen); - define 
@intrinsic_vadd_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -280,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv64i8.nxv64i8( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -302,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -327,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -349,13 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -373,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv2i16.nxv2i16( - , - , - , - iXLen); - 
define @intrinsic_vadd_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -395,13 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -419,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -441,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -465,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -487,13 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -511,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -533,13 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv16i16.nxv16i16( - , - , - , - , - 
iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -557,12 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv32i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -579,13 +417,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -604,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -626,13 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -650,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -672,13 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -696,12 +501,6 @@ entry: ret %a } -declare 
@llvm.riscv.vadd.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -718,13 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -742,12 +534,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -764,13 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -788,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv16i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -810,13 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -835,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -857,13 +617,6 @@ entry: ret %a } 
-declare @llvm.riscv.vadd.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -881,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv2i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -903,13 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -927,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv4i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -949,13 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -973,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv8i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vadd_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -995,13 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1020,12 
+734,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv1i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vadd_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1042,13 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1066,12 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vadd_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1088,13 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1112,12 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vadd_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1134,13 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1158,12 +833,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vadd_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1180,13 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, iXLen); - define 
@intrinsic_vadd_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1204,12 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vadd_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1226,13 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1250,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vadd_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1272,13 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1296,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv64i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vadd_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1318,13 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1342,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vadd_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) 
nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1364,13 +981,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1388,12 +998,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vadd_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1410,13 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1434,12 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vadd_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1456,13 +1047,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1480,12 +1064,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vadd_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1502,13 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind 
{ ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1526,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vadd_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1548,13 +1113,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1572,12 +1130,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv32i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vadd_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1594,13 +1146,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1618,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vadd_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1640,13 +1179,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1664,12 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vadd_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vadd_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1686,13 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1710,12 +1229,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vadd_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1732,13 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1756,12 +1262,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vadd_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1778,13 +1278,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1802,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv16i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vadd_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1824,13 +1311,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { 
; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1848,12 +1328,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv1i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vadd_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vadd_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1925,13 +1399,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vadd_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1961,12 +1428,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv2i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vadd_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vadd_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1995,13 +1456,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vadd_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2031,12 +1485,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv4i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vadd_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vadd_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2065,13 +1513,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vadd_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2101,12 +1542,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.nxv8i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vadd_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: 
intrinsic_vadd_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2135,13 +1570,6 @@ entry: ret %a } -declare @llvm.riscv.vadd.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vadd_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vadd_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vaesdf.ll b/llvm/test/CodeGen/RISCV/rvv/vaesdf.ll index 9d394a1ee3ff7..9dc0006b7a736 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vaesdf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaesdf.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvkned \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vaesdf.vv.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesdf_vv_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesdf_vv_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -24,11 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vaesdf.vv.nxv8i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesdf_vv_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesdf_vv_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -44,11 +34,6 @@ entry: ret %a } -declare @llvm.riscv.vaesdf.vv.nxv16i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesdf_vv_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesdf_vv_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -64,11 +49,6 @@ entry: ret %a } -declare @llvm.riscv.vaesdf.vs.nxv4i32.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesdf_vs_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesdf_vs_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -84,11 +64,6 @@ entry: ret %a } -declare @llvm.riscv.vaesdf.vs.nxv8i32.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesdf_vs_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesdf_vs_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -104,11 +79,6 @@ entry: ret %a } -declare 
@llvm.riscv.vaesdf.vs.nxv16i32.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesdf_vs_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesdf_vs_nxv16i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vaesdm.ll b/llvm/test/CodeGen/RISCV/rvv/vaesdm.ll index f21bdcac032f7..e94f3b102a093 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vaesdm.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaesdm.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvkned \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vaesdm.vv.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesdm_vv_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesdm_vv_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -24,11 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vaesdm.vv.nxv8i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesdm_vv_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesdm_vv_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -44,11 +34,6 @@ entry: ret %a } -declare @llvm.riscv.vaesdm.vv.nxv16i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesdm_vv_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesdm_vv_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -64,11 +49,6 @@ entry: ret %a } -declare @llvm.riscv.vaesdm.vs.nxv4i32.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesdm_vs_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesdm_vs_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -84,11 +64,6 @@ entry: ret %a } -declare @llvm.riscv.vaesdm.vs.nxv8i32.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesdm_vs_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesdm_vs_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -104,11 +79,6 @@ entry: ret %a } -declare @llvm.riscv.vaesdm.vs.nxv16i32.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesdm_vs_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesdm_vs_nxv16i32: ; CHECK: # 
%bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vaesef.ll b/llvm/test/CodeGen/RISCV/rvv/vaesef.ll index ee11786583d7f..fff4d3a19753e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vaesef.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaesef.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvkned \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vaesef.vv.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesef_vv_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesef_vv_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -24,11 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vaesef.vv.nxv8i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesef_vv_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesef_vv_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -44,11 +34,6 @@ entry: ret %a } -declare @llvm.riscv.vaesef.vv.nxv16i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesef_vv_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesef_vv_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -64,11 +49,6 @@ entry: ret %a } -declare @llvm.riscv.vaesef.vs.nxv4i32.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesef_vs_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesef_vs_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -84,11 +64,6 @@ entry: ret %a } -declare @llvm.riscv.vaesef.vs.nxv8i32.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesef_vs_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesef_vs_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -104,11 +79,6 @@ entry: ret %a } -declare @llvm.riscv.vaesef.vs.nxv16i32.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesef_vs_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesef_vs_nxv16i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vaesem.ll b/llvm/test/CodeGen/RISCV/rvv/vaesem.ll index 65486e119842b..bb310fe61ce34 100644 --- 
a/llvm/test/CodeGen/RISCV/rvv/vaesem.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaesem.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvkned \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vaesem.vv.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesem_vv_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesem_vv_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -24,11 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vaesem.vv.nxv8i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesem_vv_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesem_vv_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -44,11 +34,6 @@ entry: ret %a } -declare @llvm.riscv.vaesem.vv.nxv16i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesem_vv_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesem_vv_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -64,11 +49,6 @@ entry: ret %a } -declare @llvm.riscv.vaesem.vs.nxv4i32.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesem_vs_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesem_vs_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -84,11 +64,6 @@ entry: ret %a } -declare @llvm.riscv.vaesem.vs.nxv8i32.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesem_vs_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesem_vs_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -104,11 +79,6 @@ entry: ret %a } -declare @llvm.riscv.vaesem.vs.nxv16i32.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesem_vs_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesem_vs_nxv16i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vaeskf1.ll b/llvm/test/CodeGen/RISCV/rvv/vaeskf1.ll index 94eb803169ce9..5ea5b5eb4f601 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vaeskf1.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaeskf1.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvkned \ ; RUN: 
-verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vaeskf1.nxv4i32.i32( - , - , - iXLen, - iXLen) - define @intrinsic_vaeskf1_vi_nxv4i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vaeskf1_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -26,12 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vaeskf1.nxv8i32.i32( - , - , - iXLen, - iXLen) - define @intrinsic_vaeskf1_vi_nxv8i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vaeskf1_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -48,12 +36,6 @@ entry: ret %a } -declare @llvm.riscv.vaeskf1.nxv16i32.i32( - , - , - iXLen, - iXLen) - define @intrinsic_vaeskf1_vi_nxv16i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vaeskf1_vi_nxv16i32_i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vaeskf2.ll b/llvm/test/CodeGen/RISCV/rvv/vaeskf2.ll index 5abe0821d2299..08f22b58b9f59 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vaeskf2.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaeskf2.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvkned \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vaeskf2.nxv4i32.i32( - , - , - iXLen, - iXLen, - iXLen) - define @intrinsic_vaeskf2_vi_nxv4i32_i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaeskf2_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -28,13 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vaeskf2.nxv8i32.i32( - , - , - iXLen, - iXLen, - iXLen) - define @intrinsic_vaeskf2_vi_nxv8i32_i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaeskf2_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -52,13 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vaeskf2.nxv16i32.i32( - , - , - iXLen, - iXLen, - iXLen) - define @intrinsic_vaeskf2_vi_nxv16i32_i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaeskf2_vi_nxv16i32_i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vaesz.ll b/llvm/test/CodeGen/RISCV/rvv/vaesz.ll index 
2453119ce92d3..6a0b9f52c2b4b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vaesz.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaesz.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvkned \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vaesz.vs.nxv4i32.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesz_vs_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesz_vs_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -24,11 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vaesz.vs.nxv8i32.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesz_vs_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesz_vs_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -44,11 +34,6 @@ entry: ret %a } -declare @llvm.riscv.vaesz.vs.nxv16i32.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vaesz_vs_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaesz_vs_nxv16i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vand-vp.ll index 4866bb06f19ec..eaa272f425086 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vand-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vand-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.and.nxv8i7(, , , i32) - define @vand_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_nxv8i7: ; CHECK: # %bb.0: @@ -18,8 +16,6 @@ define @vand_vx_nxv8i7( %a, i7 signext %b, %v } -declare @llvm.vp.and.nxv1i8(, , , i32) - define @vand_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv1i8: ; CHECK: # %bb.0: @@ -84,8 +80,6 @@ define @vand_vi_nxv1i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.and.nxv2i8(, , , i32) - define @vand_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv2i8: ; CHECK: # %bb.0: @@ -150,8 +144,6 @@ define @vand_vi_nxv2i8_unmasked( %va, i32 zer 
ret %v } -declare @llvm.vp.and.nxv4i8(, , , i32) - define @vand_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv4i8: ; CHECK: # %bb.0: @@ -216,8 +208,6 @@ define @vand_vi_nxv4i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.and.nxv8i8(, , , i32) - define @vand_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv8i8: ; CHECK: # %bb.0: @@ -282,8 +272,6 @@ define @vand_vi_nxv8i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.and.nxv16i8(, , , i32) - define @vand_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv16i8: ; CHECK: # %bb.0: @@ -348,8 +336,6 @@ define @vand_vi_nxv16i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.and.nxv32i8(, , , i32) - define @vand_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv32i8: ; CHECK: # %bb.0: @@ -414,8 +400,6 @@ define @vand_vi_nxv32i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.and.nxv64i8(, , , i32) - define @vand_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv64i8: ; CHECK: # %bb.0: @@ -480,8 +464,6 @@ define @vand_vi_nxv64i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.and.nxv1i16(, , , i32) - define @vand_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv1i16: ; CHECK: # %bb.0: @@ -546,8 +528,6 @@ define @vand_vi_nxv1i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.and.nxv2i16(, , , i32) - define @vand_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv2i16: ; CHECK: # %bb.0: @@ -612,8 +592,6 @@ define @vand_vi_nxv2i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.and.nxv4i16(, , , i32) - define @vand_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv4i16: ; CHECK: # %bb.0: @@ -678,8 +656,6 @@ define @vand_vi_nxv4i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.and.nxv8i16(, , , i32) - define @vand_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv8i16: ; CHECK: # %bb.0: @@ -744,8 +720,6 @@ define @vand_vi_nxv8i16_unmasked( 
%va, i32 ret %v } -declare @llvm.vp.and.nxv14i16(, , , i32) - define @vand_vv_nxv14i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv14i16: ; CHECK: # %bb.0: @@ -810,8 +784,6 @@ define @vand_vi_nxv14i16_unmasked( %va, i ret %v } -declare @llvm.vp.and.nxv16i16(, , , i32) - define @vand_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv16i16: ; CHECK: # %bb.0: @@ -876,8 +848,6 @@ define @vand_vi_nxv16i16_unmasked( %va, i ret %v } -declare @llvm.vp.and.nxv32i16(, , , i32) - define @vand_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv32i16: ; CHECK: # %bb.0: @@ -954,8 +924,6 @@ define @vand_vi_nxv32i16_unmasked( %va, i ret %v } -declare @llvm.vp.and.nxv1i32(, , , i32) - define @vand_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv1i32: ; CHECK: # %bb.0: @@ -1020,8 +988,6 @@ define @vand_vi_nxv1i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.and.nxv2i32(, , , i32) - define @vand_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv2i32: ; CHECK: # %bb.0: @@ -1086,8 +1052,6 @@ define @vand_vi_nxv2i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.and.nxv4i32(, , , i32) - define @vand_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv4i32: ; CHECK: # %bb.0: @@ -1152,8 +1116,6 @@ define @vand_vi_nxv4i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.and.nxv8i32(, , , i32) - define @vand_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv8i32: ; CHECK: # %bb.0: @@ -1218,8 +1180,6 @@ define @vand_vi_nxv8i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.and.nxv16i32(, , , i32) - define @vand_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv16i32: ; CHECK: # %bb.0: @@ -1284,8 +1244,6 @@ define @vand_vi_nxv16i32_unmasked( %va, i ret %v } -declare @llvm.vp.and.nxv1i64(, , , i32) - define @vand_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv1i64: ; CHECK: # %bb.0: @@ -1378,8 +1336,6 @@ define 
@vand_vi_nxv1i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.and.nxv2i64(, , , i32) - define @vand_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1472,8 +1428,6 @@ define @vand_vi_nxv2i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.and.nxv4i64(, , , i32) - define @vand_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1566,8 +1520,6 @@ define @vand_vi_nxv4i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.and.nxv8i64(, , , i32) - define @vand_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vand.ll b/llvm/test/CodeGen/RISCV/rvv/vand.ll index fafc25e2a5819..d1c8714a6abdc 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vand.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vand.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vand.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vand_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { 
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vand_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vand_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vand_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vand_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vand_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv64i8.nxv64i8( - , - , - , - iXLen); - define @intrinsic_vand_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vand_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vand_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vand_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vand_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vand_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -544,14 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -569,12 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv32i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vand_vv_nxv32i16_nxv32i16_nxv32i16( 
%0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -591,14 +417,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vand_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vand_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -711,12 +501,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vand_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -733,14 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vand_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -758,12 +534,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vand_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -780,14 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -805,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv16i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vand_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -827,14 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vand_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv2i64.nxv2i64( - , - 
, - , - iXLen); - define @intrinsic_vand_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv4i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vand_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv8i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vand_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1042,12 +734,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv1i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vand_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1064,14 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, 
- iXLen); - define @intrinsic_vand_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1089,12 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vand_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1111,14 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1136,12 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vand_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1158,14 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1183,12 +833,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vand_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1205,14 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1230,12 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vand_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind 
{ ; CHECK-LABEL: intrinsic_vand_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1252,14 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1277,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vand_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1299,14 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1324,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv64i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vand_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1346,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1371,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vand_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1393,14 +981,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vand_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1418,12 +998,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vand_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1440,14 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1465,12 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vand_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1487,14 +1047,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1512,12 +1064,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vand_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1534,14 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1559,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vand_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vand_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1581,14 +1113,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1606,12 +1130,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv32i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vand_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1628,14 +1146,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1653,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vand_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1675,14 +1179,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1700,12 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vand_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1722,14 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1747,12 +1229,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vand_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1769,14 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1794,12 +1262,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vand_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1816,14 +1278,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1841,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv16i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vand_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1863,14 +1311,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1888,12 +1328,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv1i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vand_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; 
RV32-LABEL: intrinsic_vand_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1922,14 +1356,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vand_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1959,12 +1385,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv2i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vand_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vand_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1993,14 +1413,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vand_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2030,12 +1442,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv4i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vand_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vand_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2064,14 +1470,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vand_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2101,12 +1499,6 @@ entry: ret %a } -declare @llvm.riscv.vand.nxv8i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vand_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vand_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2135,14 +1527,6 @@ entry: ret %a } -declare @llvm.riscv.vand.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vand_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: 
intrinsic_vand_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll index f295bd8d74df3..fe477d8a6f8f3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll @@ -2234,8 +2234,6 @@ identity: ret %x } -declare i64 @llvm.vscale.i64() - define void @vand_vx_loop_hoisted_not(ptr %a, i32 noundef signext %mask) { ; CHECK-RV32-LABEL: vand_vx_loop_hoisted_not: ; CHECK-RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vandn-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vandn-vp.ll index 5d29b266546f5..c08e3d695691b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vandn-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vandn-vp.ll @@ -4,9 +4,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v,+zvkb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVKB,CHECK-ZVKB32 ; RUN: llc -mtriple=riscv64 -mattr=+v,+zvkb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVKB,CHECK-ZVKB64 -declare @llvm.vp.and.nxv1i8(, , , i32) -declare @llvm.vp.xor.nxv1i8(, , , i32) - define @vandn_vv_vp_nxv1i8( %a, %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vandn_vv_vp_nxv1i8: ; CHECK: # %bb.0: @@ -63,9 +60,6 @@ define @vandn_vx_vp_nxv1i8(i8 %a, %b, %x } -declare @llvm.vp.and.nxv2i8(, , , i32) -declare @llvm.vp.xor.nxv2i8(, , , i32) - define @vandn_vv_vp_nxv2i8( %a, %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vandn_vv_vp_nxv2i8: ; CHECK: # %bb.0: @@ -122,9 +116,6 @@ define @vandn_vx_vp_nxv2i8(i8 %a, %b, %x } -declare @llvm.vp.and.nxv4i8(, , , i32) -declare @llvm.vp.xor.nxv4i8(, , , i32) - define @vandn_vv_vp_nxv4i8( %a, %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vandn_vv_vp_nxv4i8: ; CHECK: # %bb.0: @@ -181,9 +172,6 @@ define @vandn_vx_vp_nxv4i8(i8 %a, %b, %x } -declare @llvm.vp.and.nxv8i8(, , , i32) -declare @llvm.vp.xor.nxv8i8(, , , i32) - define @vandn_vv_vp_nxv8i8( %a, %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: 
vandn_vv_vp_nxv8i8: ; CHECK: # %bb.0: @@ -240,9 +228,6 @@ define @vandn_vx_vp_nxv8i8(i8 %a, %b, %x } -declare @llvm.vp.and.nxv16i8(, , , i32) -declare @llvm.vp.xor.nxv16i8(, , , i32) - define @vandn_vv_vp_nxv16i8( %a, %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vandn_vv_vp_nxv16i8: ; CHECK: # %bb.0: @@ -299,9 +284,6 @@ define @vandn_vx_vp_nxv16i8(i8 %a, %b, %x } -declare @llvm.vp.and.nxv32i8(, , , i32) -declare @llvm.vp.xor.nxv32i8(, , , i32) - define @vandn_vv_vp_nxv32i8( %a, %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vandn_vv_vp_nxv32i8: ; CHECK: # %bb.0: @@ -358,9 +340,6 @@ define @vandn_vx_vp_nxv32i8(i8 %a, %b, %x } -declare @llvm.vp.and.nxv64i8(, , , i32) -declare @llvm.vp.xor.nxv64i8(, , , i32) - define @vandn_vv_vp_nxv64i8( %a, %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vandn_vv_vp_nxv64i8: ; CHECK: # %bb.0: @@ -417,9 +396,6 @@ define @vandn_vx_vp_nxv64i8(i8 %a, %b, %x } -declare @llvm.vp.and.nxv1i16(, , , i32) -declare @llvm.vp.xor.nxv1i16(, , , i32) - define @vandn_vv_vp_nxv1i16( %a, %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vandn_vv_vp_nxv1i16: ; CHECK: # %bb.0: @@ -476,9 +452,6 @@ define @vandn_vx_vp_nxv1i16(i16 %a, %b, %x } -declare @llvm.vp.and.nxv2i16(, , , i32) -declare @llvm.vp.xor.nxv2i16(, , , i32) - define @vandn_vv_vp_nxv2i16( %a, %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vandn_vv_vp_nxv2i16: ; CHECK: # %bb.0: @@ -535,9 +508,6 @@ define @vandn_vx_vp_nxv2i16(i16 %a, %b, %x } -declare @llvm.vp.and.nxv4i16(, , , i32) -declare @llvm.vp.xor.nxv4i16(, , , i32) - define @vandn_vv_vp_nxv4i16( %a, %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vandn_vv_vp_nxv4i16: ; CHECK: # %bb.0: @@ -594,9 +564,6 @@ define @vandn_vx_vp_nxv4i16(i16 %a, %b, %x } -declare @llvm.vp.and.nxv8i16(, , , i32) -declare @llvm.vp.xor.nxv8i16(, , , i32) - define @vandn_vv_vp_nxv8i16( %a, %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vandn_vv_vp_nxv8i16: ; CHECK: # %bb.0: @@ -653,9 +620,6 @@ define @vandn_vx_vp_nxv8i16(i16 %a, %b, %x } -declare @llvm.vp.and.nxv16i16(, 
, , i32) -declare @llvm.vp.xor.nxv16i16(, , , i32) - define @vandn_vv_vp_nxv16i16( %a, %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vandn_vv_vp_nxv16i16: ; CHECK: # %bb.0: @@ -712,9 +676,6 @@ define @vandn_vx_vp_nxv16i16(i16 %a, %b, ret %x } -declare @llvm.vp.and.nxv32i16(, , , i32) -declare @llvm.vp.xor.nxv32i16(, , , i32) - define @vandn_vv_vp_nxv32i16( %a, %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vandn_vv_vp_nxv32i16: ; CHECK: # %bb.0: @@ -771,9 +732,6 @@ define @vandn_vx_vp_nxv32i16(i16 %a, %b, ret %x } -declare @llvm.vp.and.nxv1i32(, , , i32) -declare @llvm.vp.xor.nxv1i32(, , , i32) - define @vandn_vv_vp_nxv1i32( %a, %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vandn_vv_vp_nxv1i32: ; CHECK: # %bb.0: @@ -830,9 +788,6 @@ define @vandn_vx_vp_nxv1i32(i32 %a, %b, %x } -declare @llvm.vp.and.nxv2i32(, , , i32) -declare @llvm.vp.xor.nxv2i32(, , , i32) - define @vandn_vv_vp_nxv2i32( %a, %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vandn_vv_vp_nxv2i32: ; CHECK: # %bb.0: @@ -889,9 +844,6 @@ define @vandn_vx_vp_nxv2i32(i32 %a, %b, %x } -declare @llvm.vp.and.nxv4i32(, , , i32) -declare @llvm.vp.xor.nxv4i32(, , , i32) - define @vandn_vv_vp_nxv4i32( %a, %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vandn_vv_vp_nxv4i32: ; CHECK: # %bb.0: @@ -948,9 +900,6 @@ define @vandn_vx_vp_nxv4i32(i32 %a, %b, %x } -declare @llvm.vp.and.nxv8i32(, , , i32) -declare @llvm.vp.xor.nxv8i32(, , , i32) - define @vandn_vv_vp_nxv8i32( %a, %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vandn_vv_vp_nxv8i32: ; CHECK: # %bb.0: @@ -1007,9 +956,6 @@ define @vandn_vx_vp_nxv8i32(i32 %a, %b, %x } -declare @llvm.vp.and.nxv16i32(, , , i32) -declare @llvm.vp.xor.nxv16i32(, , , i32) - define @vandn_vv_vp_nxv16i32( %a, %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vandn_vv_vp_nxv16i32: ; CHECK: # %bb.0: @@ -1066,9 +1012,6 @@ define @vandn_vx_vp_nxv16i32(i32 %a, %b, ret %x } -declare @llvm.vp.and.nxv1i64(, , , i32) -declare @llvm.vp.xor.nxv1i64(, , , i32) - define @vandn_vv_vp_nxv1i64( %a, %b, %mask, i32 
zeroext %evl) { ; CHECK-LABEL: vandn_vv_vp_nxv1i64: ; CHECK: # %bb.0: @@ -1157,9 +1100,6 @@ define @vandn_vx_vp_nxv1i64(i64 %a, %b, %x } -declare @llvm.vp.and.nxv2i64(, , , i32) -declare @llvm.vp.xor.nxv2i64(, , , i32) - define @vandn_vv_vp_nxv2i64( %a, %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vandn_vv_vp_nxv2i64: ; CHECK: # %bb.0: @@ -1248,9 +1188,6 @@ define @vandn_vx_vp_nxv2i64(i64 %a, %b, %x } -declare @llvm.vp.and.nxv4i64(, , , i32) -declare @llvm.vp.xor.nxv4i64(, , , i32) - define @vandn_vv_vp_nxv4i64( %a, %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vandn_vv_vp_nxv4i64: ; CHECK: # %bb.0: @@ -1339,9 +1276,6 @@ define @vandn_vx_vp_nxv4i64(i64 %a, %b, %x } -declare @llvm.vp.and.nxv8i64(, , , i32) -declare @llvm.vp.xor.nxv8i64(, , , i32) - define @vandn_vv_vp_nxv8i64( %a, %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vandn_vv_vp_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vandn.ll b/llvm/test/CodeGen/RISCV/rvv/vandn.ll index b346207a5339d..88a51658e4ef3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vandn.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vandn.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvkb \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vandn.nxv1i8.nxv1i8( - , - , - , - iXLen) - define @intrinsic_vandn_vv_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv2i8.nxv2i8( - , - , - , - iXLen) - define @intrinsic_vandn_vv_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ 
-73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv4i8.nxv4i8( - , - , - , - iXLen) - define @intrinsic_vandn_vv_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv8i8.nxv8i8( - , - , - , - iXLen) - define @intrinsic_vandn_vv_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv16i8.nxv16i8( - , - , - , - iXLen) - define @intrinsic_vandn_vv_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv32i8.nxv32i8( - , - , - , - iXLen) - 
define @intrinsic_vandn_vv_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv64i8.nxv64i8( - , - , - , - iXLen) - define @intrinsic_vandn_vv_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vv_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vv_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv1i16.nxv1i16( - , - , - , - iXLen) - define @intrinsic_vandn_vv_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv2i16.nxv2i16( - , - , - , - iXLen) - define @intrinsic_vandn_vv_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vandn_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv4i16.nxv4i16( - , - , - , - iXLen) - define @intrinsic_vandn_vv_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv8i16.nxv8i16( - , - , - , - iXLen) - define @intrinsic_vandn_vv_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv16i16.nxv16i16( - , - , - , - iXLen) - define @intrinsic_vandn_vv_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -544,14 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -569,12 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv32i16.nxv32i16( - , - , - , - iXLen) - define @intrinsic_vandn_vv_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vv_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ 
-591,14 +417,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vv_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv1i32.nxv1i32( - , - , - , - iXLen) - define @intrinsic_vandn_vv_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv2i32.nxv2i32( - , - , - , - iXLen) - define @intrinsic_vandn_vv_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -711,12 +501,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv4i32.nxv4i32( - , - , - , - iXLen) - define @intrinsic_vandn_vv_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -733,14 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -758,12 +534,6 @@ entry: ret %a } -declare 
@llvm.riscv.vandn.nxv8i32.nxv8i32( - , - , - , - iXLen) - define @intrinsic_vandn_vv_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -780,14 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -805,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv16i32.nxv16i32( - , - , - , - iXLen) - define @intrinsic_vandn_vv_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vv_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -827,14 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vv_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv1i64.nxv1i64( - , - , - , - iXLen) - define @intrinsic_vandn_vv_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv2i64.nxv2i64( - , - , - , - iXLen) - define @intrinsic_vandn_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, - iXLen) - define 
@intrinsic_vandn_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv4i64.nxv4i64( - , - , - , - iXLen) - define @intrinsic_vandn_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv8i64.nxv8i64( - , - , - , - iXLen) - define @intrinsic_vandn_vv_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vv_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vv_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1042,12 +734,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv1i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vandn_vx_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1064,14 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1089,12 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv2i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vandn_vx_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vx_nxv2i8_i8: ; 
CHECK: # %bb.0: # %entry @@ -1111,14 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1136,12 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv4i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vandn_vx_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1158,14 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1183,12 +833,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv8i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vandn_vx_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1205,14 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1230,12 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv16i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vandn_vx_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1252,14 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1277,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv32i8.i8( - , - , - i8, - iXLen) - define 
@intrinsic_vandn_vx_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1299,14 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1324,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv64i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vandn_vx_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vx_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1346,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vx_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1371,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv1i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vandn_vx_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1393,14 +981,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1418,12 +998,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv2i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vandn_vx_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1440,14 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv2i16_i16: ; 
CHECK: # %bb.0: # %entry @@ -1465,12 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv4i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vandn_vx_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1487,14 +1047,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1512,12 +1064,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv8i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vandn_vx_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1534,14 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1559,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv16i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vandn_vx_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1581,14 +1113,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1606,12 +1130,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv32i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vandn_vx_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vx_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1628,14 +1146,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv32i16.i16( - , - , 
- i16, - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vx_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1653,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv1i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vandn_vx_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1675,14 +1179,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1700,12 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv2i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vandn_vx_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1722,14 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1747,12 +1229,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv4i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vandn_vx_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1769,14 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1794,12 +1262,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv8i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vandn_vx_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vandn_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1816,14 +1278,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1841,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv16i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vandn_vx_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vandn_vx_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1863,14 +1311,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vx_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1888,12 +1328,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv1i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vandn_vx_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vandn_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1922,14 +1356,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vandn_mask_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1959,12 +1385,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv2i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vandn_vx_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vandn_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1993,14 +1413,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vandn_mask_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2030,12 +1442,6 
@@ entry: ret %a } -declare @llvm.riscv.vandn.nxv4i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vandn_vx_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vandn_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2064,14 +1470,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vandn_mask_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2101,12 +1499,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.nxv8i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vandn_vx_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vandn_vx_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2135,14 +1527,6 @@ entry: ret %a } -declare @llvm.riscv.vandn.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vandn_mask_vx_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vandn_mask_vx_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vasub.ll b/llvm/test/CodeGen/RISCV/rvv/vasub.ll index 1dfba884d9404..2b7f8dbd34cf1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vasub.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vasub.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vasub.nxv1i8.nxv1i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,13 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -52,12 +39,6 @@ entry: ret %a } 
-declare @llvm.riscv.vasub.nxv2i8.nxv2i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vasub_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,13 +56,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -100,12 +74,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv4i8.nxv4i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vasub_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -123,13 +91,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -148,12 +109,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv8i8.nxv8i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vasub_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -171,13 +126,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -196,12 +144,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv16i8.nxv16i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vasub_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -219,13 +161,6 @@ entry: ret 
%a } -declare @llvm.riscv.vasub.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -244,12 +179,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv32i8.nxv32i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vasub_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -267,13 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -292,12 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv64i8.nxv64i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vasub_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -315,13 +231,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -341,12 +250,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv1i16.nxv1i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vasub_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -364,13 +267,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vasub_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -389,12 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv2i16.nxv2i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vasub_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -412,13 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -437,12 +320,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv4i16.nxv4i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vasub_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -460,13 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -485,12 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv8i16.nxv8i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vasub_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -508,13 +372,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -533,12 +390,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv16i16.nxv16i16( - , - , - , - iXLen, iXLen); - define 
@intrinsic_vasub_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -556,13 +407,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -581,12 +425,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv32i16.nxv32i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vasub_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -604,13 +442,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -630,12 +461,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv1i32.nxv1i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vasub_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -653,13 +478,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -678,12 +496,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv2i32.nxv2i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vasub_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -701,13 +513,6 @@ entry: ret 
%a } -declare @llvm.riscv.vasub.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -726,12 +531,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv4i32.nxv4i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vasub_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -749,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -774,12 +566,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv8i32.nxv8i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vasub_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -797,13 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -822,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv16i32.nxv16i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vasub_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -845,13 +618,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vasub_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -871,12 +637,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv1i64.nxv1i64( - , - , - , - iXLen, iXLen); - define @intrinsic_vasub_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -894,13 +654,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -919,12 +672,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv2i64.nxv2i64( - , - , - , - iXLen, iXLen); - define @intrinsic_vasub_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -942,13 +689,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -967,12 +707,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv4i64.nxv4i64( - , - , - , - iXLen, iXLen); - define @intrinsic_vasub_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -990,13 +724,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1015,12 +742,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv8i64.nxv8i64( - , - , - , - iXLen, iXLen); - define 
@intrinsic_vasub_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1038,13 +759,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1064,12 +778,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv1i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vasub_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1087,13 +795,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1112,12 +813,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv2i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vasub_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1135,13 +830,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1160,12 +848,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv4i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vasub_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1183,13 +865,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vasub_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1208,12 +883,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv8i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vasub_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1231,13 +900,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1256,12 +918,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv16i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vasub_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1279,13 +935,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1304,12 +953,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv32i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vasub_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1327,13 +970,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1352,12 +988,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv64i8.i8( - , - , - i8, - iXLen, iXLen); - define 
@intrinsic_vasub_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1375,13 +1005,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1400,12 +1023,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv1i16.i16( - , - , - i16, - iXLen, iXLen); - define @intrinsic_vasub_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1423,13 +1040,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1448,12 +1058,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv2i16.i16( - , - , - i16, - iXLen, iXLen); - define @intrinsic_vasub_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1471,13 +1075,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1496,12 +1093,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv4i16.i16( - , - , - i16, - iXLen, iXLen); - define @intrinsic_vasub_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1519,13 +1110,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv4i16.i16( - , - , - 
i16, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1544,12 +1128,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv8i16.i16( - , - , - i16, - iXLen, iXLen); - define @intrinsic_vasub_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1567,13 +1145,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1592,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv16i16.i16( - , - , - i16, - iXLen, iXLen); - define @intrinsic_vasub_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1615,13 +1180,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1640,12 +1198,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv32i16.i16( - , - , - i16, - iXLen, iXLen); - define @intrinsic_vasub_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1663,13 +1215,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ 
-1688,12 +1233,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv1i32.i32( - , - , - i32, - iXLen, iXLen); - define @intrinsic_vasub_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1711,13 +1250,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1736,12 +1268,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv2i32.i32( - , - , - i32, - iXLen, iXLen); - define @intrinsic_vasub_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1759,13 +1285,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1784,12 +1303,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv4i32.i32( - , - , - i32, - iXLen, iXLen); - define @intrinsic_vasub_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1807,13 +1320,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1832,12 +1338,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv8i32.i32( - , - , - i32, - iXLen, iXLen); - define @intrinsic_vasub_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vasub_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1855,13 +1355,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1880,12 +1373,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv16i32.i32( - , - , - i32, - iXLen, iXLen); - define @intrinsic_vasub_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1903,13 +1390,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1928,12 +1408,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv1i64.i64( - , - , - i64, - iXLen, iXLen); - define @intrinsic_vasub_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vasub_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1964,13 +1438,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vasub_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -2002,12 +1469,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv2i64.i64( - , - , - i64, - iXLen, iXLen); - define @intrinsic_vasub_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vasub_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2038,13 +1499,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vasub_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vasub_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2076,12 +1530,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv4i64.i64( - , - , - i64, - iXLen, iXLen); - define @intrinsic_vasub_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vasub_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2112,13 +1560,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vasub_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2150,12 +1591,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.nxv8i64.i64( - , - , - i64, - iXLen, iXLen); - define @intrinsic_vasub_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vasub_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2186,13 +1621,6 @@ entry: ret %a } -declare @llvm.riscv.vasub.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasub_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vasub_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vasubu.ll b/llvm/test/CodeGen/RISCV/rvv/vasubu.ll index 24fa668f7955e..c96a467bb425a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vasubu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vasubu.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vasubu.nxv1i8.nxv1i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,13 +21,6 @@ entry: ret %a } -declare 
@llvm.riscv.vasubu.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -52,12 +39,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv2i8.nxv2i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vasubu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,13 +56,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -100,12 +74,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv4i8.nxv4i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vasubu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -123,13 +91,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -148,12 +109,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv8i8.nxv8i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vasubu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -171,13 +126,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # 
%entry @@ -196,12 +144,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv16i8.nxv16i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vasubu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -219,13 +161,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -244,12 +179,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv32i8.nxv32i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vasubu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -267,13 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -292,12 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv64i8.nxv64i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vasubu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -315,13 +231,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -341,12 +250,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv1i16.nxv1i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vasubu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vasubu_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -364,13 +267,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -389,12 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv2i16.nxv2i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vasubu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -412,13 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -437,12 +320,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv4i16.nxv4i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vasubu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -460,13 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -485,12 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv8i16.nxv8i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vasubu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -508,13 +372,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, iXLen, 
iXLen); - define @intrinsic_vasubu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -533,12 +390,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv16i16.nxv16i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vasubu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -556,13 +407,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -581,12 +425,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv32i16.nxv32i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vasubu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -604,13 +442,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -630,12 +461,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv1i32.nxv1i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vasubu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -653,13 +478,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; 
CHECK: # %bb.0: # %entry @@ -678,12 +496,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv2i32.nxv2i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vasubu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -701,13 +513,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -726,12 +531,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv4i32.nxv4i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vasubu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -749,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -774,12 +566,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv8i32.nxv8i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vasubu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -797,13 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -822,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv16i32.nxv16i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vasubu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, 
iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -845,13 +618,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -871,12 +637,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv1i64.nxv1i64( - , - , - , - iXLen, iXLen); - define @intrinsic_vasubu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -894,13 +654,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -919,12 +672,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv2i64.nxv2i64( - , - , - , - iXLen, iXLen); - define @intrinsic_vasubu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -942,13 +689,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -967,12 +707,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv4i64.nxv4i64( - , - , - , - iXLen, iXLen); - define @intrinsic_vasubu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -990,13 +724,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv4i64.nxv4i64( 
- , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1015,12 +742,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv8i64.nxv8i64( - , - , - , - iXLen, iXLen); - define @intrinsic_vasubu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1038,13 +759,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1064,12 +778,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv1i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vasubu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1087,13 +795,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1112,12 +813,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv2i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vasubu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1135,13 +830,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1160,12 +848,6 @@ entry: 
ret %a } -declare @llvm.riscv.vasubu.nxv4i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vasubu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1183,13 +865,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1208,12 +883,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv8i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vasubu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1231,13 +900,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1256,12 +918,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv16i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vasubu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1279,13 +935,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1304,12 +953,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv32i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vasubu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1327,13 +970,6 @@ entry: ret %a 
} -declare @llvm.riscv.vasubu.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1352,12 +988,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv64i8.i8( - , - , - i8, - iXLen, iXLen); - define @intrinsic_vasubu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1375,13 +1005,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1400,12 +1023,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv1i16.i16( - , - , - i16, - iXLen, iXLen); - define @intrinsic_vasubu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1423,13 +1040,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1448,12 +1058,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv2i16.i16( - , - , - i16, - iXLen, iXLen); - define @intrinsic_vasubu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1471,13 +1075,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vasubu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1496,12 +1093,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv4i16.i16( - , - , - i16, - iXLen, iXLen); - define @intrinsic_vasubu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1519,13 +1110,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1544,12 +1128,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv8i16.i16( - , - , - i16, - iXLen, iXLen); - define @intrinsic_vasubu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1567,13 +1145,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1592,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv16i16.i16( - , - , - i16, - iXLen, iXLen); - define @intrinsic_vasubu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1615,13 +1180,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1640,12 +1198,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv32i16.i16( - , - , - i16, - iXLen, iXLen); - define 
@intrinsic_vasubu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1663,13 +1215,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1688,12 +1233,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv1i32.i32( - , - , - i32, - iXLen, iXLen); - define @intrinsic_vasubu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1711,13 +1250,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1736,12 +1268,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv2i32.i32( - , - , - i32, - iXLen, iXLen); - define @intrinsic_vasubu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1759,13 +1285,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1784,12 +1303,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv4i32.i32( - , - , - i32, - iXLen, iXLen); - define @intrinsic_vasubu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1807,13 +1320,6 @@ entry: ret %a } -declare 
@llvm.riscv.vasubu.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1832,12 +1338,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv8i32.i32( - , - , - i32, - iXLen, iXLen); - define @intrinsic_vasubu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1855,13 +1355,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1880,12 +1373,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv16i32.i32( - , - , - i32, - iXLen, iXLen); - define @intrinsic_vasubu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1903,13 +1390,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1928,12 +1408,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv1i64.i64( - , - , - i64, - iXLen, iXLen); - define @intrinsic_vasubu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vasubu_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1964,13 +1438,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: 
intrinsic_vasubu_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -2002,12 +1469,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv2i64.i64( - , - , - i64, - iXLen, iXLen); - define @intrinsic_vasubu_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vasubu_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2038,13 +1499,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vasubu_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2076,12 +1530,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv4i64.i64( - , - , - i64, - iXLen, iXLen); - define @intrinsic_vasubu_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vasubu_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2112,13 +1560,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vasubu_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2150,12 +1591,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.nxv8i64.i64( - , - , - i64, - iXLen, iXLen); - define @intrinsic_vasubu_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vasubu_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2186,13 +1621,6 @@ entry: ret %a } -declare @llvm.riscv.vasubu.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, iXLen, iXLen); - define @intrinsic_vasubu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vasubu_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vbrev.ll b/llvm/test/CodeGen/RISCV/rvv/vbrev.ll index d8a98945e1192..57ba17ea78e99 100644 --- 
a/llvm/test/CodeGen/RISCV/rvv/vbrev.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vbrev.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvbb \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vbrev.nxv1i8( - , - , - iXLen); - define @intrinsic_vbrev_vs_nxv1i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev_vs_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.mask.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev_mask_vs_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -47,11 +35,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.nxv2i8( - , - , - iXLen); - define @intrinsic_vbrev_vs_nxv2i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev_vs_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -67,13 +50,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.mask.nxv2i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev_mask_vs_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -90,11 +66,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.nxv4i8( - , - , - iXLen); - define @intrinsic_vbrev_vs_nxv4i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev_vs_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -110,13 +81,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.mask.nxv4i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev_mask_vs_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -133,11 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.nxv8i8( - , - , - iXLen); - define @intrinsic_vbrev_vs_nxv8i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev_vs_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -153,13 +112,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.mask.nxv8i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev_mask_vs_nxv8i8( %0, 
%1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -176,11 +128,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.nxv16i8( - , - , - iXLen); - define @intrinsic_vbrev_vs_nxv16i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev_vs_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -196,13 +143,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.mask.nxv16i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev_mask_vs_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -219,11 +159,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.nxv32i8( - , - , - iXLen); - define @intrinsic_vbrev_vs_nxv32i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev_vs_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -239,13 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.mask.nxv32i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev_mask_vs_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -262,11 +190,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.nxv64i8( - , - , - iXLen); - define @intrinsic_vbrev_vs_nxv64i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev_vs_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -282,13 +205,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.mask.nxv64i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev_mask_vs_nxv64i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -305,11 +221,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.nxv1i16( - , - , - iXLen); - define @intrinsic_vbrev_vs_nxv1i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev_vs_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -325,13 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.mask.nxv1i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev_mask_vs_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vbrev_mask_vs_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -348,11 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.nxv2i16( - , - , - iXLen); - define @intrinsic_vbrev_vs_nxv2i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev_vs_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -368,13 +267,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.mask.nxv2i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev_mask_vs_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -391,11 +283,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.nxv4i16( - , - , - iXLen); - define @intrinsic_vbrev_vs_nxv4i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev_vs_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -411,13 +298,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.mask.nxv4i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev_mask_vs_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -434,11 +314,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.nxv8i16( - , - , - iXLen); - define @intrinsic_vbrev_vs_nxv8i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev_vs_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -454,13 +329,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.mask.nxv8i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev_mask_vs_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -477,11 +345,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.nxv16i16( - , - , - iXLen); - define @intrinsic_vbrev_vs_nxv16i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev_vs_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -497,13 +360,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.mask.nxv16i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev_mask_vs_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv16i16: ; CHECK: # %bb.0: # 
%entry @@ -520,11 +376,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.nxv32i16( - , - , - iXLen); - define @intrinsic_vbrev_vs_nxv32i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev_vs_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -540,13 +391,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.mask.nxv32i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev_mask_vs_nxv32i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -563,11 +407,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.nxv1i32( - , - , - iXLen); - define @intrinsic_vbrev_vs_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev_vs_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -583,13 +422,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.mask.nxv1i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev_mask_vs_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -606,11 +438,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.nxv2i32( - , - , - iXLen); - define @intrinsic_vbrev_vs_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev_vs_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -626,13 +453,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.mask.nxv2i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev_mask_vs_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -649,11 +469,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.nxv4i32( - , - , - iXLen); - define @intrinsic_vbrev_vs_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev_vs_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -669,13 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.mask.nxv4i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev_mask_vs_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -692,11 +500,6 @@ entry: ret %a } -declare 
@llvm.riscv.vbrev.nxv8i32( - , - , - iXLen); - define @intrinsic_vbrev_vs_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev_vs_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -712,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.mask.nxv8i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev_mask_vs_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -735,11 +531,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.nxv16i32( - , - , - iXLen); - define @intrinsic_vbrev_vs_nxv16i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev_vs_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -755,13 +546,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.mask.nxv16i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev_mask_vs_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -778,11 +562,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.nxv1i64( - , - , - iXLen); - define @intrinsic_vbrev_vs_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev_vs_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -798,13 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.mask.nxv1i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev_mask_vs_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -821,11 +593,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.nxv2i64( - , - , - iXLen); - define @intrinsic_vbrev_vs_nxv2i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev_vs_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -841,13 +608,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.mask.nxv2i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev_mask_vs_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -864,11 +624,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.nxv4i64( - , - , - iXLen); - define 
@intrinsic_vbrev_vs_nxv4i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev_vs_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -884,13 +639,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.mask.nxv4i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev_mask_vs_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -907,11 +655,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.nxv8i64( - , - , - iXLen); - define @intrinsic_vbrev_vs_nxv8i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev_vs_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -927,13 +670,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev.mask.nxv8i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev_mask_vs_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vbrev8.ll b/llvm/test/CodeGen/RISCV/rvv/vbrev8.ll index 77ee4b1ac14a8..0edcf2417cd74 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vbrev8.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vbrev8.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvkb \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vbrev8.nxv1i8( - , - , - iXLen); - define @intrinsic_vbrev8_vs_nxv1i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_vs_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.mask.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev8_mask_vs_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -47,11 +35,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.nxv2i8( - , - , - iXLen); - define @intrinsic_vbrev8_vs_nxv2i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_vs_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -67,13 +50,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.mask.nxv2i8( - , - , - , 
- iXLen, - iXLen); - define @intrinsic_vbrev8_mask_vs_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -90,11 +66,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.nxv4i8( - , - , - iXLen); - define @intrinsic_vbrev8_vs_nxv4i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_vs_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -110,13 +81,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.mask.nxv4i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev8_mask_vs_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -133,11 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.nxv8i8( - , - , - iXLen); - define @intrinsic_vbrev8_vs_nxv8i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_vs_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -153,13 +112,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.mask.nxv8i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev8_mask_vs_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -176,11 +128,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.nxv16i8( - , - , - iXLen); - define @intrinsic_vbrev8_vs_nxv16i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_vs_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -196,13 +143,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.mask.nxv16i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev8_mask_vs_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -219,11 +159,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.nxv32i8( - , - , - iXLen); - define @intrinsic_vbrev8_vs_nxv32i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_vs_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -239,13 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.mask.nxv32i8( - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vbrev8_mask_vs_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -262,11 +190,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.nxv64i8( - , - , - iXLen); - define @intrinsic_vbrev8_vs_nxv64i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_vs_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -282,13 +205,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.mask.nxv64i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev8_mask_vs_nxv64i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -305,11 +221,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.nxv1i16( - , - , - iXLen); - define @intrinsic_vbrev8_vs_nxv1i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_vs_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -325,13 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.mask.nxv1i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev8_mask_vs_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -348,11 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.nxv2i16( - , - , - iXLen); - define @intrinsic_vbrev8_vs_nxv2i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_vs_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -368,13 +267,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.mask.nxv2i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev8_mask_vs_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -391,11 +283,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.nxv4i16( - , - , - iXLen); - define @intrinsic_vbrev8_vs_nxv4i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_vs_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -411,13 +298,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.mask.nxv4i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev8_mask_vs_nxv4i16( %0, 
%1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -434,11 +314,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.nxv8i16( - , - , - iXLen); - define @intrinsic_vbrev8_vs_nxv8i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_vs_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -454,13 +329,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.mask.nxv8i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev8_mask_vs_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -477,11 +345,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.nxv16i16( - , - , - iXLen); - define @intrinsic_vbrev8_vs_nxv16i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_vs_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -497,13 +360,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.mask.nxv16i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev8_mask_vs_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -520,11 +376,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.nxv32i16( - , - , - iXLen); - define @intrinsic_vbrev8_vs_nxv32i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_vs_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -540,13 +391,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.mask.nxv32i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev8_mask_vs_nxv32i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -563,11 +407,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.nxv1i32( - , - , - iXLen); - define @intrinsic_vbrev8_vs_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_vs_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -583,13 +422,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.mask.nxv1i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev8_mask_vs_nxv1i32( %0, %1, %2, iXLen %3) nounwind 
{ ; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -606,11 +438,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.nxv2i32( - , - , - iXLen); - define @intrinsic_vbrev8_vs_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_vs_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -626,13 +453,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.mask.nxv2i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev8_mask_vs_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -649,11 +469,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.nxv4i32( - , - , - iXLen); - define @intrinsic_vbrev8_vs_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_vs_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -669,13 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.mask.nxv4i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev8_mask_vs_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -692,11 +500,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.nxv8i32( - , - , - iXLen); - define @intrinsic_vbrev8_vs_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_vs_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -712,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.mask.nxv8i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev8_mask_vs_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -735,11 +531,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.nxv16i32( - , - , - iXLen); - define @intrinsic_vbrev8_vs_nxv16i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_vs_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -755,13 +546,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.mask.nxv16i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev8_mask_vs_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vbrev8_mask_vs_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -778,11 +562,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.nxv1i64( - , - , - iXLen); - define @intrinsic_vbrev8_vs_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_vs_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -798,13 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.mask.nxv1i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev8_mask_vs_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -821,11 +593,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.nxv2i64( - , - , - iXLen); - define @intrinsic_vbrev8_vs_nxv2i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_vs_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -841,13 +608,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.mask.nxv2i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev8_mask_vs_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -864,11 +624,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.nxv4i64( - , - , - iXLen); - define @intrinsic_vbrev8_vs_nxv4i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_vs_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -884,13 +639,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.mask.nxv4i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev8_mask_vs_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -907,11 +655,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.nxv8i64( - , - , - iXLen); - define @intrinsic_vbrev8_vs_nxv8i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_vs_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -927,13 +670,6 @@ entry: ret %a } -declare @llvm.riscv.vbrev8.mask.nxv8i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vbrev8_mask_vs_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv8i64: ; 
CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vclmul.ll b/llvm/test/CodeGen/RISCV/rvv/vclmul.ll index 5452191de30a3..8ea8edf218385 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vclmul.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vclmul.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvbc \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vclmul.nxv1i64.nxv1i64( - , - , - , - iXLen) - define @intrinsic_vclmul_vv_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vclmul_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vclmul.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vclmul_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vclmul_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vclmul.nxv2i64.nxv2i64( - , - , - , - iXLen) - define @intrinsic_vclmul_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vclmul_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vclmul.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vclmul_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vclmul_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vclmul.nxv4i64.nxv4i64( - , - , - , - iXLen) - define @intrinsic_vclmul_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vclmul_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vclmul.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vclmul_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vclmul_mask_vv_nxv4i64_nxv4i64: ; CHECK: # 
%bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vclmul.nxv8i64.nxv8i64( - , - , - , - iXLen) - define @intrinsic_vclmul_vv_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vclmul_vv_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vclmul.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vclmul_mask_vv_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vclmul_mask_vv_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -193,12 +137,6 @@ entry: ret %a } -declare @llvm.riscv.vclmul.nxv1i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vclmul_vx_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vclmul_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -227,14 +165,6 @@ entry: ret %a } -declare @llvm.riscv.vclmul.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vclmul_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vclmul_mask_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -264,12 +194,6 @@ entry: ret %a } -declare @llvm.riscv.vclmul.nxv2i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vclmul_vx_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vclmul_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -298,14 +222,6 @@ entry: ret %a } -declare @llvm.riscv.vclmul.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vclmul_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vclmul_mask_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -335,12 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vclmul.nxv4i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vclmul_vx_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vclmul_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -369,14 +279,6 @@ entry: ret %a } -declare @llvm.riscv.vclmul.mask.nxv4i64.i64( - , - , - i64, - , - 
iXLen, - iXLen) - define @intrinsic_vclmul_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vclmul_mask_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -406,12 +308,6 @@ entry: ret %a } -declare @llvm.riscv.vclmul.nxv8i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vclmul_vx_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vclmul_vx_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -440,14 +336,6 @@ entry: ret %a } -declare @llvm.riscv.vclmul.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vclmul_mask_vx_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vclmul_mask_vx_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vclmulh.ll b/llvm/test/CodeGen/RISCV/rvv/vclmulh.ll index b1acb6d1ca3d9..e5b09cc067c5b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vclmulh.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vclmulh.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvbc \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vclmulh.nxv1i64.nxv1i64( - , - , - , - iXLen) - define @intrinsic_vclmulh_vv_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vclmulh_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vclmulh.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vclmulh_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vclmulh_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vclmulh.nxv2i64.nxv2i64( - , - , - , - iXLen) - define @intrinsic_vclmulh_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vclmulh_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vclmulh.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, - 
iXLen) - define @intrinsic_vclmulh_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vclmulh_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vclmulh.nxv4i64.nxv4i64( - , - , - , - iXLen) - define @intrinsic_vclmulh_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vclmulh_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vclmulh.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vclmulh_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vclmulh_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vclmulh.nxv8i64.nxv8i64( - , - , - , - iXLen) - define @intrinsic_vclmulh_vv_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vclmulh_vv_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vclmulh.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vclmulh_mask_vv_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vclmulh_mask_vv_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -193,12 +137,6 @@ entry: ret %a } -declare @llvm.riscv.vclmulh.nxv1i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vclmulh_vx_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vclmulh_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -227,14 +165,6 @@ entry: ret %a } -declare @llvm.riscv.vclmulh.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vclmulh_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vclmulh_mask_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -264,12 +194,6 @@ entry: ret %a } -declare @llvm.riscv.vclmulh.nxv2i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vclmulh_vx_nxv2i64_i64( %0, i64 %1, 
iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vclmulh_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -298,14 +222,6 @@ entry: ret %a } -declare @llvm.riscv.vclmulh.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vclmulh_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vclmulh_mask_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -335,12 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vclmulh.nxv4i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vclmulh_vx_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vclmulh_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -369,14 +279,6 @@ entry: ret %a } -declare @llvm.riscv.vclmulh.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vclmulh_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vclmulh_mask_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -406,12 +308,6 @@ entry: ret %a } -declare @llvm.riscv.vclmulh.nxv8i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vclmulh_vx_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vclmulh_vx_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -440,14 +336,6 @@ entry: ret %a } -declare @llvm.riscv.vclmulh.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vclmulh_mask_vx_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vclmulh_mask_vx_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vclz.ll b/llvm/test/CodeGen/RISCV/rvv/vclz.ll index 8e651fb3aa201..cea7523a450c9 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vclz.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vclz.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvbb \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vclz.nxv1i8( - , - , - iXLen); - define @intrinsic_vclz_vs_nxv1i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vclz_vs_nxv1i8: ; 
CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.mask.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vclz_mask_vs_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -47,11 +35,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.nxv2i8( - , - , - iXLen); - define @intrinsic_vclz_vs_nxv2i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vclz_vs_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -67,13 +50,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.mask.nxv2i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vclz_mask_vs_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -90,11 +66,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.nxv4i8( - , - , - iXLen); - define @intrinsic_vclz_vs_nxv4i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vclz_vs_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -110,13 +81,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.mask.nxv4i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vclz_mask_vs_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -133,11 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.nxv8i8( - , - , - iXLen); - define @intrinsic_vclz_vs_nxv8i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vclz_vs_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -153,13 +112,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.mask.nxv8i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vclz_mask_vs_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -176,11 +128,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.nxv16i8( - , - , - iXLen); - define @intrinsic_vclz_vs_nxv16i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vclz_vs_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -196,13 +143,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.mask.nxv16i8( - , - , - , - 
iXLen, - iXLen); - define @intrinsic_vclz_mask_vs_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -219,11 +159,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.nxv32i8( - , - , - iXLen); - define @intrinsic_vclz_vs_nxv32i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vclz_vs_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -239,13 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.mask.nxv32i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vclz_mask_vs_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -262,11 +190,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.nxv64i8( - , - , - iXLen); - define @intrinsic_vclz_vs_nxv64i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vclz_vs_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -282,13 +205,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.mask.nxv64i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vclz_mask_vs_nxv64i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -305,11 +221,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.nxv1i16( - , - , - iXLen); - define @intrinsic_vclz_vs_nxv1i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vclz_vs_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -325,13 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.mask.nxv1i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vclz_mask_vs_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -348,11 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.nxv2i16( - , - , - iXLen); - define @intrinsic_vclz_vs_nxv2i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vclz_vs_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -368,13 +267,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.mask.nxv2i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vclz_mask_vs_nxv2i16( %0, %1, %2, iXLen %3) 
nounwind { ; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -391,11 +283,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.nxv4i16( - , - , - iXLen); - define @intrinsic_vclz_vs_nxv4i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vclz_vs_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -411,13 +298,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.mask.nxv4i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vclz_mask_vs_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -434,11 +314,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.nxv8i16( - , - , - iXLen); - define @intrinsic_vclz_vs_nxv8i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vclz_vs_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -454,13 +329,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.mask.nxv8i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vclz_mask_vs_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -477,11 +345,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.nxv16i16( - , - , - iXLen); - define @intrinsic_vclz_vs_nxv16i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vclz_vs_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -497,13 +360,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.mask.nxv16i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vclz_mask_vs_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -520,11 +376,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.nxv32i16( - , - , - iXLen); - define @intrinsic_vclz_vs_nxv32i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vclz_vs_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -540,13 +391,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.mask.nxv32i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vclz_mask_vs_nxv32i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv32i16: ; CHECK: # %bb.0: 
# %entry @@ -563,11 +407,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.nxv1i32( - , - , - iXLen); - define @intrinsic_vclz_vs_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vclz_vs_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -583,13 +422,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.mask.nxv1i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vclz_mask_vs_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -606,11 +438,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.nxv2i32( - , - , - iXLen); - define @intrinsic_vclz_vs_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vclz_vs_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -626,13 +453,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.mask.nxv2i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vclz_mask_vs_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -649,11 +469,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.nxv4i32( - , - , - iXLen); - define @intrinsic_vclz_vs_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vclz_vs_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -669,13 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.mask.nxv4i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vclz_mask_vs_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -692,11 +500,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.nxv8i32( - , - , - iXLen); - define @intrinsic_vclz_vs_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vclz_vs_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -712,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.mask.nxv8i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vclz_mask_vs_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -735,11 +531,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.nxv16i32( - , - 
, - iXLen); - define @intrinsic_vclz_vs_nxv16i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vclz_vs_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -755,13 +546,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.mask.nxv16i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vclz_mask_vs_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -778,11 +562,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.nxv1i64( - , - , - iXLen); - define @intrinsic_vclz_vs_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vclz_vs_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -798,13 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.mask.nxv1i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vclz_mask_vs_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -821,11 +593,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.nxv2i64( - , - , - iXLen); - define @intrinsic_vclz_vs_nxv2i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vclz_vs_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -841,13 +608,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.mask.nxv2i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vclz_mask_vs_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -864,11 +624,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.nxv4i64( - , - , - iXLen); - define @intrinsic_vclz_vs_nxv4i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vclz_vs_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -884,13 +639,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.mask.nxv4i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vclz_mask_vs_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -907,11 +655,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.nxv8i64( - , - , - iXLen); - define @intrinsic_vclz_vs_nxv8i64( %0, iXLen %1) nounwind { ; 
CHECK-LABEL: intrinsic_vclz_vs_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -927,13 +670,6 @@ entry: ret %a } -declare @llvm.riscv.vclz.mask.nxv8i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vclz_mask_vs_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vcompress.ll b/llvm/test/CodeGen/RISCV/rvv/vcompress.ll index 5ee82e6d95d4d..faf3a8eac0aac 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vcompress.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vcompress.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vcompress.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,12 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv2i8_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -48,12 +36,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv4i8_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -70,12 +52,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -92,12 +68,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv16i8_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -114,12 +84,6 @@ entry: ret %a } 
-declare @llvm.riscv.vcompress.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv32i8_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -136,12 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv64i8( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv64i8_nxv64i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -158,12 +116,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv1i16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -180,12 +132,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv2i16_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -202,12 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -224,12 +164,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv8i16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -246,12 +180,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv16i16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -268,12 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv32i16_nxv32i16( %0, %1, %2, iXLen %3) nounwind 
{ ; CHECK-LABEL: intrinsic_vcompress_vm_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -290,12 +212,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -312,12 +228,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -334,12 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -356,12 +260,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -378,12 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv16i32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -400,12 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -422,12 +308,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv2i64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -444,12 +324,6 @@ entry: ret %a } -declare 
@llvm.riscv.vcompress.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv4i64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -466,12 +340,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv8i64_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -488,12 +356,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv1f16( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -510,12 +372,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv2f16( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -532,12 +388,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv4f16( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -554,12 +404,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv8f16( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -576,12 +420,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv16f16( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -598,12 +436,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv32f16( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv32f16_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; 
CHECK-LABEL: intrinsic_vcompress_vm_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -620,12 +452,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv1f32( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -642,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv2f32( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -664,12 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv4f32( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -686,12 +500,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv8f32( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -708,12 +516,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv16f32( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv16f32_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -730,12 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv1f64( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -752,12 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv2f64( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -774,12 +564,6 @@ entry: ret %a } -declare 
@llvm.riscv.vcompress.nxv4f64( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -796,12 +580,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv8f64( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv8f64_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -818,12 +596,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv1bf16( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -840,12 +612,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv2bf16( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -862,12 +628,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv4bf16( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -884,12 +644,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv8bf16( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -906,12 +660,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv16bf16( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -928,12 +676,6 @@ entry: ret %a } -declare @llvm.riscv.vcompress.nxv32bf16( - , - , - , - iXLen); - define @intrinsic_vcompress_vm_nxv32bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv32bf16: ; CHECK: # %bb.0: # %entry diff 
--git a/llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll index 2f5fde3bb3b20..b6ebe3ff2556e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll @@ -372,8 +372,6 @@ define @vfsgnj_vv_nxv32bf16_unmasked( %v } -declare @llvm.vp.copysign.nxv1f16(, , , i32) - define @vfsgnj_vv_nxv1f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsgnj_vv_nxv1f16: ; ZVFH: # %bb.0: @@ -434,8 +432,6 @@ define @vfsgnj_vv_nxv1f16_unmasked( %va, ret %v } -declare @llvm.vp.copysign.nxv2f16(, , , i32) - define @vfsgnj_vv_nxv2f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsgnj_vv_nxv2f16: ; ZVFH: # %bb.0: @@ -496,8 +492,6 @@ define @vfsgnj_vv_nxv2f16_unmasked( %va, ret %v } -declare @llvm.vp.copysign.nxv4f16(, , , i32) - define @vfsgnj_vv_nxv4f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsgnj_vv_nxv4f16: ; ZVFH: # %bb.0: @@ -558,8 +552,6 @@ define @vfsgnj_vv_nxv4f16_unmasked( %va, ret %v } -declare @llvm.vp.copysign.nxv8f16(, , , i32) - define @vfsgnj_vv_nxv8f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsgnj_vv_nxv8f16: ; ZVFH: # %bb.0: @@ -620,8 +612,6 @@ define @vfsgnj_vv_nxv8f16_unmasked( %va, ret %v } -declare @llvm.vp.copysign.nxv16f16(, , , i32) - define @vfsgnj_vv_nxv16f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsgnj_vv_nxv16f16: ; ZVFH: # %bb.0: @@ -682,8 +672,6 @@ define @vfsgnj_vv_nxv16f16_unmasked( %v ret %v } -declare @llvm.vp.copysign.nxv32f16(, , , i32) - define @vfsgnj_vv_nxv32f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsgnj_vv_nxv32f16: ; ZVFH: # %bb.0: @@ -744,8 +732,6 @@ define @vfsgnj_vv_nxv32f16_unmasked( %v ret %v } -declare @llvm.vp.copysign.nxv1f32(, , , i32) - define @vfsgnj_vv_nxv1f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_nxv1f32: ; CHECK: # %bb.0: @@ -766,8 +752,6 @@ define @vfsgnj_vv_nxv1f32_unmasked( %va ret %v } -declare @llvm.vp.copysign.nxv2f32(, , , i32) - define @vfsgnj_vv_nxv2f32( %va, %vb, %m, 
i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_nxv2f32: ; CHECK: # %bb.0: @@ -788,8 +772,6 @@ define @vfsgnj_vv_nxv2f32_unmasked( %va ret %v } -declare @llvm.vp.copysign.nxv4f32(, , , i32) - define @vfsgnj_vv_nxv4f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_nxv4f32: ; CHECK: # %bb.0: @@ -810,8 +792,6 @@ define @vfsgnj_vv_nxv4f32_unmasked( %va ret %v } -declare @llvm.vp.copysign.nxv8f32(, , , i32) - define @vfsgnj_vv_nxv8f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_nxv8f32: ; CHECK: # %bb.0: @@ -832,8 +812,6 @@ define @vfsgnj_vv_nxv8f32_unmasked( %va ret %v } -declare @llvm.vp.copysign.nxv16f32(, , , i32) - define @vfsgnj_vv_nxv16f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_nxv16f32: ; CHECK: # %bb.0: @@ -854,8 +832,6 @@ define @vfsgnj_vv_nxv16f32_unmasked( ret %v } -declare @llvm.vp.copysign.nxv1f64(, , , i32) - define @vfsgnj_vv_nxv1f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_nxv1f64: ; CHECK: # %bb.0: @@ -876,8 +852,6 @@ define @vfsgnj_vv_nxv1f64_unmasked( % ret %v } -declare @llvm.vp.copysign.nxv2f64(, , , i32) - define @vfsgnj_vv_nxv2f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_nxv2f64: ; CHECK: # %bb.0: @@ -898,8 +872,6 @@ define @vfsgnj_vv_nxv2f64_unmasked( % ret %v } -declare @llvm.vp.copysign.nxv4f64(, , , i32) - define @vfsgnj_vv_nxv4f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_nxv4f64: ; CHECK: # %bb.0: @@ -920,8 +892,6 @@ define @vfsgnj_vv_nxv4f64_unmasked( % ret %v } -declare @llvm.vp.copysign.nxv8f64(, , , i32) - define @vfsgnj_vv_nxv8f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vcpop.ll b/llvm/test/CodeGen/RISCV/rvv/vcpop.ll index 6b35e4767b239..6b85cd3e33054 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vcpop.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vcpop.ll @@ -4,10 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: 
-verify-machineinstrs | FileCheck %s -declare iXLen @llvm.riscv.vcpop.iXLen.nxv1i1( - , - iXLen); - define iXLen @intrinsic_vcpop_m_nxv1i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpop_m_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -35,11 +31,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv1i1( - , - , - iXLen); - define iXLen @intrinsic_vcpop_mask_m_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -71,10 +62,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vcpop.iXLen.nxv2i1( - , - iXLen); - define iXLen @intrinsic_vcpop_m_nxv2i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpop_m_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -89,11 +76,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv2i1( - , - , - iXLen); - define iXLen @intrinsic_vcpop_mask_m_nxv2i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -111,10 +93,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vcpop.iXLen.nxv4i1( - , - iXLen); - define iXLen @intrinsic_vcpop_m_nxv4i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpop_m_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -129,11 +107,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv4i1( - , - , - iXLen); - define iXLen @intrinsic_vcpop_mask_m_nxv4i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -151,10 +124,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vcpop.iXLen.nxv8i1( - , - iXLen); - define iXLen @intrinsic_vcpop_m_nxv8i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpop_m_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -169,11 +138,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv8i1( - , - , - iXLen); - define iXLen @intrinsic_vcpop_mask_m_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv8i1: ; CHECK: # %bb.0: # %entry @@ 
-191,10 +155,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vcpop.iXLen.nxv16i1( - , - iXLen); - define iXLen @intrinsic_vcpop_m_nxv16i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpop_m_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -209,11 +169,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv16i1( - , - , - iXLen); - define iXLen @intrinsic_vcpop_mask_m_nxv16i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -231,10 +186,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vcpop.iXLen.nxv32i1( - , - iXLen); - define iXLen @intrinsic_vcpop_m_nxv32i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpop_m_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -249,11 +200,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv32i1( - , - , - iXLen); - define iXLen @intrinsic_vcpop_mask_m_nxv32i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -271,10 +217,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vcpop.iXLen.nxv64i1( - , - iXLen); - define iXLen @intrinsic_vcpop_m_nxv64i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpop_m_nxv64i1: ; CHECK: # %bb.0: # %entry @@ -289,11 +231,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv64i1( - , - , - iXLen); - define iXLen @intrinsic_vcpop_mask_m_nxv64i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv64i1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vcpopv.ll b/llvm/test/CodeGen/RISCV/rvv/vcpopv.ll index 0429bcd93c1b7..6c37679836e28 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vcpopv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vcpopv.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvbb \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vcpopv.nxv1i8( - , - , - iXLen); - define @intrinsic_vcpopv_vs_nxv1i8( %0, iXLen %1) 
nounwind { ; CHECK-LABEL: intrinsic_vcpopv_vs_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.mask.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vcpopv_mask_vs_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -47,11 +35,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.nxv2i8( - , - , - iXLen); - define @intrinsic_vcpopv_vs_nxv2i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_vs_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -67,13 +50,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.mask.nxv2i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vcpopv_mask_vs_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -90,11 +66,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.nxv4i8( - , - , - iXLen); - define @intrinsic_vcpopv_vs_nxv4i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_vs_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -110,13 +81,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.mask.nxv4i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vcpopv_mask_vs_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -133,11 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.nxv8i8( - , - , - iXLen); - define @intrinsic_vcpopv_vs_nxv8i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_vs_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -153,13 +112,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.mask.nxv8i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vcpopv_mask_vs_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -176,11 +128,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.nxv16i8( - , - , - iXLen); - define @intrinsic_vcpopv_vs_nxv16i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_vs_nxv16i8: ; CHECK: # 
%bb.0: # %entry @@ -196,13 +143,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.mask.nxv16i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vcpopv_mask_vs_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -219,11 +159,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.nxv32i8( - , - , - iXLen); - define @intrinsic_vcpopv_vs_nxv32i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_vs_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -239,13 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.mask.nxv32i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vcpopv_mask_vs_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -262,11 +190,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.nxv64i8( - , - , - iXLen); - define @intrinsic_vcpopv_vs_nxv64i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_vs_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -282,13 +205,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.mask.nxv64i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vcpopv_mask_vs_nxv64i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -305,11 +221,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.nxv1i16( - , - , - iXLen); - define @intrinsic_vcpopv_vs_nxv1i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_vs_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -325,13 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.mask.nxv1i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vcpopv_mask_vs_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -348,11 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.nxv2i16( - , - , - iXLen); - define @intrinsic_vcpopv_vs_nxv2i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_vs_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -368,13 +267,6 @@ 
entry: ret %a } -declare @llvm.riscv.vcpopv.mask.nxv2i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vcpopv_mask_vs_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -391,11 +283,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.nxv4i16( - , - , - iXLen); - define @intrinsic_vcpopv_vs_nxv4i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_vs_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -411,13 +298,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.mask.nxv4i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vcpopv_mask_vs_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -434,11 +314,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.nxv8i16( - , - , - iXLen); - define @intrinsic_vcpopv_vs_nxv8i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_vs_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -454,13 +329,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.mask.nxv8i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vcpopv_mask_vs_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -477,11 +345,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.nxv16i16( - , - , - iXLen); - define @intrinsic_vcpopv_vs_nxv16i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_vs_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -497,13 +360,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.mask.nxv16i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vcpopv_mask_vs_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -520,11 +376,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.nxv32i16( - , - , - iXLen); - define @intrinsic_vcpopv_vs_nxv32i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_vs_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -540,13 +391,6 @@ entry: ret %a } -declare 
@llvm.riscv.vcpopv.mask.nxv32i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vcpopv_mask_vs_nxv32i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -563,11 +407,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.nxv1i32( - , - , - iXLen); - define @intrinsic_vcpopv_vs_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_vs_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -583,13 +422,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.mask.nxv1i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vcpopv_mask_vs_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -606,11 +438,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.nxv2i32( - , - , - iXLen); - define @intrinsic_vcpopv_vs_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_vs_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -626,13 +453,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.mask.nxv2i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vcpopv_mask_vs_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -649,11 +469,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.nxv4i32( - , - , - iXLen); - define @intrinsic_vcpopv_vs_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_vs_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -669,13 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.mask.nxv4i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vcpopv_mask_vs_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -692,11 +500,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.nxv8i32( - , - , - iXLen); - define @intrinsic_vcpopv_vs_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_vs_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -712,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.mask.nxv8i32( - , 
- , - , - iXLen, - iXLen); - define @intrinsic_vcpopv_mask_vs_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -735,11 +531,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.nxv16i32( - , - , - iXLen); - define @intrinsic_vcpopv_vs_nxv16i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_vs_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -755,13 +546,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.mask.nxv16i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vcpopv_mask_vs_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -778,11 +562,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.nxv1i64( - , - , - iXLen); - define @intrinsic_vcpopv_vs_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_vs_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -798,13 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.mask.nxv1i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vcpopv_mask_vs_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -821,11 +593,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.nxv2i64( - , - , - iXLen); - define @intrinsic_vcpopv_vs_nxv2i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_vs_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -841,13 +608,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.mask.nxv2i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vcpopv_mask_vs_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -864,11 +624,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.nxv4i64( - , - , - iXLen); - define @intrinsic_vcpopv_vs_nxv4i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_vs_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -884,13 +639,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.mask.nxv4i64( - , - , - , - iXLen, - iXLen); - 
define @intrinsic_vcpopv_mask_vs_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -907,11 +655,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.nxv8i64( - , - , - iXLen); - define @intrinsic_vcpopv_vs_nxv8i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_vs_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -927,13 +670,6 @@ entry: ret %a } -declare @llvm.riscv.vcpopv.mask.nxv8i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vcpopv_mask_vs_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vctz.ll b/llvm/test/CodeGen/RISCV/rvv/vctz.ll index 67cd5d5430e38..e1b4915fa7dd4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vctz.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vctz.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvbb \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vctz.nxv1i8( - , - , - iXLen); - define @intrinsic_vctz_vs_nxv1i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vctz_vs_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.mask.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vctz_mask_vs_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -47,11 +35,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.nxv2i8( - , - , - iXLen); - define @intrinsic_vctz_vs_nxv2i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vctz_vs_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -67,13 +50,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.mask.nxv2i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vctz_mask_vs_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -90,11 +66,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.nxv4i8( - , - , - iXLen); - 
define @intrinsic_vctz_vs_nxv4i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vctz_vs_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -110,13 +81,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.mask.nxv4i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vctz_mask_vs_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -133,11 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.nxv8i8( - , - , - iXLen); - define @intrinsic_vctz_vs_nxv8i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vctz_vs_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -153,13 +112,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.mask.nxv8i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vctz_mask_vs_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -176,11 +128,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.nxv16i8( - , - , - iXLen); - define @intrinsic_vctz_vs_nxv16i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vctz_vs_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -196,13 +143,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.mask.nxv16i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vctz_mask_vs_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -219,11 +159,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.nxv32i8( - , - , - iXLen); - define @intrinsic_vctz_vs_nxv32i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vctz_vs_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -239,13 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.mask.nxv32i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vctz_mask_vs_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -262,11 +190,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.nxv64i8( - , - , - iXLen); - define @intrinsic_vctz_vs_nxv64i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: 
intrinsic_vctz_vs_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -282,13 +205,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.mask.nxv64i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vctz_mask_vs_nxv64i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -305,11 +221,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.nxv1i16( - , - , - iXLen); - define @intrinsic_vctz_vs_nxv1i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vctz_vs_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -325,13 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.mask.nxv1i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vctz_mask_vs_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -348,11 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.nxv2i16( - , - , - iXLen); - define @intrinsic_vctz_vs_nxv2i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vctz_vs_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -368,13 +267,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.mask.nxv2i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vctz_mask_vs_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -391,11 +283,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.nxv4i16( - , - , - iXLen); - define @intrinsic_vctz_vs_nxv4i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vctz_vs_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -411,13 +298,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.mask.nxv4i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vctz_mask_vs_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -434,11 +314,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.nxv8i16( - , - , - iXLen); - define @intrinsic_vctz_vs_nxv8i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vctz_vs_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -454,13 +329,6 @@ entry: ret 
%a } -declare @llvm.riscv.vctz.mask.nxv8i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vctz_mask_vs_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -477,11 +345,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.nxv16i16( - , - , - iXLen); - define @intrinsic_vctz_vs_nxv16i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vctz_vs_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -497,13 +360,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.mask.nxv16i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vctz_mask_vs_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -520,11 +376,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.nxv32i16( - , - , - iXLen); - define @intrinsic_vctz_vs_nxv32i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vctz_vs_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -540,13 +391,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.mask.nxv32i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vctz_mask_vs_nxv32i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -563,11 +407,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.nxv1i32( - , - , - iXLen); - define @intrinsic_vctz_vs_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vctz_vs_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -583,13 +422,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.mask.nxv1i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vctz_mask_vs_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -606,11 +438,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.nxv2i32( - , - , - iXLen); - define @intrinsic_vctz_vs_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vctz_vs_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -626,13 +453,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.mask.nxv2i32( - , - , - , - iXLen, - iXLen); 
- define @intrinsic_vctz_mask_vs_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -649,11 +469,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.nxv4i32( - , - , - iXLen); - define @intrinsic_vctz_vs_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vctz_vs_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -669,13 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.mask.nxv4i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vctz_mask_vs_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -692,11 +500,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.nxv8i32( - , - , - iXLen); - define @intrinsic_vctz_vs_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vctz_vs_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -712,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.mask.nxv8i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vctz_mask_vs_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -735,11 +531,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.nxv16i32( - , - , - iXLen); - define @intrinsic_vctz_vs_nxv16i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vctz_vs_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -755,13 +546,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.mask.nxv16i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vctz_mask_vs_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -778,11 +562,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.nxv1i64( - , - , - iXLen); - define @intrinsic_vctz_vs_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vctz_vs_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -798,13 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.mask.nxv1i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vctz_mask_vs_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; 
CHECK-LABEL: intrinsic_vctz_mask_vs_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -821,11 +593,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.nxv2i64( - , - , - iXLen); - define @intrinsic_vctz_vs_nxv2i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vctz_vs_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -841,13 +608,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.mask.nxv2i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vctz_mask_vs_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -864,11 +624,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.nxv4i64( - , - , - iXLen); - define @intrinsic_vctz_vs_nxv4i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vctz_vs_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -884,13 +639,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.mask.nxv4i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vctz_mask_vs_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -907,11 +655,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.nxv8i64( - , - , - iXLen); - define @intrinsic_vctz_vs_nxv8i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vctz_vs_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -927,13 +670,6 @@ entry: ret %a } -declare @llvm.riscv.vctz.mask.nxv8i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vctz_mask_vs_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll index 03e4e1f445bee..e2b26ce9d1810 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.sdiv.nxv8i7(, , , i32) - define @vdiv_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: 
vdiv_vx_nxv8i7: ; CHECK: # %bb.0: @@ -23,8 +21,6 @@ define @vdiv_vx_nxv8i7( %a, i7 signext %b, %v } -declare @llvm.vp.sdiv.nxv1i8(, , , i32) - define @vdiv_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv1i8: ; CHECK: # %bb.0: @@ -69,8 +65,6 @@ define @vdiv_vx_nxv1i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.sdiv.nxv2i8(, , , i32) - define @vdiv_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv2i8: ; CHECK: # %bb.0: @@ -115,8 +109,6 @@ define @vdiv_vx_nxv2i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.sdiv.nxv3i8(, , , i32) - define @vdiv_vv_nxv3i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv3i8: ; CHECK: # %bb.0: @@ -127,8 +119,6 @@ define @vdiv_vv_nxv3i8( %va, %v } -declare @llvm.vp.sdiv.nxv4i8(, , , i32) - define @vdiv_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv4i8: ; CHECK: # %bb.0: @@ -173,8 +163,6 @@ define @vdiv_vx_nxv4i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.sdiv.nxv8i8(, , , i32) - define @vdiv_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv8i8: ; CHECK: # %bb.0: @@ -219,8 +207,6 @@ define @vdiv_vx_nxv8i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.sdiv.nxv16i8(, , , i32) - define @vdiv_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv16i8: ; CHECK: # %bb.0: @@ -265,8 +251,6 @@ define @vdiv_vx_nxv16i8_unmasked( %va, i8 % ret %v } -declare @llvm.vp.sdiv.nxv32i8(, , , i32) - define @vdiv_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv32i8: ; CHECK: # %bb.0: @@ -311,8 +295,6 @@ define @vdiv_vx_nxv32i8_unmasked( %va, i8 % ret %v } -declare @llvm.vp.sdiv.nxv64i8(, , , i32) - define @vdiv_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv64i8: ; CHECK: # %bb.0: @@ -357,8 +339,6 @@ define @vdiv_vx_nxv64i8_unmasked( %va, i8 % ret %v } -declare @llvm.vp.sdiv.nxv1i16(, , , i32) - define @vdiv_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vdiv_vv_nxv1i16: ; CHECK: # %bb.0: @@ -403,8 +383,6 @@ define @vdiv_vx_nxv1i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.sdiv.nxv2i16(, , , i32) - define @vdiv_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv2i16: ; CHECK: # %bb.0: @@ -449,8 +427,6 @@ define @vdiv_vx_nxv2i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.sdiv.nxv4i16(, , , i32) - define @vdiv_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv4i16: ; CHECK: # %bb.0: @@ -495,8 +471,6 @@ define @vdiv_vx_nxv4i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.sdiv.nxv8i16(, , , i32) - define @vdiv_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv8i16: ; CHECK: # %bb.0: @@ -541,8 +515,6 @@ define @vdiv_vx_nxv8i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.sdiv.nxv16i16(, , , i32) - define @vdiv_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv16i16: ; CHECK: # %bb.0: @@ -587,8 +559,6 @@ define @vdiv_vx_nxv16i16_unmasked( %va, i ret %v } -declare @llvm.vp.sdiv.nxv32i16(, , , i32) - define @vdiv_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv32i16: ; CHECK: # %bb.0: @@ -633,8 +603,6 @@ define @vdiv_vx_nxv32i16_unmasked( %va, i ret %v } -declare @llvm.vp.sdiv.nxv1i32(, , , i32) - define @vdiv_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv1i32: ; CHECK: # %bb.0: @@ -679,8 +647,6 @@ define @vdiv_vx_nxv1i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.sdiv.nxv2i32(, , , i32) - define @vdiv_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv2i32: ; CHECK: # %bb.0: @@ -725,8 +691,6 @@ define @vdiv_vx_nxv2i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.sdiv.nxv4i32(, , , i32) - define @vdiv_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv4i32: ; CHECK: # %bb.0: @@ -771,8 +735,6 @@ define @vdiv_vx_nxv4i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.sdiv.nxv8i32(, , , i32) - define @vdiv_vv_nxv8i32( %va, %b, %m, i32 zeroext 
%evl) { ; CHECK-LABEL: vdiv_vv_nxv8i32: ; CHECK: # %bb.0: @@ -817,8 +779,6 @@ define @vdiv_vx_nxv8i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.sdiv.nxv16i32(, , , i32) - define @vdiv_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv16i32: ; CHECK: # %bb.0: @@ -863,8 +823,6 @@ define @vdiv_vx_nxv16i32_unmasked( %va, i ret %v } -declare @llvm.vp.sdiv.nxv1i64(, , , i32) - define @vdiv_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv1i64: ; CHECK: # %bb.0: @@ -937,8 +895,6 @@ define @vdiv_vx_nxv1i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.sdiv.nxv2i64(, , , i32) - define @vdiv_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1011,8 +967,6 @@ define @vdiv_vx_nxv2i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.sdiv.nxv4i64(, , , i32) - define @vdiv_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1085,8 +1039,6 @@ define @vdiv_vx_nxv4i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.sdiv.nxv8i64(, , , i32) - define @vdiv_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv.ll index 122ebe50704ab..b2a7f27cb23bf 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vdiv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdiv.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vdiv.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vdiv_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vdiv_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vdiv_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vdiv_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: 
# %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vdiv_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv64i8.nxv64i8( - , - , - , - iXLen); - define @intrinsic_vdiv_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vdiv_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vdiv_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vdiv_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vdiv_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vdiv_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vdiv_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vdiv_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -544,14 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -569,12 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv32i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vdiv_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -591,14 +417,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vdiv_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vdiv_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vdiv_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -711,12 +501,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vdiv_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -733,14 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -758,12 +534,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vdiv_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -780,14 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -805,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv16i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vdiv_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -827,14 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv1i64.nxv1i64( - , - 
, - , - iXLen); - define @intrinsic_vdiv_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv2i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vdiv_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv4i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vdiv_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv8i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vdiv_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64( - , - , 
- , - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1042,12 +734,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv1i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vdiv_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1064,14 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1089,12 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vdiv_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1111,14 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1136,12 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vdiv_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1158,14 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1183,12 +833,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vdiv_vx_nxv8i8_nxv8i8_i8( %0, 
i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1205,14 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1230,12 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vdiv_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1252,14 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1277,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vdiv_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1299,14 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1324,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv64i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vdiv_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1346,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vdiv_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1371,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vdiv_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1393,14 +981,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1418,12 +998,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vdiv_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1440,14 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1465,12 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vdiv_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1487,14 +1047,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1512,12 +1064,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vdiv_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vdiv_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1534,14 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1559,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vdiv_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1581,14 +1113,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1606,12 +1130,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv32i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vdiv_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1628,14 +1146,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1653,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vdiv_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1675,14 +1179,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1700,12 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vdiv_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1722,14 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1747,12 +1229,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vdiv_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1769,14 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1794,12 +1262,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vdiv_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1816,14 +1278,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1841,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv16i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vdiv_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vdiv_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1863,14 +1311,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1888,12 +1328,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv1i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vdiv_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vdiv_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1922,14 +1356,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vdiv_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1959,12 +1385,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv2i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vdiv_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vdiv_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1993,14 +1413,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vdiv_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2030,12 +1442,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv4i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vdiv_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vdiv_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2064,14 +1470,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; 
RV32-LABEL: intrinsic_vdiv_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2101,12 +1499,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.nxv8i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vdiv_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vdiv_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2135,14 +1527,6 @@ entry: ret %a } -declare @llvm.riscv.vdiv.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vdiv_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vdiv_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll index 2f35f91d77a4e..de278dc2e748d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.udiv.nxv8i7(, , , i32) - define @vdivu_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vx_nxv8i7: ; CHECK: # %bb.0: @@ -22,8 +20,6 @@ define @vdivu_vx_nxv8i7( %a, i7 signext %b, < ret %v } -declare @llvm.vp.udiv.nxv1i8(, , , i32) - define @vdivu_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv1i8: ; CHECK: # %bb.0: @@ -68,8 +64,6 @@ define @vdivu_vx_nxv1i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.udiv.nxv2i8(, , , i32) - define @vdivu_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv2i8: ; CHECK: # %bb.0: @@ -114,8 +108,6 @@ define @vdivu_vx_nxv2i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.udiv.nxv3i8(, , , i32) - define @vdivu_vv_nxv3i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv3i8: ; CHECK: # %bb.0: @@ -126,8 +118,6 @@ define @vdivu_vv_nxv3i8( %va, %v } -declare @llvm.vp.udiv.nxv4i8(, , , i32) - define @vdivu_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) 
{ ; CHECK-LABEL: vdivu_vv_nxv4i8: ; CHECK: # %bb.0: @@ -172,8 +162,6 @@ define @vdivu_vx_nxv4i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.udiv.nxv8i8(, , , i32) - define @vdivu_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv8i8: ; CHECK: # %bb.0: @@ -218,8 +206,6 @@ define @vdivu_vx_nxv8i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.udiv.nxv16i8(, , , i32) - define @vdivu_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv16i8: ; CHECK: # %bb.0: @@ -264,8 +250,6 @@ define @vdivu_vx_nxv16i8_unmasked( %va, i8 ret %v } -declare @llvm.vp.udiv.nxv32i8(, , , i32) - define @vdivu_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv32i8: ; CHECK: # %bb.0: @@ -310,8 +294,6 @@ define @vdivu_vx_nxv32i8_unmasked( %va, i8 ret %v } -declare @llvm.vp.udiv.nxv64i8(, , , i32) - define @vdivu_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv64i8: ; CHECK: # %bb.0: @@ -356,8 +338,6 @@ define @vdivu_vx_nxv64i8_unmasked( %va, i8 ret %v } -declare @llvm.vp.udiv.nxv1i16(, , , i32) - define @vdivu_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv1i16: ; CHECK: # %bb.0: @@ -402,8 +382,6 @@ define @vdivu_vx_nxv1i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.udiv.nxv2i16(, , , i32) - define @vdivu_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv2i16: ; CHECK: # %bb.0: @@ -448,8 +426,6 @@ define @vdivu_vx_nxv2i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.udiv.nxv4i16(, , , i32) - define @vdivu_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv4i16: ; CHECK: # %bb.0: @@ -494,8 +470,6 @@ define @vdivu_vx_nxv4i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.udiv.nxv8i16(, , , i32) - define @vdivu_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv8i16: ; CHECK: # %bb.0: @@ -540,8 +514,6 @@ define @vdivu_vx_nxv8i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.udiv.nxv16i16(, , , i32) - define 
@vdivu_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv16i16: ; CHECK: # %bb.0: @@ -586,8 +558,6 @@ define @vdivu_vx_nxv16i16_unmasked( %va, ret %v } -declare @llvm.vp.udiv.nxv32i16(, , , i32) - define @vdivu_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv32i16: ; CHECK: # %bb.0: @@ -632,8 +602,6 @@ define @vdivu_vx_nxv32i16_unmasked( %va, ret %v } -declare @llvm.vp.udiv.nxv1i32(, , , i32) - define @vdivu_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv1i32: ; CHECK: # %bb.0: @@ -678,8 +646,6 @@ define @vdivu_vx_nxv1i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.udiv.nxv2i32(, , , i32) - define @vdivu_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv2i32: ; CHECK: # %bb.0: @@ -724,8 +690,6 @@ define @vdivu_vx_nxv2i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.udiv.nxv4i32(, , , i32) - define @vdivu_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv4i32: ; CHECK: # %bb.0: @@ -770,8 +734,6 @@ define @vdivu_vx_nxv4i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.udiv.nxv8i32(, , , i32) - define @vdivu_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv8i32: ; CHECK: # %bb.0: @@ -816,8 +778,6 @@ define @vdivu_vx_nxv8i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.udiv.nxv16i32(, , , i32) - define @vdivu_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv16i32: ; CHECK: # %bb.0: @@ -862,8 +822,6 @@ define @vdivu_vx_nxv16i32_unmasked( %va, ret %v } -declare @llvm.vp.udiv.nxv1i64(, , , i32) - define @vdivu_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv1i64: ; CHECK: # %bb.0: @@ -936,8 +894,6 @@ define @vdivu_vx_nxv1i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.udiv.nxv2i64(, , , i32) - define @vdivu_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1010,8 +966,6 @@ define @vdivu_vx_nxv2i64_unmasked( %va, i64 ret %v } 
-declare @llvm.vp.udiv.nxv4i64(, , , i32) - define @vdivu_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1084,8 +1038,6 @@ define @vdivu_vx_nxv4i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.udiv.nxv8i64(, , , i32) - define @vdivu_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu.ll index af05f09293546..847738f0dc140 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vdivu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdivu.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vdivu.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vdivu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vdivu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, 
iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vdivu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vdivu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vdivu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vv_nxv32i8_nxv32i8_nxv32i8( 
%0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv64i8.nxv64i8( - , - , - , - iXLen); - define @intrinsic_vdivu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vdivu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vdivu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv4i16.nxv4i16( - , - , - , - iXLen); - define 
@intrinsic_vdivu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vdivu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vdivu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -544,14 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -569,12 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv32i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vdivu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -591,14 +417,6 @@ entry: ret %a } -declare 
@llvm.riscv.vdivu.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vdivu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vdivu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -711,12 +501,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vdivu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -733,14 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # 
%bb.0: # %entry @@ -758,12 +534,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vdivu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -780,14 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -805,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv16i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vdivu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -827,14 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vdivu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv2i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vdivu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vdivu_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv4i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vdivu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv8i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vdivu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1042,12 +734,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv1i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vdivu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1064,14 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1089,12 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vdivu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1111,14 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1136,12 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vdivu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1158,14 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1183,12 +833,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vdivu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1205,14 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1230,12 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vdivu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv16i8_nxv16i8_i8: ; CHECK: # 
%bb.0: # %entry @@ -1252,14 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1277,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vdivu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1299,14 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1324,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv64i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vdivu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1346,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1371,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vdivu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1393,14 +981,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # 
%bb.0: # %entry @@ -1418,12 +998,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vdivu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1440,14 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1465,12 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vdivu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1487,14 +1047,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1512,12 +1064,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vdivu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1534,14 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1559,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vdivu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv16i16_nxv16i16_i16: ; CHECK: # 
%bb.0: # %entry @@ -1581,14 +1113,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1606,12 +1130,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv32i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vdivu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1628,14 +1146,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1653,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vdivu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1675,14 +1179,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1700,12 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vdivu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1722,14 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vdivu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1747,12 +1229,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vdivu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1769,14 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1794,12 +1262,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vdivu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1816,14 +1278,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1841,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv16i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vdivu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1863,14 +1311,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1888,12 +1328,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv1i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vdivu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; 
RV32-LABEL: intrinsic_vdivu_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1922,14 +1356,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vdivu_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1959,12 +1385,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv2i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vdivu_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vdivu_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1993,14 +1413,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vdivu_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2030,12 +1442,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv4i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vdivu_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vdivu_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2064,14 +1470,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vdivu_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2101,12 +1499,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.nxv8i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vdivu_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vdivu_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2135,14 +1527,6 @@ entry: ret %a } -declare @llvm.riscv.vdivu.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vdivu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) 
nounwind { ; RV32-LABEL: intrinsic_vdivu_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-extract-last-active.ll b/llvm/test/CodeGen/RISCV/rvv/vector-extract-last-active.ll index 1df4076aa2069..a3d634f1d7591 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vector-extract-last-active.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vector-extract-last-active.ll @@ -322,15 +322,3 @@ define double @extract_last_double_scalable( %data, , <16 x i1>, i8) -declare i16 @llvm.experimental.vector.extract.last.active.v8i16(<8 x i16>, <8 x i1>, i16) -declare i32 @llvm.experimental.vector.extract.last.active.v4i32(<4 x i32>, <4 x i1>, i32) -declare i64 @llvm.experimental.vector.extract.last.active.v2i64(<2 x i64>, <2 x i1>, i64) -declare float @llvm.experimental.vector.extract.last.active.v4f32(<4 x float>, <4 x i1>, float) -declare double @llvm.experimental.vector.extract.last.active.v2f64(<2 x double>, <2 x i1>, double) -declare i8 @llvm.experimental.vector.extract.last.active.nxv16i8(, , i8) -declare i16 @llvm.experimental.vector.extract.last.active.nxv8i16(, , i16) -declare i32 @llvm.experimental.vector.extract.last.active.nxv4i32(, , i32) -declare i64 @llvm.experimental.vector.extract.last.active.nxv2i64(, , i64) -declare float @llvm.experimental.vector.extract.last.active.nxv4f32(, , float) -declare double @llvm.experimental.vector.extract.last.active.nxv2f64(, , double) diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-reassociations.ll b/llvm/test/CodeGen/RISCV/rvv/vector-reassociations.ll index d132be96775ac..abe8e173b636f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vector-reassociations.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vector-reassociations.ll @@ -1,31 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 ; RUN: llc -mtriple=riscv32 -mattr='+v' -O3 %s -o - | FileCheck %s -declare @llvm.riscv.vadd.nxv1i8.nxv1i8( - , - , - , - i32) - -declare @llvm.riscv.vadd.mask.nxv1i8.nxv1i8( - , - 
, - , - , - i32, i32) - -declare @llvm.riscv.vsub.nxv1i8.nxv1i8( - , - , - , - i32) - -declare @llvm.riscv.vmul.nxv1i8.nxv1i8( - , - , - , - i32) - define @simple_vadd_vv( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: simple_vadd_vv: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll b/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll index 677a0aa712b5d..e3f43cd904198 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll @@ -10,8 +10,6 @@ ; Tests assume VLEN=128 or vscale_range_min=2. -declare @llvm.vector.splice.nxv1i1(, , i32) - define @splice_nxv1i1_offset_negone( %a, %b) #0 { ; NOVLDEP-LABEL: splice_nxv1i1_offset_negone: ; NOVLDEP: # %bb.0: @@ -96,8 +94,6 @@ define @splice_nxv1i1_offset_max( %a, %res } -declare @llvm.vector.splice.nxv2i1(, , i32) - define @splice_nxv2i1_offset_negone( %a, %b) #0 { ; NOVLDEP-LABEL: splice_nxv2i1_offset_negone: ; NOVLDEP: # %bb.0: @@ -182,8 +178,6 @@ define @splice_nxv2i1_offset_max( %a, %res } -declare @llvm.vector.splice.nxv4i1(, , i32) - define @splice_nxv4i1_offset_negone( %a, %b) #0 { ; NOVLDEP-LABEL: splice_nxv4i1_offset_negone: ; NOVLDEP: # %bb.0: @@ -268,8 +262,6 @@ define @splice_nxv4i1_offset_max( %a, %res } -declare @llvm.vector.splice.nxv8i1(, , i32) - define @splice_nxv8i1_offset_negone( %a, %b) #0 { ; NOVLDEP-LABEL: splice_nxv8i1_offset_negone: ; NOVLDEP: # %bb.0: @@ -350,8 +342,6 @@ define @splice_nxv8i1_offset_max( %a, %res } -declare @llvm.vector.splice.nxv16i1(, , i32) - define @splice_nxv16i1_offset_negone( %a, %b) #0 { ; NOVLDEP-LABEL: splice_nxv16i1_offset_negone: ; NOVLDEP: # %bb.0: @@ -436,8 +426,6 @@ define @splice_nxv16i1_offset_max( %a, %res } -declare @llvm.vector.splice.nxv32i1(, , i32) - define @splice_nxv32i1_offset_negone( %a, %b) #0 { ; NOVLDEP-LABEL: splice_nxv32i1_offset_negone: ; NOVLDEP: # %bb.0: @@ -522,8 +510,6 @@ define @splice_nxv32i1_offset_max( %a, %res } -declare @llvm.vector.splice.nxv64i1(, , i32) - define 
@splice_nxv64i1_offset_negone( %a, %b) #0 { ; NOVLDEP-LABEL: splice_nxv64i1_offset_negone: ; NOVLDEP: # %bb.0: @@ -608,8 +594,6 @@ define @splice_nxv64i1_offset_max( %a, %res } -declare @llvm.vector.splice.nxv1i8(, , i32) - define @splice_nxv1i8_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv1i8_offset_zero: ; CHECK: # %bb.0: @@ -693,8 +677,6 @@ define @splice_nxv1i8_offset_max( %a, %res } -declare @llvm.vector.splice.nxv2i8(, , i32) - define @splice_nxv2i8_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv2i8_offset_zero: ; CHECK: # %bb.0: @@ -778,8 +760,6 @@ define @splice_nxv2i8_offset_max( %a, %res } -declare @llvm.vector.splice.nxv4i8(, , i32) - define @splice_nxv4i8_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv4i8_offset_zero: ; CHECK: # %bb.0: @@ -863,8 +843,6 @@ define @splice_nxv4i8_offset_max( %a, %res } -declare @llvm.vector.splice.nxv8i8(, , i32) - define @splice_nxv8i8_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv8i8_offset_zero: ; CHECK: # %bb.0: @@ -942,8 +920,6 @@ define @splice_nxv8i8_offset_max( %a, %res } -declare @llvm.vector.splice.nxv16i8(, , i32) - define @splice_nxv16i8_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv16i8_offset_zero: ; CHECK: # %bb.0: @@ -1029,8 +1005,6 @@ define @splice_nxv16i8_offset_max( %a, %res } -declare @llvm.vector.splice.nxv32i8(, , i32) - define @splice_nxv32i8_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv32i8_offset_zero: ; CHECK: # %bb.0: @@ -1118,8 +1092,6 @@ define @splice_nxv32i8_offset_max( %a, %res } -declare @llvm.vector.splice.nxv64i8(, , i32) - define @splice_nxv64i8_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv64i8_offset_zero: ; CHECK: # %bb.0: @@ -1207,8 +1179,6 @@ define @splice_nxv64i8_offset_max( %a, %res } -declare @llvm.vector.splice.nxv1i16(, , i32) - define @splice_nxv1i16_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv1i16_offset_zero: ; CHECK: # %bb.0: @@ -1292,8 +1262,6 @@ define @splice_nxv1i16_offset_max( %a, %res } -declare @llvm.vector.splice.nxv2i16(, 
, i32) - define @splice_nxv2i16_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv2i16_offset_zero: ; CHECK: # %bb.0: @@ -1377,8 +1345,6 @@ define @splice_nxv2i16_offset_max( %a, %res } -declare @llvm.vector.splice.nxv4i16(, , i32) - define @splice_nxv4i16_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv4i16_offset_zero: ; CHECK: # %bb.0: @@ -1462,8 +1428,6 @@ define @splice_nxv4i16_offset_max( %a, %res } -declare @llvm.vector.splice.nxv8i16(, , i32) - define @splice_nxv8i16_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv8i16_offset_zero: ; CHECK: # %bb.0: @@ -1541,8 +1505,6 @@ define @splice_nxv8i16_offset_max( %a, %res } -declare @llvm.vector.splice.nxv16i16(, , i32) - define @splice_nxv16i16_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv16i16_offset_zero: ; CHECK: # %bb.0: @@ -1628,8 +1590,6 @@ define @splice_nxv16i16_offset_max( %a, < ret %res } -declare @llvm.vector.splice.nxv32i16(, , i32) - define @splice_nxv32i16_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv32i16_offset_zero: ; CHECK: # %bb.0: @@ -1717,8 +1677,6 @@ define @splice_nxv32i16_offset_max( %a, < ret %res } -declare @llvm.vector.splice.nxv1i32(, , i32) - define @splice_nxv1i32_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv1i32_offset_zero: ; CHECK: # %bb.0: @@ -1802,8 +1760,6 @@ define @splice_nxv1i32_offset_max( %a, %res } -declare @llvm.vector.splice.nxv2i32(, , i32) - define @splice_nxv2i32_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv2i32_offset_zero: ; CHECK: # %bb.0: @@ -1887,8 +1843,6 @@ define @splice_nxv2i32_offset_max( %a, %res } -declare @llvm.vector.splice.nxv4i32(, , i32) - define @splice_nxv4i32_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv4i32_offset_zero: ; CHECK: # %bb.0: @@ -1972,8 +1926,6 @@ define @splice_nxv4i32_offset_max( %a, %res } -declare @llvm.vector.splice.nxv8i32(, , i32) - define @splice_nxv8i32_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv8i32_offset_zero: ; CHECK: # %bb.0: @@ -2051,8 +2003,6 @@ define 
@splice_nxv8i32_offset_max( %a, %res } -declare @llvm.vector.splice.nxv16i32(, , i32) - define @splice_nxv16i32_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv16i32_offset_zero: ; CHECK: # %bb.0: @@ -2138,8 +2088,6 @@ define @splice_nxv16i32_offset_max( %a, < ret %res } -declare @llvm.vector.splice.nxv1i64(, , i32) - define @splice_nxv1i64_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv1i64_offset_zero: ; CHECK: # %bb.0: @@ -2223,8 +2171,6 @@ define @splice_nxv1i64_offset_max( %a, %res } -declare @llvm.vector.splice.nxv2i64(, , i32) - define @splice_nxv2i64_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv2i64_offset_zero: ; CHECK: # %bb.0: @@ -2308,8 +2254,6 @@ define @splice_nxv2i64_offset_max( %a, %res } -declare @llvm.vector.splice.nxv4i64(, , i32) - define @splice_nxv4i64_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv4i64_offset_zero: ; CHECK: # %bb.0: @@ -2393,8 +2337,6 @@ define @splice_nxv4i64_offset_max( %a, %res } -declare @llvm.vector.splice.nxv8i64(, , i32) - define @splice_nxv8i64_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv8i64_offset_zero: ; CHECK: # %bb.0: @@ -2472,8 +2414,6 @@ define @splice_nxv8i64_offset_max( %a, %res } -declare @llvm.vector.splice.nxv1bf16(, , i32) - define @splice_nxv1bf16_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv1bf16_offset_zero: ; CHECK: # %bb.0: @@ -2557,8 +2497,6 @@ define @splice_nxv1bf16_offset_max( % ret %res } -declare @llvm.vector.splice.nxv2bf16(, , i32) - define @splice_nxv2bf16_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv2bf16_offset_zero: ; CHECK: # %bb.0: @@ -2642,8 +2580,6 @@ define @splice_nxv2bf16_offset_max( % ret %res } -declare @llvm.vector.splice.nxv4bf16(, , i32) - define @splice_nxv4bf16_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv4bf16_offset_zero: ; CHECK: # %bb.0: @@ -2727,8 +2663,6 @@ define @splice_nxv4bf16_offset_max( % ret %res } -declare @llvm.vector.splice.nxv8bf16(, , i32) - define @splice_nxv8bf16_offset_zero( %a, %b) #0 { ; CHECK-LABEL: 
splice_nxv8bf16_offset_zero: ; CHECK: # %bb.0: @@ -2806,8 +2740,6 @@ define @splice_nxv8bf16_offset_max( % ret %res } -declare @llvm.vector.splice.nxv16bf16(, , i32) - define @splice_nxv16bf16_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv16bf16_offset_zero: ; CHECK: # %bb.0: @@ -2893,8 +2825,6 @@ define @splice_nxv16bf16_offset_max( %res } -declare @llvm.vector.splice.nxv32bf16(, , i32) - define @splice_nxv32bf16_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv32bf16_offset_zero: ; CHECK: # %bb.0: @@ -2982,8 +2912,6 @@ define @splice_nxv32bf16_offset_max( %res } -declare @llvm.vector.splice.nxv1f16(, , i32) - define @splice_nxv1f16_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv1f16_offset_zero: ; CHECK: # %bb.0: @@ -3067,8 +2995,6 @@ define @splice_nxv1f16_offset_max( %a, %res } -declare @llvm.vector.splice.nxv2f16(, , i32) - define @splice_nxv2f16_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv2f16_offset_zero: ; CHECK: # %bb.0: @@ -3152,8 +3078,6 @@ define @splice_nxv2f16_offset_max( %a, %res } -declare @llvm.vector.splice.nxv4f16(, , i32) - define @splice_nxv4f16_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv4f16_offset_zero: ; CHECK: # %bb.0: @@ -3237,8 +3161,6 @@ define @splice_nxv4f16_offset_max( %a, %res } -declare @llvm.vector.splice.nxv8f16(, , i32) - define @splice_nxv8f16_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv8f16_offset_zero: ; CHECK: # %bb.0: @@ -3316,8 +3238,6 @@ define @splice_nxv8f16_offset_max( %a, %res } -declare @llvm.vector.splice.nxv16f16(, , i32) - define @splice_nxv16f16_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv16f16_offset_zero: ; CHECK: # %bb.0: @@ -3403,8 +3323,6 @@ define @splice_nxv16f16_offset_max( %a, ret %res } -declare @llvm.vector.splice.nxv32f16(, , i32) - define @splice_nxv32f16_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv32f16_offset_zero: ; CHECK: # %bb.0: @@ -3492,8 +3410,6 @@ define @splice_nxv32f16_offset_max( %a, ret %res } -declare @llvm.vector.splice.nxv1f32(, , i32) - 
define @splice_nxv1f32_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv1f32_offset_zero: ; CHECK: # %bb.0: @@ -3577,8 +3493,6 @@ define @splice_nxv1f32_offset_max( %a, ret %res } -declare @llvm.vector.splice.nxv2f32(, , i32) - define @splice_nxv2f32_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv2f32_offset_zero: ; CHECK: # %bb.0: @@ -3662,8 +3576,6 @@ define @splice_nxv2f32_offset_max( %a, ret %res } -declare @llvm.vector.splice.nxv4f32(, , i32) - define @splice_nxv4f32_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv4f32_offset_zero: ; CHECK: # %bb.0: @@ -3747,8 +3659,6 @@ define @splice_nxv4f32_offset_max( %a, ret %res } -declare @llvm.vector.splice.nxv8f32(, , i32) - define @splice_nxv8f32_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv8f32_offset_zero: ; CHECK: # %bb.0: @@ -3826,8 +3736,6 @@ define @splice_nxv8f32_offset_max( %a, ret %res } -declare @llvm.vector.splice.nxv16f32(, , i32) - define @splice_nxv16f32_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv16f32_offset_zero: ; CHECK: # %bb.0: @@ -3913,8 +3821,6 @@ define @splice_nxv16f32_offset_max( % ret %res } -declare @llvm.vector.splice.nxv1f64(, , i32) - define @splice_nxv1f64_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv1f64_offset_zero: ; CHECK: # %bb.0: @@ -3998,8 +3904,6 @@ define @splice_nxv1f64_offset_max( %a ret %res } -declare @llvm.vector.splice.nxv2f64(, , i32) - define @splice_nxv2f64_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv2f64_offset_zero: ; CHECK: # %bb.0: @@ -4083,8 +3987,6 @@ define @splice_nxv2f64_offset_max( %a ret %res } -declare @llvm.vector.splice.nxv4f64(, , i32) - define @splice_nxv4f64_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv4f64_offset_zero: ; CHECK: # %bb.0: @@ -4168,8 +4070,6 @@ define @splice_nxv4f64_offset_max( %a ret %res } -declare @llvm.vector.splice.nxv8f64(, , i32) - define @splice_nxv8f64_offset_zero( %a, %b) #0 { ; CHECK-LABEL: splice_nxv8f64_offset_zero: ; CHECK: # %bb.0: diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vector-tuple-align.ll b/llvm/test/CodeGen/RISCV/rvv/vector-tuple-align.ll index 37a50e1539982..831912fb61fb5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vector-tuple-align.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vector-tuple-align.ll @@ -1,8 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+v -stop-after=finalize-isel -target-abi=lp64 < %s | FileCheck %s -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2), ptr , i64, i64) - define target("riscv.vector.tuple", , 2) @test_vlseg_nxv8i8(ptr %p, i64 %vl) { ; CHECK-LABEL: name: test_vlseg_nxv8i8 ; CHECK: bb.0.entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode.ll index 28426ad018b83..a7874d1f519fd 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode.ll @@ -174,8 +174,6 @@ define @nxv32bf16( %v) { ret %r } -declare @llvm.fabs.nxv1f16() - define @vfabs_nxv1f16( %v) { ; ZVFH-LABEL: vfabs_nxv1f16: ; ZVFH: # %bb.0: @@ -202,8 +200,6 @@ define @vfabs_nxv1f16( %v) { ret %r } -declare @llvm.fabs.nxv2f16() - define @vfabs_nxv2f16( %v) { ; ZVFH-LABEL: vfabs_nxv2f16: ; ZVFH: # %bb.0: @@ -230,8 +226,6 @@ define @vfabs_nxv2f16( %v) { ret %r } -declare @llvm.fabs.nxv4f16() - define @vfabs_nxv4f16( %v) { ; ZVFH-LABEL: vfabs_nxv4f16: ; ZVFH: # %bb.0: @@ -258,8 +252,6 @@ define @vfabs_nxv4f16( %v) { ret %r } -declare @llvm.fabs.nxv8f16() - define @vfabs_nxv8f16( %v) { ; ZVFH-LABEL: vfabs_nxv8f16: ; ZVFH: # %bb.0: @@ -286,8 +278,6 @@ define @vfabs_nxv8f16( %v) { ret %r } -declare @llvm.fabs.nxv16f16() - define @vfabs_nxv16f16( %v) { ; ZVFH-LABEL: vfabs_nxv16f16: ; ZVFH: # %bb.0: @@ -314,8 +304,6 @@ define @vfabs_nxv16f16( %v) { ret %r } -declare @llvm.fabs.nxv32f16() - define @vfabs_nxv32f16( %v) { ; ZVFH-LABEL: vfabs_nxv32f16: ; ZVFH: # %bb.0: @@ -342,8 +330,6 @@ 
define @vfabs_nxv32f16( %v) { ret %r } -declare @llvm.fabs.nxv1f32() - define @vfabs_nxv1f32( %v) { ; CHECK-LABEL: vfabs_nxv1f32: ; CHECK: # %bb.0: @@ -354,8 +340,6 @@ define @vfabs_nxv1f32( %v) { ret %r } -declare @llvm.fabs.nxv2f32() - define @vfabs_nxv2f32( %v) { ; CHECK-LABEL: vfabs_nxv2f32: ; CHECK: # %bb.0: @@ -366,8 +350,6 @@ define @vfabs_nxv2f32( %v) { ret %r } -declare @llvm.fabs.nxv4f32() - define @vfabs_nxv4f32( %v) { ; CHECK-LABEL: vfabs_nxv4f32: ; CHECK: # %bb.0: @@ -378,8 +360,6 @@ define @vfabs_nxv4f32( %v) { ret %r } -declare @llvm.fabs.nxv8f32() - define @vfabs_nxv8f32( %v) { ; CHECK-LABEL: vfabs_nxv8f32: ; CHECK: # %bb.0: @@ -390,8 +370,6 @@ define @vfabs_nxv8f32( %v) { ret %r } -declare @llvm.fabs.nxv16f32() - define @vfabs_nxv16f32( %v) { ; CHECK-LABEL: vfabs_nxv16f32: ; CHECK: # %bb.0: @@ -402,8 +380,6 @@ define @vfabs_nxv16f32( %v) { ret %r } -declare @llvm.fabs.nxv1f64() - define @vfabs_nxv1f64( %v) { ; CHECK-LABEL: vfabs_nxv1f64: ; CHECK: # %bb.0: @@ -414,8 +390,6 @@ define @vfabs_nxv1f64( %v) { ret %r } -declare @llvm.fabs.nxv2f64() - define @vfabs_nxv2f64( %v) { ; CHECK-LABEL: vfabs_nxv2f64: ; CHECK: # %bb.0: @@ -426,8 +400,6 @@ define @vfabs_nxv2f64( %v) { ret %r } -declare @llvm.fabs.nxv4f64() - define @vfabs_nxv4f64( %v) { ; CHECK-LABEL: vfabs_nxv4f64: ; CHECK: # %bb.0: @@ -438,8 +410,6 @@ define @vfabs_nxv4f64( %v) { ret %r } -declare @llvm.fabs.nxv8f64() - define @vfabs_nxv8f64( %v) { ; CHECK-LABEL: vfabs_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll index c6888c0bcae0f..e0fcd4009ad2e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll @@ -324,8 +324,6 @@ define @vfabs_vv_nxv32bf16_unmasked( %v } -declare @llvm.vp.fabs.nxv1f16(, , i32) - define @vfabs_vv_nxv1f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfabs_vv_nxv1f16: ; ZVFH: # %bb.0: @@ -378,8 +376,6 @@ define @vfabs_vv_nxv1f16_unmasked( %va, i ret %v } 
-declare @llvm.vp.fabs.nxv2f16(, , i32) - define @vfabs_vv_nxv2f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfabs_vv_nxv2f16: ; ZVFH: # %bb.0: @@ -432,8 +428,6 @@ define @vfabs_vv_nxv2f16_unmasked( %va, i ret %v } -declare @llvm.vp.fabs.nxv4f16(, , i32) - define @vfabs_vv_nxv4f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfabs_vv_nxv4f16: ; ZVFH: # %bb.0: @@ -486,8 +480,6 @@ define @vfabs_vv_nxv4f16_unmasked( %va, i ret %v } -declare @llvm.vp.fabs.nxv8f16(, , i32) - define @vfabs_vv_nxv8f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfabs_vv_nxv8f16: ; ZVFH: # %bb.0: @@ -540,8 +532,6 @@ define @vfabs_vv_nxv8f16_unmasked( %va, i ret %v } -declare @llvm.vp.fabs.nxv16f16(, , i32) - define @vfabs_vv_nxv16f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfabs_vv_nxv16f16: ; ZVFH: # %bb.0: @@ -594,8 +584,6 @@ define @vfabs_vv_nxv16f16_unmasked( %va ret %v } -declare @llvm.vp.fabs.nxv32f16(, , i32) - define @vfabs_vv_nxv32f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfabs_vv_nxv32f16: ; ZVFH: # %bb.0: @@ -648,8 +636,6 @@ define @vfabs_vv_nxv32f16_unmasked( %va ret %v } -declare @llvm.vp.fabs.nxv1f32(, , i32) - define @vfabs_vv_nxv1f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_nxv1f32: ; CHECK: # %bb.0: @@ -670,8 +656,6 @@ define @vfabs_vv_nxv1f32_unmasked( %va, ret %v } -declare @llvm.vp.fabs.nxv2f32(, , i32) - define @vfabs_vv_nxv2f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_nxv2f32: ; CHECK: # %bb.0: @@ -692,8 +676,6 @@ define @vfabs_vv_nxv2f32_unmasked( %va, ret %v } -declare @llvm.vp.fabs.nxv4f32(, , i32) - define @vfabs_vv_nxv4f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_nxv4f32: ; CHECK: # %bb.0: @@ -714,8 +696,6 @@ define @vfabs_vv_nxv4f32_unmasked( %va, ret %v } -declare @llvm.vp.fabs.nxv8f32(, , i32) - define @vfabs_vv_nxv8f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_nxv8f32: ; CHECK: # %bb.0: @@ -736,8 +716,6 @@ define @vfabs_vv_nxv8f32_unmasked( %va, ret %v } -declare @llvm.vp.fabs.nxv16f32(, , i32) - 
define @vfabs_vv_nxv16f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_nxv16f32: ; CHECK: # %bb.0: @@ -758,8 +736,6 @@ define @vfabs_vv_nxv16f32_unmasked( % ret %v } -declare @llvm.vp.fabs.nxv1f64(, , i32) - define @vfabs_vv_nxv1f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_nxv1f64: ; CHECK: # %bb.0: @@ -780,8 +756,6 @@ define @vfabs_vv_nxv1f64_unmasked( %v ret %v } -declare @llvm.vp.fabs.nxv2f64(, , i32) - define @vfabs_vv_nxv2f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_nxv2f64: ; CHECK: # %bb.0: @@ -802,8 +776,6 @@ define @vfabs_vv_nxv2f64_unmasked( %v ret %v } -declare @llvm.vp.fabs.nxv4f64(, , i32) - define @vfabs_vv_nxv4f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_nxv4f64: ; CHECK: # %bb.0: @@ -824,8 +796,6 @@ define @vfabs_vv_nxv4f64_unmasked( %v ret %v } -declare @llvm.vp.fabs.nxv7f64(, , i32) - define @vfabs_vv_nxv7f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_nxv7f64: ; CHECK: # %bb.0: @@ -846,8 +816,6 @@ define @vfabs_vv_nxv7f64_unmasked( %v ret %v } -declare @llvm.vp.fabs.nxv8f64(, , i32) - define @vfabs_vv_nxv8f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_nxv8f64: ; CHECK: # %bb.0: @@ -869,7 +837,6 @@ define @vfabs_vv_nxv8f64_unmasked( %v } ; Test splitting. 
-declare @llvm.vp.fabs.nxv16f64(, , i32) define @vfabs_vv_nxv16f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_nxv16f64: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-bf.ll index db1b081258d5f..0130af7d9e507 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfadd-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfadd.nxv1bf16.nxv1bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv2bf16.nxv2bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv4bf16.nxv4bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -128,13 
+96,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv8bf16.nxv8bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv16bf16.nxv16bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv32bf16.nxv32bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv32bf16_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfadd_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -305,12 +227,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv1bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv1bf16_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv1bf16_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -329,13 +245,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv1bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv1bf16_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1bf16_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -355,12 +264,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv2bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv2bf16_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv2bf16_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -379,13 +282,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv2bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv2bf16_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2bf16_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -405,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv4bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv4bf16_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv4bf16_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -429,13 +319,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv4bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv4bf16_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4bf16_nxv4bf16_bf16: ; 
CHECK: # %bb.0: # %entry @@ -455,12 +338,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv8bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv8bf16_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv8bf16_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -479,13 +356,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv8bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv8bf16_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8bf16_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -505,12 +375,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv16bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv16bf16_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv16bf16_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -529,13 +393,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv16bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv16bf16_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16bf16_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -555,12 +412,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv32bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv32bf16_nxv32bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv32bf16_nxv32bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -579,13 +430,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv32bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv32bf16_nxv32bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv32bf16_nxv32bf16_bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-constrained-sdnode.ll 
b/llvm/test/CodeGen/RISCV/rvv/vfadd-constrained-sdnode.ll index 03ef641364335..8d10b21fc3e3f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfadd-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-constrained-sdnode.ll @@ -267,7 +267,6 @@ define @vfadd_vf_nxv32bf16( %va, bf ret %vc } -declare @llvm.experimental.constrained.fadd.nxv1f16(, , metadata, metadata) define @vfadd_vv_nxv1f16( %va, %vb) strictfp { ; ZVFH-LABEL: vfadd_vv_nxv1f16: ; ZVFH: # %bb.0: # %entry @@ -315,7 +314,6 @@ define @vfadd_vf_nxv1f16( %va, half %b) s ret %vc } -declare @llvm.experimental.constrained.fadd.nxv2f16(, , metadata, metadata) define @vfadd_vv_nxv2f16( %va, %vb) strictfp { ; ZVFH-LABEL: vfadd_vv_nxv2f16: ; ZVFH: # %bb.0: # %entry @@ -363,7 +361,6 @@ define @vfadd_vf_nxv2f16( %va, half %b) s ret %vc } -declare @llvm.experimental.constrained.fadd.nxv4f16(, , metadata, metadata) define @vfadd_vv_nxv4f16( %va, %vb) strictfp { ; ZVFH-LABEL: vfadd_vv_nxv4f16: ; ZVFH: # %bb.0: # %entry @@ -411,7 +408,6 @@ define @vfadd_vf_nxv4f16( %va, half %b) s ret %vc } -declare @llvm.experimental.constrained.fadd.nxv8f16(, , metadata, metadata) define @vfadd_vv_nxv8f16( %va, %vb) strictfp { ; ZVFH-LABEL: vfadd_vv_nxv8f16: ; ZVFH: # %bb.0: # %entry @@ -459,7 +455,6 @@ define @vfadd_vf_nxv8f16( %va, half %b) s ret %vc } -declare @llvm.experimental.constrained.fadd.nxv16f16(, , metadata, metadata) define @vfadd_vv_nxv16f16( %va, %vb) strictfp { ; ZVFH-LABEL: vfadd_vv_nxv16f16: ; ZVFH: # %bb.0: # %entry @@ -507,7 +502,6 @@ define @vfadd_vf_nxv16f16( %va, half %b ret %vc } -declare @llvm.experimental.constrained.fadd.nxv32f16(, , metadata, metadata) define @vfadd_vv_nxv32f16( %va, %vb) strictfp { ; ZVFH-LABEL: vfadd_vv_nxv32f16: ; ZVFH: # %bb.0: # %entry @@ -600,7 +594,6 @@ define @vfadd_vf_nxv32f16( %va, half %b ret %vc } -declare @llvm.experimental.constrained.fadd.nxv1f32(, , metadata, metadata) define @vfadd_vv_nxv1f32( %va, %vb) strictfp { ; CHECK-LABEL: vfadd_vv_nxv1f32: ; CHECK: # %bb.0: # %entry 
@@ -624,7 +617,6 @@ define @vfadd_vf_nxv1f32( %va, float %b ret %vc } -declare @llvm.experimental.constrained.fadd.nxv2f32(, , metadata, metadata) define @vfadd_vv_nxv2f32( %va, %vb) strictfp { ; CHECK-LABEL: vfadd_vv_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -648,7 +640,6 @@ define @vfadd_vf_nxv2f32( %va, float %b ret %vc } -declare @llvm.experimental.constrained.fadd.nxv4f32(, , metadata, metadata) define @vfadd_vv_nxv4f32( %va, %vb) strictfp { ; CHECK-LABEL: vfadd_vv_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -672,7 +663,6 @@ define @vfadd_vf_nxv4f32( %va, float %b ret %vc } -declare @llvm.experimental.constrained.fadd.nxv8f32(, , metadata, metadata) define @vfadd_vv_nxv8f32( %va, %vb) strictfp { ; CHECK-LABEL: vfadd_vv_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -696,7 +686,6 @@ define @vfadd_vf_nxv8f32( %va, float %b ret %vc } -declare @llvm.experimental.constrained.fadd.nxv16f32(, , metadata, metadata) define @vfadd_vv_nxv16f32( %va, %vb) strictfp { ; CHECK-LABEL: vfadd_vv_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -720,7 +709,6 @@ define @vfadd_vf_nxv16f32( %va, float ret %vc } -declare @llvm.experimental.constrained.fadd.nxv1f64(, , metadata, metadata) define @vfadd_vv_nxv1f64( %va, %vb) strictfp { ; CHECK-LABEL: vfadd_vv_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -744,7 +732,6 @@ define @vfadd_vf_nxv1f64( %va, double ret %vc } -declare @llvm.experimental.constrained.fadd.nxv2f64(, , metadata, metadata) define @vfadd_vv_nxv2f64( %va, %vb) strictfp { ; CHECK-LABEL: vfadd_vv_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -768,7 +755,6 @@ define @vfadd_vf_nxv2f64( %va, double ret %vc } -declare @llvm.experimental.constrained.fadd.nxv4f64(, , metadata, metadata) define @vfadd_vv_nxv4f64( %va, %vb) strictfp { ; CHECK-LABEL: vfadd_vv_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -792,7 +778,6 @@ define @vfadd_vf_nxv4f64( %va, double ret %vc } -declare @llvm.experimental.constrained.fadd.nxv8f64(, , metadata, metadata) define @vfadd_vv_nxv8f64( %va, %vb) strictfp { ; CHECK-LABEL: vfadd_vv_nxv8f64: 
; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll index 1ab2209647c80..d03b068e11ea8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll @@ -15,8 +15,6 @@ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ ; RUN: --check-prefixes=CHECK,ZVFBFA -declare @llvm.vp.fadd.nxv1bf16(, , , i32) - define @vfadd_vv_nxv1bf16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfadd_vv_nxv1bf16: ; ZVFH: # %bb.0: @@ -267,8 +265,6 @@ define @vfadd_vf_nxv1bf16_unmasked_commute( %v } -declare @llvm.vp.fadd.nxv2bf16(, , , i32) - define @vfadd_vv_nxv2bf16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfadd_vv_nxv2bf16: ; ZVFH: # %bb.0: @@ -431,8 +427,6 @@ define @vfadd_vf_nxv2bf16_unmasked( % ret %v } -declare @llvm.vp.fadd.nxv4bf16(, , , i32) - define @vfadd_vv_nxv4bf16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfadd_vv_nxv4bf16: ; ZVFH: # %bb.0: @@ -595,8 +589,6 @@ define @vfadd_vf_nxv4bf16_unmasked( % ret %v } -declare @llvm.vp.fadd.nxv8bf16(, , , i32) - define @vfadd_vv_nxv8bf16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfadd_vv_nxv8bf16: ; ZVFH: # %bb.0: @@ -759,8 +751,6 @@ define @vfadd_vf_nxv8bf16_unmasked( % ret %v } -declare @llvm.vp.fadd.nxv16bf16(, , , i32) - define @vfadd_vv_nxv16bf16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfadd_vv_nxv16bf16: ; ZVFH: # %bb.0: @@ -923,8 +913,6 @@ define @vfadd_vf_nxv16bf16_unmasked( %v } -declare @llvm.vp.fadd.nxv32bf16(, , , i32) - define @vfadd_vv_nxv32bf16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfadd_vv_nxv32bf16: ; ZVFH: # %bb.0: @@ -1565,7 +1553,6 @@ define @vfadd_vf_nxv32bf16_unmasked( @llvm.vp.fadd.nxv32bf16( %va, %vb, splat (i1 true), i32 %evl) ret %v } -declare @llvm.vp.fadd.nxv1f16(, , , i32) define @vfadd_vv_nxv1f16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfadd_vv_nxv1f16: @@ -1783,8 +1770,6 @@ define @vfadd_vf_nxv1f16_unmasked_commute( %v } -declare 
@llvm.vp.fadd.nxv2f16(, , , i32) - define @vfadd_vv_nxv2f16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfadd_vv_nxv2f16: ; ZVFH: # %bb.0: @@ -1925,8 +1910,6 @@ define @vfadd_vf_nxv2f16_unmasked( %va, h ret %v } -declare @llvm.vp.fadd.nxv4f16(, , , i32) - define @vfadd_vv_nxv4f16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfadd_vv_nxv4f16: ; ZVFH: # %bb.0: @@ -2067,8 +2050,6 @@ define @vfadd_vf_nxv4f16_unmasked( %va, h ret %v } -declare @llvm.vp.fadd.nxv8f16(, , , i32) - define @vfadd_vv_nxv8f16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfadd_vv_nxv8f16: ; ZVFH: # %bb.0: @@ -2209,8 +2190,6 @@ define @vfadd_vf_nxv8f16_unmasked( %va, h ret %v } -declare @llvm.vp.fadd.nxv16f16(, , , i32) - define @vfadd_vv_nxv16f16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfadd_vv_nxv16f16: ; ZVFH: # %bb.0: @@ -2351,8 +2330,6 @@ define @vfadd_vf_nxv16f16_unmasked( %va ret %v } -declare @llvm.vp.fadd.nxv32f16(, , , i32) - define @vfadd_vv_nxv32f16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfadd_vv_nxv32f16: ; ZVFH: # %bb.0: @@ -2819,8 +2796,6 @@ define @vfadd_vf_nxv32f16_unmasked( %va ret %v } -declare @llvm.vp.fadd.nxv1f32(, , , i32) - define @vfadd_vv_nxv1f32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_nxv1f32: ; CHECK: # %bb.0: @@ -2865,8 +2840,6 @@ define @vfadd_vf_nxv1f32_unmasked( %va, ret %v } -declare @llvm.vp.fadd.nxv2f32(, , , i32) - define @vfadd_vv_nxv2f32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_nxv2f32: ; CHECK: # %bb.0: @@ -2911,8 +2884,6 @@ define @vfadd_vf_nxv2f32_unmasked( %va, ret %v } -declare @llvm.vp.fadd.nxv4f32(, , , i32) - define @vfadd_vv_nxv4f32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_nxv4f32: ; CHECK: # %bb.0: @@ -2957,8 +2928,6 @@ define @vfadd_vf_nxv4f32_unmasked( %va, ret %v } -declare @llvm.vp.fadd.nxv8f32(, , , i32) - define @vfadd_vv_nxv8f32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_nxv8f32: ; CHECK: # %bb.0: @@ -3003,8 +2972,6 @@ define 
@vfadd_vf_nxv8f32_unmasked( %va, ret %v } -declare @llvm.vp.fadd.nxv16f32(, , , i32) - define @vfadd_vv_nxv16f32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_nxv16f32: ; CHECK: # %bb.0: @@ -3049,8 +3016,6 @@ define @vfadd_vf_nxv16f32_unmasked( % ret %v } -declare @llvm.vp.fadd.nxv1f64(, , , i32) - define @vfadd_vv_nxv1f64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_nxv1f64: ; CHECK: # %bb.0: @@ -3095,8 +3060,6 @@ define @vfadd_vf_nxv1f64_unmasked( %v ret %v } -declare @llvm.vp.fadd.nxv2f64(, , , i32) - define @vfadd_vv_nxv2f64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_nxv2f64: ; CHECK: # %bb.0: @@ -3141,8 +3104,6 @@ define @vfadd_vf_nxv2f64_unmasked( %v ret %v } -declare @llvm.vp.fadd.nxv4f64(, , , i32) - define @vfadd_vv_nxv4f64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_nxv4f64: ; CHECK: # %bb.0: @@ -3187,8 +3148,6 @@ define @vfadd_vf_nxv4f64_unmasked( %v ret %v } -declare @llvm.vp.fadd.nxv7f64(, , , i32) - define @vfadd_vv_nxv7f64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_nxv7f64: ; CHECK: # %bb.0: @@ -3199,8 +3158,6 @@ define @vfadd_vv_nxv7f64( %va, %v } -declare @llvm.vp.fadd.nxv8f64(, , , i32) - define @vfadd_vv_nxv8f64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd.ll index 10f7d922efeed..d9e522be048d9 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfadd.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfadd.ll @@ -14,12 +14,6 @@ ; ZVFMIN: LLVM ERROR: Cannot select: intrinsic %llvm.riscv.vfadd -declare @llvm.riscv.vfadd.nxv1f16.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -38,13 +32,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -64,12 +51,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv2f16.nxv2f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -88,13 +69,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -114,12 +88,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv4f16.nxv4f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -138,13 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -164,12 +125,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv8f16.nxv8f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -188,13 +143,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -214,12 +162,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfadd.nxv16f16.nxv16f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -238,13 +180,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -264,12 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv32f16.nxv32f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -288,13 +217,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -315,12 +237,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv1f32.nxv1f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -339,13 +255,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -365,12 +274,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv2f32.nxv2f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -389,13 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -415,12 +311,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv4f32.nxv4f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -439,13 +329,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -465,12 +348,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv8f32.nxv8f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -489,13 +366,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -515,12 +385,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv16f32.nxv16f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -539,13 +403,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32( - , - , - , - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -566,12 +423,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv1f64.nxv1f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -590,13 +441,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -616,12 +460,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv2f64.nxv2f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -640,13 +478,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -666,12 +497,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv4f64.nxv4f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -690,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -716,12 +534,6 @@ entry: ret %a } 
-declare @llvm.riscv.vfadd.nxv8f64.nxv8f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -740,13 +552,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -767,12 +572,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv1f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -791,13 +590,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv1f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -817,12 +609,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv2f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -841,13 +627,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv2f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -867,12 +646,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv4f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # 
%entry @@ -891,13 +664,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv4f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -917,12 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv8f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -941,13 +701,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv8f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -967,12 +720,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv16f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -991,13 +738,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv16f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -1017,12 +757,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv32f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -1041,13 +775,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv32f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -1067,12 +794,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv1f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -1091,13 +812,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv1f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -1117,12 +831,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv2f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -1141,13 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv2f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -1167,12 +868,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv4f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -1191,13 +886,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv4f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -1217,12 +905,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv8f32.f32( - , - , - float, - iXLen, iXLen); - define 
@intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1241,13 +923,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv8f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1267,12 +942,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv16f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -1291,13 +960,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv16f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -1317,12 +979,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv1f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1341,13 +997,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv1f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1367,12 +1016,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv2f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1391,13 +1034,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfadd.mask.nxv2f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1417,12 +1053,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv4f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -1441,13 +1071,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv4f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -1467,12 +1090,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.nxv8f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry @@ -1491,13 +1108,6 @@ entry: ret %a } -declare @llvm.riscv.vfadd.mask.nxv8f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfclass-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfclass-bf.ll index d7d49b379b5a4..2af8a8e668ea7 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfclass-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfclass-bf.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfclass.nxv1i16.nxv1bf16( - , - , - iXLen); - define @intrinsic_vfclass_v_nxv1i16_nxv1bf16( ; CHECK-LABEL: 
intrinsic_vfclass_v_nxv1i16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -26,12 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv1i16.nxv1bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfclass_mask_v_nxv1i16_nxv1bf16( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -52,11 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.nxv2i16.nxv2bf16( - , - , - iXLen); - define @intrinsic_vfclass_v_nxv2i16_nxv2bf16( ; CHECK-LABEL: intrinsic_vfclass_v_nxv2i16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -74,12 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv2i16.nxv2bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfclass_mask_v_nxv2i16_nxv2bf16( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -100,11 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.nxv4i16.nxv4bf16( - , - , - iXLen); - define @intrinsic_vfclass_v_nxv4i16_nxv4bf16( ; CHECK-LABEL: intrinsic_vfclass_v_nxv4i16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -122,12 +95,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv4i16.nxv4bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfclass_mask_v_nxv4i16_nxv4bf16( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -148,11 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.nxv8i16.nxv8bf16( - , - , - iXLen); - define @intrinsic_vfclass_v_nxv8i16_nxv8bf16( ; CHECK-LABEL: intrinsic_vfclass_v_nxv8i16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -170,12 +132,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv8i16.nxv8bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfclass_mask_v_nxv8i16_nxv8bf16( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -196,11 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.nxv16i16.nxv16bf16( - , - , - iXLen); - define @intrinsic_vfclass_v_nxv16i16_nxv16bf16( ; CHECK-LABEL: intrinsic_vfclass_v_nxv16i16_nxv16bf16: ; CHECK: # %bb.0: # 
%entry @@ -218,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv16i16.nxv16bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfclass_mask_v_nxv16i16_nxv16bf16( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv16i16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -244,11 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.nxv32i16.nxv32bf16( - , - , - iXLen); - define @intrinsic_vfclass_v_nxv32i16_nxv32bf16( ; CHECK-LABEL: intrinsic_vfclass_v_nxv32i16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -266,12 +206,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv32i16.nxv32bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfclass_mask_v_nxv32i16_nxv32bf16( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv32i16_nxv32bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfclass-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfclass-sdnode.ll index 862a8355d4321..4879449129161 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfclass-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfclass-sdnode.ll @@ -63,7 +63,6 @@ define @isnan_nxv2f32( %x) { ret %1 } - define @isnan_nxv4f32( %x) { ; CHECK-LABEL: isnan_nxv4f32: ; CHECK: # %bb.0: @@ -188,11 +187,3 @@ define @isnotfinite_nxv16f32( %x) { ret %1 } -declare @llvm.is.fpclass.nxv2f16(, i32) -declare @llvm.is.fpclass.nxv2f32(, i32) -declare @llvm.is.fpclass.nxv4f32(, i32) -declare @llvm.is.fpclass.nxv8f32(, i32) -declare @llvm.is.fpclass.nxv16f32(, i32) -declare @llvm.is.fpclass.nxv2f64(, i32) -declare @llvm.is.fpclass.nxv4f64(, i32) -declare @llvm.is.fpclass.nxv8f64(, i32) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfclass-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfclass-vp.ll index 36e1bea1f9994..8c1c973c9bb0c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfclass-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfclass-vp.ll @@ -218,12 +218,3 @@ define @isneginf_nxv8f64_unmasked( %x, i3 ret %1 } - -declare @llvm.vp.is.fpclass.nxv2f16(, i32, , i32) -declare @llvm.vp.is.fpclass.nxv2f32(, i32, , i32) -declare 
@llvm.vp.is.fpclass.nxv4f32(, i32, , i32) -declare @llvm.vp.is.fpclass.nxv8f32(, i32, , i32) -declare @llvm.vp.is.fpclass.nxv16f32(, i32, , i32) -declare @llvm.vp.is.fpclass.nxv2f64(, i32, , i32) -declare @llvm.vp.is.fpclass.nxv4f64(, i32, , i32) -declare @llvm.vp.is.fpclass.nxv8f64(, i32, , i32) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfclass.ll b/llvm/test/CodeGen/RISCV/rvv/vfclass.ll index 7017946276be8..e0ef834fb5000 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfclass.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfclass.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfclass.nxv1i16( - , - , - iXLen); - define @intrinsic_vfclass_v_nxv1i16_nxv1f16( ; CHECK-LABEL: intrinsic_vfclass_v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -26,12 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv1i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfclass_mask_v_nxv1i16_nxv1f16( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -52,11 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.nxv2i16( - , - , - iXLen); - define @intrinsic_vfclass_v_nxv2i16_nxv2f16( ; CHECK-LABEL: intrinsic_vfclass_v_nxv2i16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -74,12 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv2i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfclass_mask_v_nxv2i16_nxv2f16( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -100,11 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.nxv4i16( - , - , - iXLen); - define @intrinsic_vfclass_v_nxv4i16_nxv4f16( ; CHECK-LABEL: intrinsic_vfclass_v_nxv4i16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -122,12 +95,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv4i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfclass_mask_v_nxv4i16_nxv4f16( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i16_nxv4f16: ; 
CHECK: # %bb.0: # %entry @@ -148,11 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.nxv8i16( - , - , - iXLen); - define @intrinsic_vfclass_v_nxv8i16_nxv8f16( ; CHECK-LABEL: intrinsic_vfclass_v_nxv8i16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -170,12 +132,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv8i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfclass_mask_v_nxv8i16_nxv8f16( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -196,11 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.nxv16i16( - , - , - iXLen); - define @intrinsic_vfclass_v_nxv16i16_nxv16f16( ; CHECK-LABEL: intrinsic_vfclass_v_nxv16i16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -218,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv16i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfclass_mask_v_nxv16i16_nxv16f16( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv16i16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -244,11 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.nxv32i16( - , - , - iXLen); - define @intrinsic_vfclass_v_nxv32i16_nxv32f16( ; CHECK-LABEL: intrinsic_vfclass_v_nxv32i16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -266,12 +206,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv32i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfclass_mask_v_nxv32i16_nxv32f16( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv32i16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -292,11 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.nxv1i32( - , - , - iXLen); - define @intrinsic_vfclass_v_nxv1i32_nxv1f32( ; CHECK-LABEL: intrinsic_vfclass_v_nxv1i32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -314,12 +243,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv1i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfclass_mask_v_nxv1i32_nxv1f32( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -340,11 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.nxv2i32( - , - , - iXLen); - 
define @intrinsic_vfclass_v_nxv2i32_nxv2f32( ; CHECK-LABEL: intrinsic_vfclass_v_nxv2i32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -362,12 +280,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv2i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfclass_mask_v_nxv2i32_nxv2f32( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -388,11 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.nxv4i32( - , - , - iXLen); - define @intrinsic_vfclass_v_nxv4i32_nxv4f32( ; CHECK-LABEL: intrinsic_vfclass_v_nxv4i32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -410,12 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv4i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfclass_mask_v_nxv4i32_nxv4f32( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -436,11 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.nxv8i32( - , - , - iXLen); - define @intrinsic_vfclass_v_nxv8i32_nxv8f32( ; CHECK-LABEL: intrinsic_vfclass_v_nxv8i32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -458,12 +354,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv8i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfclass_mask_v_nxv8i32_nxv8f32( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -484,11 +374,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.nxv16i32( - , - , - iXLen); - define @intrinsic_vfclass_v_nxv16i32_nxv16f32( ; CHECK-LABEL: intrinsic_vfclass_v_nxv16i32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -506,12 +391,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv16i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfclass_mask_v_nxv16i32_nxv16f32( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv16i32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -532,11 +411,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.nxv1i64( - , - , - iXLen); - define @intrinsic_vfclass_v_nxv1i64_nxv1f64( ; CHECK-LABEL: intrinsic_vfclass_v_nxv1i64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ 
-554,12 +428,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv1i64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfclass_mask_v_nxv1i64_nxv1f64( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -580,11 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.nxv2i64( - , - , - iXLen); - define @intrinsic_vfclass_v_nxv2i64_nxv2f64( ; CHECK-LABEL: intrinsic_vfclass_v_nxv2i64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -602,12 +465,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv2i64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfclass_mask_v_nxv2i64_nxv2f64( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -628,11 +485,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.nxv4i64( - , - , - iXLen); - define @intrinsic_vfclass_v_nxv4i64_nxv4f64( ; CHECK-LABEL: intrinsic_vfclass_v_nxv4i64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -650,12 +502,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv4i64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfclass_mask_v_nxv4i64_nxv4f64( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -676,11 +522,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.nxv8i64( - , - , - iXLen); - define @intrinsic_vfclass_v_nxv8i64_nxv8f64( ; CHECK-LABEL: intrinsic_vfclass_v_nxv8i64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -698,12 +539,6 @@ entry: ret %a } -declare @llvm.riscv.vfclass.mask.nxv8i64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfclass_mask_v_nxv8i64_nxv8f64( ; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i64_nxv8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcmp-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfcmp-constrained-sdnode.ll index ec6ab422d6405..1981c78398b06 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfcmp-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcmp-constrained-sdnode.ll @@ -4,7 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v 
-target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare @llvm.experimental.constrained.fcmp.nxv1f16(, , metadata, metadata) define @fcmp_oeq_vv_nxv1f16( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_nxv1f16: ; CHECK: # %bb.0: @@ -637,7 +636,6 @@ define @fcmp_uno_fv_nxv1f16( %va, half %b) ret %1 } -declare @llvm.experimental.constrained.fcmp.nxv2f16(, , metadata, metadata) define @fcmp_oeq_vv_nxv2f16( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_nxv2f16: ; CHECK: # %bb.0: @@ -1270,7 +1268,6 @@ define @fcmp_uno_fv_nxv2f16( %va, half %b) ret %1 } -declare @llvm.experimental.constrained.fcmp.nxv4f16(, , metadata, metadata) define @fcmp_oeq_vv_nxv4f16( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_nxv4f16: ; CHECK: # %bb.0: @@ -1903,7 +1900,6 @@ define @fcmp_uno_fv_nxv4f16( %va, half %b) ret %1 } -declare @llvm.experimental.constrained.fcmp.nxv8f16(, , metadata, metadata) define @fcmp_oeq_vv_nxv8f16( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_nxv8f16: ; CHECK: # %bb.0: @@ -2578,7 +2574,6 @@ define @fcmp_uno_fv_nxv8f16( %va, half %b) ret %1 } -declare @llvm.experimental.constrained.fcmp.nxv16f16(, , metadata, metadata) define @fcmp_oeq_vv_nxv16f16( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_nxv16f16: ; CHECK: # %bb.0: @@ -3253,7 +3248,6 @@ define @fcmp_uno_fv_nxv16f16( %va, half % ret %1 } -declare @llvm.experimental.constrained.fcmp.nxv32f16(, , metadata, metadata) define @fcmp_oeq_vv_nxv32f16( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_nxv32f16: ; CHECK: # %bb.0: @@ -3928,7 +3922,6 @@ define @fcmp_uno_fv_nxv32f16( %va, half % ret %1 } -declare @llvm.experimental.constrained.fcmp.nxv1f32(, , metadata, metadata) define @fcmp_oeq_vv_nxv1f32( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_nxv1f32: ; CHECK: # %bb.0: @@ -4561,7 +4554,6 @@ define @fcmp_uno_fv_nxv1f32( %va, float %b ret %1 } -declare @llvm.experimental.constrained.fcmp.nxv2f32(, , metadata, metadata) 
define @fcmp_oeq_vv_nxv2f32( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_nxv2f32: ; CHECK: # %bb.0: @@ -5194,7 +5186,6 @@ define @fcmp_uno_fv_nxv2f32( %va, float %b ret %1 } -declare @llvm.experimental.constrained.fcmp.nxv4f32(, , metadata, metadata) define @fcmp_oeq_vv_nxv4f32( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_nxv4f32: ; CHECK: # %bb.0: @@ -5869,7 +5860,6 @@ define @fcmp_uno_fv_nxv4f32( %va, float %b ret %1 } -declare @llvm.experimental.constrained.fcmp.nxv8f32(, , metadata, metadata) define @fcmp_oeq_vv_nxv8f32( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_nxv8f32: ; CHECK: # %bb.0: @@ -6544,7 +6534,6 @@ define @fcmp_uno_fv_nxv8f32( %va, float %b ret %1 } -declare @llvm.experimental.constrained.fcmp.nxv16f32(, , metadata, metadata) define @fcmp_oeq_vv_nxv16f32( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_nxv16f32: ; CHECK: # %bb.0: @@ -7219,7 +7208,6 @@ define @fcmp_uno_fv_nxv16f32( %va, float ret %1 } -declare @llvm.experimental.constrained.fcmp.nxv1f64(, , metadata, metadata) define @fcmp_oeq_vv_nxv1f64( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_nxv1f64: ; CHECK: # %bb.0: @@ -7852,7 +7840,6 @@ define @fcmp_uno_fv_nxv1f64( %va, double ret %1 } -declare @llvm.experimental.constrained.fcmp.nxv2f64(, , metadata, metadata) define @fcmp_oeq_vv_nxv2f64( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_nxv2f64: ; CHECK: # %bb.0: @@ -8527,7 +8514,6 @@ define @fcmp_uno_fv_nxv2f64( %va, double ret %1 } -declare @llvm.experimental.constrained.fcmp.nxv4f64(, , metadata, metadata) define @fcmp_oeq_vv_nxv4f64( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_nxv4f64: ; CHECK: # %bb.0: @@ -9202,7 +9188,6 @@ define @fcmp_uno_fv_nxv4f64( %va, double ret %1 } -declare @llvm.experimental.constrained.fcmp.nxv8f64(, , metadata, metadata) define @fcmp_oeq_vv_nxv8f64( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq_vv_nxv8f64: ; CHECK: # %bb.0: diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vfcmps-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfcmps-constrained-sdnode.ll index 2ca9dd24e915a..d33cd5a558630 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfcmps-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcmps-constrained-sdnode.ll @@ -4,7 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare @llvm.experimental.constrained.fcmps.nxv1f16(, , metadata, metadata) define @fcmps_oeq_vv_nxv1f16( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_nxv1f16: ; CHECK: # %bb.0: @@ -536,7 +535,6 @@ define @fcmps_uno_fv_nxv1f16( %va, half %b) ret %1 } -declare @llvm.experimental.constrained.fcmps.nxv2f16(, , metadata, metadata) define @fcmps_oeq_vv_nxv2f16( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_nxv2f16: ; CHECK: # %bb.0: @@ -1068,7 +1066,6 @@ define @fcmps_uno_fv_nxv2f16( %va, half %b) ret %1 } -declare @llvm.experimental.constrained.fcmps.nxv4f16(, , metadata, metadata) define @fcmps_oeq_vv_nxv4f16( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_nxv4f16: ; CHECK: # %bb.0: @@ -1600,7 +1597,6 @@ define @fcmps_uno_fv_nxv4f16( %va, half %b) ret %1 } -declare @llvm.experimental.constrained.fcmps.nxv8f16(, , metadata, metadata) define @fcmps_oeq_vv_nxv8f16( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_nxv8f16: ; CHECK: # %bb.0: @@ -2132,7 +2128,6 @@ define @fcmps_uno_fv_nxv8f16( %va, half %b) ret %1 } -declare @llvm.experimental.constrained.fcmps.nxv16f16(, , metadata, metadata) define @fcmps_oeq_vv_nxv16f16( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_nxv16f16: ; CHECK: # %bb.0: @@ -2664,7 +2659,6 @@ define @fcmps_uno_fv_nxv16f16( %va, half ret %1 } -declare @llvm.experimental.constrained.fcmps.nxv32f16(, , metadata, metadata) define @fcmps_oeq_vv_nxv32f16( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_nxv32f16: ; CHECK: # %bb.0: @@ -3196,7 +3190,6 @@ define 
@fcmps_uno_fv_nxv32f16( %va, half ret %1 } -declare @llvm.experimental.constrained.fcmps.nxv1f32(, , metadata, metadata) define @fcmps_oeq_vv_nxv1f32( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_nxv1f32: ; CHECK: # %bb.0: @@ -3728,7 +3721,6 @@ define @fcmps_uno_fv_nxv1f32( %va, float % ret %1 } -declare @llvm.experimental.constrained.fcmps.nxv2f32(, , metadata, metadata) define @fcmps_oeq_vv_nxv2f32( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_nxv2f32: ; CHECK: # %bb.0: @@ -4260,7 +4252,6 @@ define @fcmps_uno_fv_nxv2f32( %va, float % ret %1 } -declare @llvm.experimental.constrained.fcmps.nxv4f32(, , metadata, metadata) define @fcmps_oeq_vv_nxv4f32( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_nxv4f32: ; CHECK: # %bb.0: @@ -4792,7 +4783,6 @@ define @fcmps_uno_fv_nxv4f32( %va, float % ret %1 } -declare @llvm.experimental.constrained.fcmps.nxv8f32(, , metadata, metadata) define @fcmps_oeq_vv_nxv8f32( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_nxv8f32: ; CHECK: # %bb.0: @@ -5324,7 +5314,6 @@ define @fcmps_uno_fv_nxv8f32( %va, float % ret %1 } -declare @llvm.experimental.constrained.fcmps.nxv16f32(, , metadata, metadata) define @fcmps_oeq_vv_nxv16f32( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_nxv16f32: ; CHECK: # %bb.0: @@ -5856,7 +5845,6 @@ define @fcmps_uno_fv_nxv16f32( %va, floa ret %1 } -declare @llvm.experimental.constrained.fcmps.nxv1f64(, , metadata, metadata) define @fcmps_oeq_vv_nxv1f64( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_nxv1f64: ; CHECK: # %bb.0: @@ -6388,7 +6376,6 @@ define @fcmps_uno_fv_nxv1f64( %va, double ret %1 } -declare @llvm.experimental.constrained.fcmps.nxv2f64(, , metadata, metadata) define @fcmps_oeq_vv_nxv2f64( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_nxv2f64: ; CHECK: # %bb.0: @@ -6920,7 +6907,6 @@ define @fcmps_uno_fv_nxv2f64( %va, double ret %1 } -declare @llvm.experimental.constrained.fcmps.nxv4f64(, , metadata, metadata) define 
@fcmps_oeq_vv_nxv4f64( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_nxv4f64: ; CHECK: # %bb.0: @@ -7452,7 +7438,6 @@ define @fcmps_uno_fv_nxv4f64( %va, double ret %1 } -declare @llvm.experimental.constrained.fcmps.nxv8f64(, , metadata, metadata) define @fcmps_oeq_vv_nxv8f64( %va, %vb) nounwind strictfp { ; CHECK-LABEL: fcmps_oeq_vv_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode.ll index bef2e8d3b57fc..3a3fdbe59e6c9 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode.ll @@ -198,8 +198,6 @@ define @nxv32bf32( %vm, %r } -declare @llvm.copysign.nxv1f16(, ) - define @vfcopysign_vv_nxv1f16( %vm, %vs) { ; ZVFH-LABEL: vfcopysign_vv_nxv1f16: ; ZVFH: # %bb.0: @@ -678,8 +676,6 @@ define @vfcopynsign_exttrunc_vf_nxv1f16_nxv1f64( %r } -declare @llvm.copysign.nxv2f16(, ) - define @vfcopysign_vv_nxv2f16( %vm, %vs) { ; ZVFH-LABEL: vfcopysign_vv_nxv2f16: ; ZVFH: # %bb.0: @@ -818,8 +814,6 @@ define @vfcopynsign_vf_nxv2f16( %vm, half ret %r } -declare @llvm.copysign.nxv4f16(, ) - define @vfcopysign_vv_nxv4f16( %vm, %vs) { ; ZVFH-LABEL: vfcopysign_vv_nxv4f16: ; ZVFH: # %bb.0: @@ -958,8 +952,6 @@ define @vfcopynsign_vf_nxv4f16( %vm, half ret %r } -declare @llvm.copysign.nxv8f16(, ) - define @vfcopysign_vv_nxv8f16( %vm, %vs) { ; ZVFH-LABEL: vfcopysign_vv_nxv8f16: ; ZVFH: # %bb.0: @@ -1438,8 +1430,6 @@ define @vfcopynsign_exttrunc_vf_nxv8f16_nxv8f64( %r } -declare @llvm.copysign.nxv16f16(, ) - define @vfcopysign_vv_nxv16f16( %vm, %vs) { ; ZVFH-LABEL: vfcopysign_vv_nxv16f16: ; ZVFH: # %bb.0: @@ -1578,8 +1568,6 @@ define @vfcopynsign_vf_nxv16f16( %vm, h ret %r } -declare @llvm.copysign.nxv32f16(, ) - define @vfcopysign_vv_nxv32f16( %vm, %vs) { ; ZVFH-LABEL: vfcopysign_vv_nxv32f16: ; ZVFH: # %bb.0: @@ -1718,8 +1706,6 @@ define @vfcopynsign_vf_nxv32f16( %vm, h ret %r } -declare @llvm.copysign.nxv1f32(, ) - define 
@vfcopysign_vv_nxv1f32( %vm, %vs) { ; CHECK-LABEL: vfcopysign_vv_nxv1f32: ; CHECK: # %bb.0: @@ -1880,8 +1866,6 @@ define @vfcopynsign_exttrunc_vf_nxv1f32_nxv1f64( %r } -declare @llvm.copysign.nxv2f32(, ) - define @vfcopysign_vv_nxv2f32( %vm, %vs) { ; CHECK-LABEL: vfcopysign_vv_nxv2f32: ; CHECK: # %bb.0: @@ -1928,8 +1912,6 @@ define @vfcopynsign_vf_nxv2f32( %vm, fl ret %r } -declare @llvm.copysign.nxv4f32(, ) - define @vfcopysign_vv_nxv4f32( %vm, %vs) { ; CHECK-LABEL: vfcopysign_vv_nxv4f32: ; CHECK: # %bb.0: @@ -1976,8 +1958,6 @@ define @vfcopynsign_vf_nxv4f32( %vm, fl ret %r } -declare @llvm.copysign.nxv8f32(, ) - define @vfcopysign_vv_nxv8f32( %vm, %vs) { ; CHECK-LABEL: vfcopysign_vv_nxv8f32: ; CHECK: # %bb.0: @@ -2138,8 +2118,6 @@ define @vfcopynsign_exttrunc_vf_nxv8f32_nxv8f64( %r } -declare @llvm.copysign.nxv16f32(, ) - define @vfcopysign_vv_nxv16f32( %vm, %vs) { ; CHECK-LABEL: vfcopysign_vv_nxv16f32: ; CHECK: # %bb.0: @@ -2186,8 +2164,6 @@ define @vfcopynsign_vf_nxv16f32( %vm, ret %r } -declare @llvm.copysign.nxv1f64(, ) - define @vfcopysign_vv_nxv1f64( %vm, %vs) { ; CHECK-LABEL: vfcopysign_vv_nxv1f64: ; CHECK: # %bb.0: @@ -2350,8 +2326,6 @@ define @vfcopynsign_exttrunc_vf_nxv1f64_nxv1f32( %r } -declare @llvm.copysign.nxv2f64(, ) - define @vfcopysign_vv_nxv2f64( %vm, %vs) { ; CHECK-LABEL: vfcopysign_vv_nxv2f64: ; CHECK: # %bb.0: @@ -2398,8 +2372,6 @@ define @vfcopynsign_vf_nxv2f64( %vm, ret %r } -declare @llvm.copysign.nxv4f64(, ) - define @vfcopysign_vv_nxv4f64( %vm, %vs) { ; CHECK-LABEL: vfcopysign_vv_nxv4f64: ; CHECK: # %bb.0: @@ -2446,8 +2418,6 @@ define @vfcopynsign_vf_nxv4f64( %vm, ret %r } -declare @llvm.copysign.nxv8f64(, ) - define @vfcopysign_vv_nxv8f64( %vm, %vs) { ; CHECK-LABEL: vfcopysign_vv_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x.ll index 2d90371856b73..5af4db662d867 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x.ll 
@@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -26,12 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -50,11 +39,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.nxv2f16.nxv2i16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.x.v_nxv2f16_nxv2i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -72,12 +56,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv2f16.nxv2i16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.x.v_nxv2f16_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -96,11 +74,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.nxv4f16.nxv4i16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.x.v_nxv4f16_nxv4i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -118,12 +91,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv4f16.nxv4i16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.x.v_nxv4f16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -142,11 +109,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.nxv8f16.nxv8i16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.x.v_nxv8f16_nxv8i16( %0, iXLen %1) nounwind { ; 
CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -164,12 +126,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv8f16.nxv8i16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.x.v_nxv8f16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -188,11 +144,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.nxv16f16.nxv16i16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.x.v_nxv16f16_nxv16i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -210,12 +161,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv16f16.nxv16i16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.x.v_nxv16f16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -234,11 +179,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.nxv32f16.nxv32i16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.x.v_nxv32f16_nxv32i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -256,12 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv32f16.nxv32i16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.x.v_nxv32f16_nxv32i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -280,11 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.x.v_nxv1f32_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -302,12 +231,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.x.v_nxv1f32_nxv1i32( %0, %1, 
%2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -326,11 +249,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.x.v_nxv2f32_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -348,12 +266,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv2f32.nxv2i32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.x.v_nxv2f32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -372,11 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.x.v_nxv4f32_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -394,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv4f32.nxv4i32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.x.v_nxv4f32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -418,11 +319,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.x.v_nxv8f32_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -440,12 +336,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv8f32.nxv8i32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.x.v_nxv8f32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -464,11 +354,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.x.v_nxv16f32_nxv16i32( %0, iXLen %1) 
nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -486,12 +371,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv16f32.nxv16i32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.x.v_nxv16f32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -510,11 +389,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.x.v_nxv1f64_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -532,12 +406,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.x.v_nxv1f64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -556,11 +424,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.x.v_nxv2f64_nxv2i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -578,12 +441,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv2f64.nxv2i64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.x.v_nxv2f64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -602,11 +459,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.x.v_nxv4f64_nxv4i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -624,12 +476,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv4f64.nxv4i64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.x.v_nxv4f64_nxv4i64( %0, %1, %2, 
iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -648,11 +494,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.x.v_nxv8f64_nxv8i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -670,12 +511,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.x.v.mask.nxv8f64.nxv8i64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.x.v_nxv8f64_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu.ll index eb5c5ea58f1b3..1e826cc4b1b42 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -26,12 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f16.nxv1i16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.xu.v_nxv1f16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -50,11 +39,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.nxv2f16.nxv2i16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.xu.v_nxv2f16_nxv2i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -72,12 +56,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f16.nxv2i16( - , - , - , - iXLen, iXLen, iXLen); - 
define @intrinsic_vfcvt_mask_f.xu.v_nxv2f16_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -96,11 +74,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.nxv4f16.nxv4i16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.xu.v_nxv4f16_nxv4i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -118,12 +91,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f16.nxv4i16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.xu.v_nxv4f16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -142,11 +109,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.nxv8f16.nxv8i16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.xu.v_nxv8f16_nxv8i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -164,12 +126,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f16.nxv8i16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.xu.v_nxv8f16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -188,11 +144,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.nxv16f16.nxv16i16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.xu.v_nxv16f16_nxv16i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -210,12 +161,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f16.nxv16i16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.xu.v_nxv16f16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -234,11 +179,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.nxv32f16.nxv32i16( - , - 
, - iXLen, iXLen); - define @intrinsic_vfcvt_f.xu.v_nxv32f16_nxv32i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -256,12 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv32f16.nxv32i16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.xu.v_nxv32f16_nxv32i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -280,11 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.xu.v_nxv1f32_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -302,12 +231,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.xu.v_nxv1f32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -326,11 +249,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.xu.v_nxv2f32_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -348,12 +266,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.xu.v_nxv2f32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -372,11 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.xu.v_nxv4f32_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -394,12 +301,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.xu.v_nxv4f32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -418,11 +319,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.xu.v_nxv8f32_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -440,12 +336,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.xu.v_nxv8f32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -464,11 +354,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.xu.v_nxv16f32_nxv16i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -486,12 +371,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.xu.v_nxv16f32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -510,11 +389,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.xu.v_nxv1f64_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -532,12 +406,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.xu.v_nxv1f64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ 
-556,11 +424,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.xu.v_nxv2f64_nxv2i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -578,12 +441,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.xu.v_nxv2f64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -602,11 +459,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.xu.v_nxv4f64_nxv4i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -624,12 +476,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.xu.v_nxv4f64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -648,11 +494,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_f.xu.v_nxv8f64_nxv8i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -670,12 +511,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_f.xu.v_nxv8f64_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f.ll index b7f9d1a2b2c41..d70c31068aec4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f.ll @@ -4,11 +4,6 @@ ; RUN: sed 
's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i16.nxv1f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -47,11 +35,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i16.nxv2f16( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.x.f.v_nxv2i16_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv2i16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -67,13 +50,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i16.nxv2f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -90,11 +66,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i16.nxv4f16( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.x.f.v_nxv4i16_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv4i16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -110,13 +81,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i16.nxv4f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -133,11 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i16.nxv8f16( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.x.f.v_nxv8i16_nxv8f16( %0, iXLen %1) nounwind 
{ ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv8i16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -153,13 +112,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i16.nxv8f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -176,11 +128,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i16.nxv16f16( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.x.f.v_nxv16i16_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv16i16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -196,13 +143,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i16.nxv16f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -219,11 +159,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv32i16.nxv32f16( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.x.f.v_nxv32i16_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv32i16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -239,13 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv32i16.nxv32f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv32i16_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv32i16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -262,11 +190,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.x.f.v_nxv1i32_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv1i32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -282,13 +205,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32( - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -305,11 +221,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.x.f.v_nxv2i32_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv2i32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -325,13 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -348,11 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.x.f.v_nxv4i32_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv4i32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -368,13 +267,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -391,11 +283,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.x.f.v_nxv8i32_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv8i32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -411,13 +298,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -434,11 +314,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.x.f.v_nxv16i32_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv16i32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -454,13 +329,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i32_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv16i32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -477,11 +345,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.x.f.v_nxv1i64_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv1i64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -497,13 +360,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -520,11 +376,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.x.f.v_nxv2i64_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv2i64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -540,13 +391,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -563,11 +407,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.x.f.v_nxv4i64_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv4i64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -583,13 +422,6 @@ 
entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -606,11 +438,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.x.f.v_nxv8i64_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv8i64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -626,13 +453,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i64_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i64_nxv8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f.ll index c2db9d64b1842..0fe2d0be48046 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i16.nxv1f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -47,11 +35,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i16.nxv2f16( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.xu.f.v_nxv2i16_nxv2f16( %0, iXLen %1) 
nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv2i16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -67,13 +50,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i16.nxv2f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -90,11 +66,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i16.nxv4f16( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.xu.f.v_nxv4i16_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv4i16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -110,13 +81,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i16.nxv4f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -133,11 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i16.nxv8f16( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.xu.f.v_nxv8i16_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv8i16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -153,13 +112,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i16.nxv8f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -176,11 +128,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i16.nxv16f16( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.xu.f.v_nxv16i16_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv16i16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -196,13 +143,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i16.nxv16f16( - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -219,11 +159,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv32i16.nxv32f16( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.xu.f.v_nxv32i16_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv32i16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -239,13 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv32i16.nxv32f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv32i16_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv32i16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -262,11 +190,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.xu.f.v_nxv1i32_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv1i32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -282,13 +205,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -305,11 +221,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.xu.f.v_nxv2i32_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv2i32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -325,13 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -348,11 +252,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.xu.f.v_nxv4i32_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv4i32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -368,13 +267,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -391,11 +283,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.xu.f.v_nxv8i32_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv8i32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -411,13 +298,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -434,11 +314,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.xu.f.v_nxv16i32_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv16i32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -454,13 +329,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -477,11 +345,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.xu.f.v_nxv1i64_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv1i64_nxv1f64: ; CHECK: # %bb.0: # %entry 
@@ -497,13 +360,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -520,11 +376,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.xu.f.v_nxv2i64_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv2i64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -540,13 +391,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -563,11 +407,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.xu.f.v_nxv4i64_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv4i64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -583,13 +422,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -606,11 +438,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64( - , - , - iXLen); - define @intrinsic_vfcvt_rtz.xu.f.v_nxv8i64_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv8i64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -626,13 +453,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f.ll index f49eaf69f164f..c3131926b3d79 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -26,12 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv1i16.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_x.f.v_nxv1i16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -50,11 +39,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.nxv2i16.nxv2f16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_x.f.v_nxv2i16_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv2i16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -72,12 +56,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv2i16.nxv2f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_x.f.v_nxv2i16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv2i16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -96,11 +74,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.nxv4i16.nxv4f16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_x.f.v_nxv4i16_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv4i16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -118,12 +91,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv4i16.nxv4f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_x.f.v_nxv4i16_nxv4f16( 
%0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv4i16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -142,11 +109,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.nxv8i16.nxv8f16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_x.f.v_nxv8i16_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv8i16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -164,12 +126,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv8i16.nxv8f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_x.f.v_nxv8i16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv8i16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -188,11 +144,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.nxv16i16.nxv16f16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_x.f.v_nxv16i16_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv16i16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -210,12 +161,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv16i16.nxv16f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_x.f.v_nxv16i16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv16i16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -234,11 +179,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.nxv32i16.nxv32f16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_x.f.v_nxv32i16_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv32i16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -256,12 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv32i16.nxv32f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_x.f.v_nxv32i16_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv32i16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -280,11 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32( - , - , - iXLen, iXLen); - define 
@intrinsic_vfcvt_x.f.v_nxv1i32_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv1i32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -302,12 +231,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_x.f.v_nxv1i32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv1i32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -326,11 +249,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_x.f.v_nxv2i32_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv2i32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -348,12 +266,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv2i32.nxv2f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_x.f.v_nxv2i32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv2i32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -372,11 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_x.f.v_nxv4i32_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv4i32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -394,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv4i32.nxv4f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_x.f.v_nxv4i32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv4i32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -418,11 +319,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_x.f.v_nxv8i32_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv8i32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -440,12 +336,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv8i32.nxv8f32( - , - , - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfcvt_mask_x.f.v_nxv8i32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv8i32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -464,11 +354,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_x.f.v_nxv16i32_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv16i32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -486,12 +371,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv16i32.nxv16f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_x.f.v_nxv16i32_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv16i32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -510,11 +389,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_x.f.v_nxv1i64_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv1i64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -532,12 +406,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv1i64.nxv1f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_x.f.v_nxv1i64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv1i64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -556,11 +424,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_x.f.v_nxv2i64_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv2i64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -578,12 +441,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_x.f.v_nxv2i64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv2i64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -602,11 +459,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64( - , - , - iXLen, iXLen); - 
define @intrinsic_vfcvt_x.f.v_nxv4i64_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv4i64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -624,12 +476,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_x.f.v_nxv4i64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv4i64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -648,11 +494,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_x.f.v_nxv8i64_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv8i64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -670,12 +511,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_x.f.v_nxv8i64_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv8i64_nxv8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f.ll index 662ff865b9a7f..8b7c0fb8d6889 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -26,12 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -50,11 +39,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfcvt.xu.f.v.nxv2i16.nxv2f16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_xu.f.v_nxv2i16_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv2i16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -72,12 +56,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i16.nxv2f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_xu.f.v_nxv2i16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv2i16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -96,11 +74,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.nxv4i16.nxv4f16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_xu.f.v_nxv4i16_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv4i16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -118,12 +91,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i16.nxv4f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_xu.f.v_nxv4i16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv4i16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -142,11 +109,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.nxv8i16.nxv8f16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_xu.f.v_nxv8i16_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv8i16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -164,12 +126,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i16.nxv8f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_xu.f.v_nxv8i16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv8i16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -188,11 +144,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.nxv16i16.nxv16f16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_xu.f.v_nxv16i16_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv16i16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -210,12 +161,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfcvt.xu.f.v.mask.nxv16i16.nxv16f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_xu.f.v_nxv16i16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv16i16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -234,11 +179,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.nxv32i16.nxv32f16( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_xu.f.v_nxv32i16_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv32i16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -256,12 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv32i16.nxv32f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_xu.f.v_nxv32i16_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv32i16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -280,11 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_xu.f.v_nxv1i32_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv1i32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -302,12 +231,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_xu.f.v_nxv1i32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -326,11 +249,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_xu.f.v_nxv2i32_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv2i32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -348,12 +266,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_xu.f.v_nxv2i32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv2i32_nxv2f32: ; CHECK: # %bb.0: # %entry 
@@ -372,11 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_xu.f.v_nxv4i32_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv4i32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -394,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_xu.f.v_nxv4i32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv4i32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -418,11 +319,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_xu.f.v_nxv8i32_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv8i32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -440,12 +336,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_xu.f.v_nxv8i32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv8i32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -464,11 +354,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_xu.f.v_nxv16i32_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv16i32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -486,12 +371,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_xu.f.v_nxv16i32_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv16i32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -510,11 +389,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_xu.f.v_nxv1i64_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv1i64_nxv1f64: ; CHECK: # %bb.0: # 
%entry @@ -532,12 +406,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_xu.f.v_nxv1i64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -556,11 +424,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_xu.f.v_nxv2i64_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv2i64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -578,12 +441,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_xu.f.v_nxv2i64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv2i64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -602,11 +459,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_xu.f.v_nxv4i64_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv4i64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -624,12 +476,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_xu.f.v_nxv4i64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv4i64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -648,11 +494,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64( - , - , - iXLen, iXLen); - define @intrinsic_vfcvt_xu.f.v_nxv8i64_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv8i64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -670,12 +511,6 @@ entry: ret %a } -declare @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfcvt_mask_xu.f.v_nxv8i64_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfcvt_mask_xu.f.v_nxv8i64_nxv8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-constrained-sdnode.ll index bb121416ddec3..6f23723712d90 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-constrained-sdnode.ll @@ -286,7 +286,6 @@ define @vfdiv_vf_nxv32bf16( %va, bf ret %vc } -declare @llvm.experimental.constrained.fdiv.nxv1f16(, , metadata, metadata) define @vfdiv_vv_nxv1f16( %va, %vb) strictfp { ; ZVFH-LABEL: vfdiv_vv_nxv1f16: ; ZVFH: # %bb.0: # %entry @@ -334,7 +333,6 @@ define @vfdiv_vf_nxv1f16( %va, half %b) s ret %vc } -declare @llvm.experimental.constrained.fdiv.nxv2f16(, , metadata, metadata) define @vfdiv_vv_nxv2f16( %va, %vb) strictfp { ; ZVFH-LABEL: vfdiv_vv_nxv2f16: ; ZVFH: # %bb.0: # %entry @@ -382,7 +380,6 @@ define @vfdiv_vf_nxv2f16( %va, half %b) s ret %vc } -declare @llvm.experimental.constrained.fdiv.nxv4f16(, , metadata, metadata) define @vfdiv_vv_nxv4f16( %va, %vb) strictfp { ; ZVFH-LABEL: vfdiv_vv_nxv4f16: ; ZVFH: # %bb.0: # %entry @@ -430,7 +427,6 @@ define @vfdiv_vf_nxv4f16( %va, half %b) s ret %vc } -declare @llvm.experimental.constrained.fdiv.nxv8f16(, , metadata, metadata) define @vfdiv_vv_nxv8f16( %va, %vb) strictfp { ; ZVFH-LABEL: vfdiv_vv_nxv8f16: ; ZVFH: # %bb.0: # %entry @@ -503,7 +499,6 @@ define @vfdiv_fv_nxv8f16( %va, half %b) s ret %vc } -declare @llvm.experimental.constrained.fdiv.nxv16f16(, , metadata, metadata) define @vfdiv_vv_nxv16f16( %va, %vb) strictfp { ; ZVFH-LABEL: vfdiv_vv_nxv16f16: ; ZVFH: # %bb.0: # %entry @@ -551,7 +546,6 @@ define @vfdiv_vf_nxv16f16( %va, half %b ret %vc } -declare @llvm.experimental.constrained.fdiv.nxv32f16(, , metadata, metadata) define @vfdiv_vv_nxv32f16( %va, %vb) strictfp { ; ZVFH-LABEL: vfdiv_vv_nxv32f16: ; ZVFH: # %bb.0: # %entry @@ -644,7 +638,6 @@ define @vfdiv_vf_nxv32f16( %va, half %b ret %vc } -declare 
@llvm.experimental.constrained.fdiv.nxv1f32(, , metadata, metadata) define @vfdiv_vv_nxv1f32( %va, %vb) strictfp { ; CHECK-LABEL: vfdiv_vv_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -668,7 +661,6 @@ define @vfdiv_vf_nxv1f32( %va, float %b ret %vc } -declare @llvm.experimental.constrained.fdiv.nxv2f32(, , metadata, metadata) define @vfdiv_vv_nxv2f32( %va, %vb) strictfp { ; CHECK-LABEL: vfdiv_vv_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -692,7 +684,6 @@ define @vfdiv_vf_nxv2f32( %va, float %b ret %vc } -declare @llvm.experimental.constrained.fdiv.nxv4f32(, , metadata, metadata) define @vfdiv_vv_nxv4f32( %va, %vb) strictfp { ; CHECK-LABEL: vfdiv_vv_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -716,7 +707,6 @@ define @vfdiv_vf_nxv4f32( %va, float %b ret %vc } -declare @llvm.experimental.constrained.fdiv.nxv8f32(, , metadata, metadata) define @vfdiv_vv_nxv8f32( %va, %vb) strictfp { ; CHECK-LABEL: vfdiv_vv_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -752,7 +742,6 @@ define @vfdiv_fv_nxv8f32( %va, float %b ret %vc } -declare @llvm.experimental.constrained.fdiv.nxv16f32(, , metadata, metadata) define @vfdiv_vv_nxv16f32( %va, %vb) strictfp { ; CHECK-LABEL: vfdiv_vv_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -776,7 +765,6 @@ define @vfdiv_vf_nxv16f32( %va, float ret %vc } -declare @llvm.experimental.constrained.fdiv.nxv1f64(, , metadata, metadata) define @vfdiv_vv_nxv1f64( %va, %vb) strictfp { ; CHECK-LABEL: vfdiv_vv_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -800,7 +788,6 @@ define @vfdiv_vf_nxv1f64( %va, double ret %vc } -declare @llvm.experimental.constrained.fdiv.nxv2f64(, , metadata, metadata) define @vfdiv_vv_nxv2f64( %va, %vb) strictfp { ; CHECK-LABEL: vfdiv_vv_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -824,7 +811,6 @@ define @vfdiv_vf_nxv2f64( %va, double ret %vc } -declare @llvm.experimental.constrained.fdiv.nxv4f64(, , metadata, metadata) define @vfdiv_vv_nxv4f64( %va, %vb) strictfp { ; CHECK-LABEL: vfdiv_vv_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -848,7 +834,6 @@ define @vfdiv_vf_nxv4f64( 
%va, double ret %vc } -declare @llvm.experimental.constrained.fdiv.nxv8f64(, , metadata, metadata) define @vfdiv_vv_nxv8f64( %va, %vb) strictfp { ; CHECK-LABEL: vfdiv_vv_nxv8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll index 736d575a1a4e3..e9d7137919ac9 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll @@ -12,8 +12,6 @@ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ ; RUN: --check-prefixes=CHECK,ZVFHMIN -declare @llvm.vp.fdiv.nxv1bf16(, , , i32) - define @vfdiv_vv_nxv1bf16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_nxv1bf16: ; CHECK: # %bb.0: @@ -82,8 +80,6 @@ define @vfdiv_vf_nxv1bf16_unmasked( % ret %v } -declare @llvm.vp.fdiv.nxv2bf16(, , , i32) - define @vfdiv_vv_nxv2bf16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_nxv2bf16: ; CHECK: # %bb.0: @@ -152,8 +148,6 @@ define @vfdiv_vf_nxv2bf16_unmasked( % ret %v } -declare @llvm.vp.fdiv.nxv4bf16(, , , i32) - define @vfdiv_vv_nxv4bf16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_nxv4bf16: ; CHECK: # %bb.0: @@ -222,8 +216,6 @@ define @vfdiv_vf_nxv4bf16_unmasked( % ret %v } -declare @llvm.vp.fdiv.nxv8bf16(, , , i32) - define @vfdiv_vv_nxv8bf16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_nxv8bf16: ; CHECK: # %bb.0: @@ -292,8 +284,6 @@ define @vfdiv_vf_nxv8bf16_unmasked( % ret %v } -declare @llvm.vp.fdiv.nxv16bf16(, , , i32) - define @vfdiv_vv_nxv16bf16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_nxv16bf16: ; CHECK: # %bb.0: @@ -362,8 +352,6 @@ define @vfdiv_vf_nxv16bf16_unmasked( %v } -declare @llvm.vp.fdiv.nxv32bf16(, , , i32) - define @vfdiv_vv_nxv32bf16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_nxv32bf16: ; CHECK: # %bb.0: @@ -594,7 +582,6 @@ define @vfdiv_vf_nxv32bf16_unmasked( @llvm.vp.fdiv.nxv32bf16( %va, %vb, splat (i1 true), i32 %evl) ret %v } -declare @llvm.vp.fdiv.nxv1f16(, , 
, i32) define @vfdiv_vv_nxv1f16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfdiv_vv_nxv1f16: @@ -688,8 +675,6 @@ define @vfdiv_vf_nxv1f16_unmasked( %va, h ret %v } -declare @llvm.vp.fdiv.nxv2f16(, , , i32) - define @vfdiv_vv_nxv2f16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfdiv_vv_nxv2f16: ; ZVFH: # %bb.0: @@ -782,8 +767,6 @@ define @vfdiv_vf_nxv2f16_unmasked( %va, h ret %v } -declare @llvm.vp.fdiv.nxv4f16(, , , i32) - define @vfdiv_vv_nxv4f16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfdiv_vv_nxv4f16: ; ZVFH: # %bb.0: @@ -876,8 +859,6 @@ define @vfdiv_vf_nxv4f16_unmasked( %va, h ret %v } -declare @llvm.vp.fdiv.nxv8f16(, , , i32) - define @vfdiv_vv_nxv8f16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfdiv_vv_nxv8f16: ; ZVFH: # %bb.0: @@ -970,8 +951,6 @@ define @vfdiv_vf_nxv8f16_unmasked( %va, h ret %v } -declare @llvm.vp.fdiv.nxv16f16(, , , i32) - define @vfdiv_vv_nxv16f16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfdiv_vv_nxv16f16: ; ZVFH: # %bb.0: @@ -1064,8 +1043,6 @@ define @vfdiv_vf_nxv16f16_unmasked( %va ret %v } -declare @llvm.vp.fdiv.nxv32f16(, , , i32) - define @vfdiv_vv_nxv32f16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfdiv_vv_nxv32f16: ; ZVFH: # %bb.0: @@ -1321,8 +1298,6 @@ define @vfdiv_vf_nxv32f16_unmasked( %va ret %v } -declare @llvm.vp.fdiv.nxv1f32(, , , i32) - define @vfdiv_vv_nxv1f32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_nxv1f32: ; CHECK: # %bb.0: @@ -1367,8 +1342,6 @@ define @vfdiv_vf_nxv1f32_unmasked( %va, ret %v } -declare @llvm.vp.fdiv.nxv2f32(, , , i32) - define @vfdiv_vv_nxv2f32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_nxv2f32: ; CHECK: # %bb.0: @@ -1413,8 +1386,6 @@ define @vfdiv_vf_nxv2f32_unmasked( %va, ret %v } -declare @llvm.vp.fdiv.nxv4f32(, , , i32) - define @vfdiv_vv_nxv4f32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_nxv4f32: ; CHECK: # %bb.0: @@ -1459,8 +1430,6 @@ define @vfdiv_vf_nxv4f32_unmasked( %va, ret %v } -declare @llvm.vp.fdiv.nxv8f32(, , 
, i32) - define @vfdiv_vv_nxv8f32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_nxv8f32: ; CHECK: # %bb.0: @@ -1505,8 +1474,6 @@ define @vfdiv_vf_nxv8f32_unmasked( %va, ret %v } -declare @llvm.vp.fdiv.nxv16f32(, , , i32) - define @vfdiv_vv_nxv16f32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_nxv16f32: ; CHECK: # %bb.0: @@ -1551,8 +1518,6 @@ define @vfdiv_vf_nxv16f32_unmasked( % ret %v } -declare @llvm.vp.fdiv.nxv1f64(, , , i32) - define @vfdiv_vv_nxv1f64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_nxv1f64: ; CHECK: # %bb.0: @@ -1597,8 +1562,6 @@ define @vfdiv_vf_nxv1f64_unmasked( %v ret %v } -declare @llvm.vp.fdiv.nxv2f64(, , , i32) - define @vfdiv_vv_nxv2f64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_nxv2f64: ; CHECK: # %bb.0: @@ -1643,8 +1606,6 @@ define @vfdiv_vf_nxv2f64_unmasked( %v ret %v } -declare @llvm.vp.fdiv.nxv4f64(, , , i32) - define @vfdiv_vv_nxv4f64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_nxv4f64: ; CHECK: # %bb.0: @@ -1689,8 +1650,6 @@ define @vfdiv_vf_nxv4f64_unmasked( %v ret %v } -declare @llvm.vp.fdiv.nxv7f64(, , , i32) - define @vfdiv_vv_nxv7f64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_nxv7f64: ; CHECK: # %bb.0: @@ -1701,8 +1660,6 @@ define @vfdiv_vv_nxv7f64( %va, %v } -declare @llvm.vp.fdiv.nxv8f64(, , , i32) - define @vfdiv_vv_nxv8f64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv.ll index 62fb9df7a623e..71d119d9aff6b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfdiv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfdiv.nxv1f16.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfdiv_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfdiv_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv2f16.nxv2f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfdiv_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv2f16.nxv2f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv4f16.nxv4f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfdiv_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv4f16.nxv4f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv8f16.nxv8f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfdiv_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv8f16.nxv8f16( - , - , - , - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfdiv_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv16f16.nxv16f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfdiv_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv16f16.nxv16f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv32f16.nxv32f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfdiv_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv32f16.nxv32f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -305,12 +227,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv1f32.nxv1f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfdiv_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -329,13 +245,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv1f32.nxv1f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -355,12 
+264,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv2f32.nxv2f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfdiv_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -379,13 +282,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv2f32.nxv2f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -405,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv4f32.nxv4f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfdiv_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -429,13 +319,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv4f32.nxv4f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -455,12 +338,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv8f32.nxv8f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfdiv_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -479,13 +356,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv8f32.nxv8f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -505,12 +375,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv16f32.nxv16f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfdiv_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfdiv_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -529,13 +393,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -556,12 +413,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv1f64.nxv1f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfdiv_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -580,13 +431,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv1f64.nxv1f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -606,12 +450,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv2f64.nxv2f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfdiv_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -630,13 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv2f64.nxv2f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -656,12 +487,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv4f64.nxv4f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfdiv_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -680,13 +505,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv4f64.nxv4f64( - , - , - , - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfdiv_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -706,12 +524,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv8f64.nxv8f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfdiv_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -730,13 +542,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv8f64.nxv8f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -757,12 +562,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv1f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfdiv_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -781,13 +580,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv1f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -807,12 +599,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv2f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfdiv_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -831,13 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv2f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -857,12 +636,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfdiv.nxv4f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfdiv_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -881,13 +654,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv4f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -907,12 +673,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv8f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfdiv_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -931,13 +691,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv8f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -957,12 +710,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv16f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfdiv_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -981,13 +728,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv16f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -1007,12 +747,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv32f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfdiv_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # 
%entry @@ -1031,13 +765,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv32f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -1057,12 +784,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv1f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfdiv_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -1081,13 +802,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv1f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -1107,12 +821,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv2f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfdiv_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -1131,13 +839,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv2f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -1157,12 +858,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv4f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfdiv_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -1181,13 +876,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv4f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen 
%4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -1207,12 +895,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv8f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfdiv_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1231,13 +913,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv8f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1257,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv16f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfdiv_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -1281,13 +950,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv16f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -1307,12 +969,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv1f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfdiv_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1331,13 +987,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv1f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1357,12 +1006,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv2f64.f64( - , - , - double, - iXLen, 
iXLen); - define @intrinsic_vfdiv_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1381,13 +1024,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv2f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1407,12 +1043,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv4f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfdiv_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -1431,13 +1061,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv4f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -1457,12 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.nxv8f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfdiv_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry @@ -1481,13 +1098,6 @@ entry: ret %a } -declare @llvm.riscv.vfdiv.mask.nxv8f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfdiv_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfirst.ll b/llvm/test/CodeGen/RISCV/rvv/vfirst.ll index c510121ee3ebe..5ad4c2d41b40f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfirst.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfirst.ll @@ -4,10 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: 
-verify-machineinstrs | FileCheck %s -declare iXLen @llvm.riscv.vfirst.iXLen.nxv1i1( - , - iXLen); - define iXLen @intrinsic_vfirst_m_nxv1i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfirst_m_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -35,11 +31,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv1i1( - , - , - iXLen); - define iXLen @intrinsic_vfirst_mask_m_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -71,10 +62,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vfirst.iXLen.nxv2i1( - , - iXLen); - define iXLen @intrinsic_vfirst_m_nxv2i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfirst_m_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -89,11 +76,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv2i1( - , - , - iXLen); - define iXLen @intrinsic_vfirst_mask_m_nxv2i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -111,10 +93,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vfirst.iXLen.nxv4i1( - , - iXLen); - define iXLen @intrinsic_vfirst_m_nxv4i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfirst_m_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -129,11 +107,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv4i1( - , - , - iXLen); - define iXLen @intrinsic_vfirst_mask_m_nxv4i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -151,10 +124,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vfirst.iXLen.nxv8i1( - , - iXLen); - define iXLen @intrinsic_vfirst_m_nxv8i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfirst_m_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -169,11 +138,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv8i1( - , - , - iXLen); - define iXLen @intrinsic_vfirst_mask_m_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv8i1: ; CHECK: # 
%bb.0: # %entry @@ -191,10 +155,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vfirst.iXLen.nxv16i1( - , - iXLen); - define iXLen @intrinsic_vfirst_m_nxv16i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfirst_m_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -209,11 +169,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv16i1( - , - , - iXLen); - define iXLen @intrinsic_vfirst_mask_m_nxv16i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -231,10 +186,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vfirst.iXLen.nxv32i1( - , - iXLen); - define iXLen @intrinsic_vfirst_m_nxv32i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfirst_m_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -249,11 +200,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv32i1( - , - , - iXLen); - define iXLen @intrinsic_vfirst_mask_m_nxv32i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -271,10 +217,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vfirst.iXLen.nxv64i1( - , - iXLen); - define iXLen @intrinsic_vfirst_m_nxv64i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfirst_m_nxv64i1: ; CHECK: # %bb.0: # %entry @@ -289,11 +231,6 @@ entry: ret iXLen %a } -declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv64i1( - , - , - iXLen); - define iXLen @intrinsic_vfirst_mask_m_nxv64i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv64i1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfma-vp-combine.ll b/llvm/test/CodeGen/RISCV/rvv/vfma-vp-combine.ll index 7e580d1057525..91b4080a4ed84 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfma-vp-combine.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfma-vp-combine.ll @@ -4,10 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64 %s -declare 
@llvm.vp.fma.nxv1f64(, , , , i32) -declare @llvm.vp.fneg.nxv1f64(, , i32) -declare @llvm.vp.fmul.nxv1f64(, , , i32) - ; (-N0 * -N1) + N2 --> (N0 * N1) + N2 define @test1( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: test1: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll index 728fa07a7d4e5..c25a0d47c5c53 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll @@ -12,8 +12,6 @@ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ ; RUN: --check-prefixes=CHECK,ZVFHMIN -declare @llvm.vp.fma.nxv1bf16(, , , , i32) - define @vfma_vv_nxv1bf16( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv1bf16: ; CHECK: # %bb.0: @@ -126,8 +124,6 @@ define @vfma_vf_nxv1bf16_unmasked_commute( %v } -declare @llvm.vp.fma.nxv2bf16(, , , , i32) - define @vfma_vv_nxv2bf16( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv2bf16: ; CHECK: # %bb.0: @@ -240,8 +236,6 @@ define @vfma_vf_nxv2bf16_unmasked_commute( %v } -declare @llvm.vp.fma.nxv4bf16(, , , , i32) - define @vfma_vv_nxv4bf16( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv4bf16: ; CHECK: # %bb.0: @@ -354,8 +348,6 @@ define @vfma_vf_nxv4bf16_unmasked_commute( %v } -declare @llvm.vp.fma.nxv8bf16(, , , , i32) - define @vfma_vv_nxv8bf16( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv8bf16: ; CHECK: # %bb.0: @@ -468,8 +460,6 @@ define @vfma_vf_nxv8bf16_unmasked_commute( %v } -declare @llvm.vp.fma.nxv16bf16(, , , , i32) - define @vfma_vv_nxv16bf16( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv16bf16: ; CHECK: # %bb.0: @@ -586,8 +576,6 @@ define @vfma_vf_nxv16bf16_unmasked_commute( %v } -declare @llvm.vp.fma.nxv32bf16(, , , , i32) - define @vfma_vv_nxv32bf16( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv32bf16: ; CHECK: # %bb.0: @@ -1280,8 +1268,6 @@ define @vfma_vf_nxv32bf16_unmasked_commute( %v } -declare @llvm.vp.fma.nxv1f16(, , , , i32) 
- define @vfma_vv_nxv1f16( %va, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfma_vv_nxv1f16: ; ZVFH: # %bb.0: @@ -1431,8 +1417,6 @@ define @vfma_vf_nxv1f16_unmasked_commute( ret %v } -declare @llvm.vp.fma.nxv2f16(, , , , i32) - define @vfma_vv_nxv2f16( %va, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfma_vv_nxv2f16: ; ZVFH: # %bb.0: @@ -1582,8 +1566,6 @@ define @vfma_vf_nxv2f16_unmasked_commute( ret %v } -declare @llvm.vp.fma.nxv4f16(, , , , i32) - define @vfma_vv_nxv4f16( %va, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfma_vv_nxv4f16: ; ZVFH: # %bb.0: @@ -1733,8 +1715,6 @@ define @vfma_vf_nxv4f16_unmasked_commute( ret %v } -declare @llvm.vp.fma.nxv8f16(, , , , i32) - define @vfma_vv_nxv8f16( %va, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfma_vv_nxv8f16: ; ZVFH: # %bb.0: @@ -1884,8 +1864,6 @@ define @vfma_vf_nxv8f16_unmasked_commute( ret %v } -declare @llvm.vp.fma.nxv16f16(, , , , i32) - define @vfma_vv_nxv16f16( %va, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfma_vv_nxv16f16: ; ZVFH: # %bb.0: @@ -2039,8 +2017,6 @@ define @vfma_vf_nxv16f16_unmasked_commute( %v } -declare @llvm.vp.fma.nxv32f16(, , , , i32) - define @vfma_vv_nxv32f16( %va, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfma_vv_nxv32f16: ; ZVFH: # %bb.0: @@ -2772,8 +2748,6 @@ define @vfma_vf_nxv32f16_unmasked_commute( %v } -declare @llvm.vp.fma.nxv1f32(, , , , i32) - define @vfma_vv_nxv1f32( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv1f32: ; CHECK: # %bb.0: @@ -2843,8 +2817,6 @@ define @vfma_vf_nxv1f32_unmasked_commute( %v } -declare @llvm.vp.fma.nxv2f32(, , , , i32) - define @vfma_vv_nxv2f32( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv2f32: ; CHECK: # %bb.0: @@ -2914,8 +2886,6 @@ define @vfma_vf_nxv2f32_unmasked_commute( %v } -declare @llvm.vp.fma.nxv4f32(, , , , i32) - define @vfma_vv_nxv4f32( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv4f32: ; CHECK: # %bb.0: @@ -2985,8 +2955,6 @@ define @vfma_vf_nxv4f32_unmasked_commute( 
%v } -declare @llvm.vp.fma.nxv8f32(, , , , i32) - define @vfma_vv_nxv8f32( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv8f32: ; CHECK: # %bb.0: @@ -3056,8 +3024,6 @@ define @vfma_vf_nxv8f32_unmasked_commute( %v } -declare @llvm.vp.fma.nxv16f32(, , , , i32) - define @vfma_vv_nxv16f32( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv16f32: ; CHECK: # %bb.0: @@ -3129,8 +3095,6 @@ define @vfma_vf_nxv16f32_unmasked_commute( %v } -declare @llvm.vp.fma.nxv1f64(, , , , i32) - define @vfma_vv_nxv1f64( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv1f64: ; CHECK: # %bb.0: @@ -3200,8 +3164,6 @@ define @vfma_vf_nxv1f64_unmasked_commute( %v } -declare @llvm.vp.fma.nxv2f64(, , , , i32) - define @vfma_vv_nxv2f64( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv2f64: ; CHECK: # %bb.0: @@ -3271,8 +3233,6 @@ define @vfma_vf_nxv2f64_unmasked_commute( %v } -declare @llvm.vp.fma.nxv4f64(, , , , i32) - define @vfma_vv_nxv4f64( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv4f64: ; CHECK: # %bb.0: @@ -3342,8 +3302,6 @@ define @vfma_vf_nxv4f64_unmasked_commute( %v } -declare @llvm.vp.fma.nxv7f64(, , , , i32) - define @vfma_vv_nxv7f64( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv7f64: ; CHECK: # %bb.0: @@ -3367,8 +3325,6 @@ define @vfma_vv_nxv7f64_unmasked( %va ret %v } -declare @llvm.vp.fma.nxv8f64(, , , , i32) - define @vfma_vv_nxv8f64( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv8f64: ; CHECK: # %bb.0: @@ -3440,8 +3396,6 @@ define @vfma_vf_nxv8f64_unmasked_commute( %v } -declare @llvm.vp.fma.nxv16f64(, , , , i32) - define @vfma_vv_nxv16f64( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv16f64: ; CHECK: # %bb.0: @@ -3657,8 +3611,6 @@ define @vfma_vv_nxv16f64_unmasked( ret %v } -declare @llvm.vp.fneg.nxv1f16(, , i32) - define @vfmsub_vv_nxv1f16( %va, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmsub_vv_nxv1f16: ; ZVFH: # %bb.0: @@ -4524,8 +4476,6 @@ 
define @vfnmsub_vf_nxv1f16_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv2f16(, , i32) - define @vfmsub_vv_nxv2f16( %va, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmsub_vv_nxv2f16: ; ZVFH: # %bb.0: @@ -5391,8 +5341,6 @@ define @vfnmsub_vf_nxv2f16_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv4f16(, , i32) - define @vfmsub_vv_nxv4f16( %va, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmsub_vv_nxv4f16: ; ZVFH: # %bb.0: @@ -6258,8 +6206,6 @@ define @vfnmsub_vf_nxv4f16_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv8f16(, , i32) - define @vfmsub_vv_nxv8f16( %va, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmsub_vv_nxv8f16: ; ZVFH: # %bb.0: @@ -7125,8 +7071,6 @@ define @vfnmsub_vf_nxv8f16_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv16f16(, , i32) - define @vfmsub_vv_nxv16f16( %va, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmsub_vv_nxv16f16: ; ZVFH: # %bb.0: @@ -8005,8 +7949,6 @@ define @vfnmsub_vf_nxv16f16_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv32f16(, , i32) - define @vfmsub_vv_nxv32f16( %va, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmsub_vv_nxv32f16: ; ZVFH: # %bb.0: @@ -11822,8 +11764,6 @@ define @vfnmsub_vf_nxv32f16_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv1f32(, , i32) - define @vfmsub_vv_nxv1f32( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv1f32: ; CHECK: # %bb.0: @@ -12213,8 +12153,6 @@ define @vfnmsub_vf_nxv1f32_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv2f32(, , i32) - define @vfmsub_vv_nxv2f32( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv2f32: ; CHECK: # %bb.0: @@ -12604,8 +12542,6 @@ define @vfnmsub_vf_nxv2f32_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv4f32(, , i32) - define @vfmsub_vv_nxv4f32( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv4f32: ; CHECK: # %bb.0: @@ -12995,8 +12931,6 @@ define @vfnmsub_vf_nxv4f32_neg_splat_unmasked_commute( %v } -declare 
@llvm.vp.fneg.nxv8f32(, , i32) - define @vfmsub_vv_nxv8f32( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv8f32: ; CHECK: # %bb.0: @@ -13386,8 +13320,6 @@ define @vfnmsub_vf_nxv8f32_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv16f32(, , i32) - define @vfmsub_vv_nxv16f32( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv16f32: ; CHECK: # %bb.0: @@ -13787,8 +13719,6 @@ define @vfnmsub_vf_nxv16f32_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv1f64(, , i32) - define @vfmsub_vv_nxv1f64( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv1f64: ; CHECK: # %bb.0: @@ -14178,8 +14108,6 @@ define @vfnmsub_vf_nxv1f64_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv2f64(, , i32) - define @vfmsub_vv_nxv2f64( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv2f64: ; CHECK: # %bb.0: @@ -14569,8 +14497,6 @@ define @vfnmsub_vf_nxv2f64_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv4f64(, , i32) - define @vfmsub_vv_nxv4f64( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv4f64: ; CHECK: # %bb.0: @@ -14960,8 +14886,6 @@ define @vfnmsub_vf_nxv4f64_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv8f64(, , i32) - define @vfmsub_vv_nxv8f64( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmacc-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmacc-bf.ll index 13821d745846f..6c66cfcaef07b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmacc-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmacc-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfmacc.nxv1bf16.nxv1bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; 
CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv2bf16.nxv2bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv4bf16.nxv4bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv8bf16.nxv8bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16( - , - , - , - , - 
iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv16bf16.nxv16bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv1bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1bf16_bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv1bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1bf16_bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv2bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2bf16_bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv2bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, %3, 
iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2bf16_bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv4bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4bf16_bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv4bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4bf16_bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv8bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv8bf16_bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv8bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv8bf16_bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv16bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv16bf16_bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv16bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv16bf16_bf16_nxv16bf16: ; CHECK: # %bb.0: # 
%entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmacc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmacc-vp.ll index ef583b748b9c2..28a8ef0087d85 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmacc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmacc-vp.ll @@ -4,11 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare @llvm.vp.fma.nxv1f16(, , , , i32) -declare @llvm.vp.fneg.nxv1f16(, , i32) -declare @llvm.vp.merge.nxv1f16(, , , i32) -declare @llvm.vp.select.nxv1f16(, , , i32) - define @vfmacc_vv_nxv1f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv1f16: ; CHECK: # %bb.0: @@ -115,11 +110,6 @@ define @vfmacc_vf_nxv1f16_commute_ta( %va ret %u } -declare @llvm.vp.fma.nxv2f16(, , , , i32) -declare @llvm.vp.fneg.nxv2f16(, , i32) -declare @llvm.vp.merge.nxv2f16(, , , i32) -declare @llvm.vp.select.nxv2f16(, , , i32) - define @vfmacc_vv_nxv2f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv2f16: ; CHECK: # %bb.0: @@ -226,11 +216,6 @@ define @vfmacc_vf_nxv2f16_commute_ta( %va ret %u } -declare @llvm.vp.fma.nxv4f16(, , , , i32) -declare @llvm.vp.fneg.nxv4f16(, , i32) -declare @llvm.vp.merge.nxv4f16(, , , i32) -declare @llvm.vp.select.nxv4f16(, , , i32) - define @vfmacc_vv_nxv4f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv4f16: ; CHECK: # %bb.0: @@ -337,11 +322,6 @@ define @vfmacc_vf_nxv4f16_commute_ta( %va ret %u } -declare @llvm.vp.fma.nxv8f16(, , , , i32) -declare @llvm.vp.fneg.nxv8f16(, , i32) -declare @llvm.vp.merge.nxv8f16(, , , i32) -declare @llvm.vp.select.nxv8f16(, , , i32) - define @vfmacc_vv_nxv8f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv8f16: ; CHECK: # %bb.0: @@ -448,11 +428,6 @@ define @vfmacc_vf_nxv8f16_commute_ta( %va ret %u } -declare @llvm.vp.fma.nxv16f16(, , , , i32) -declare @llvm.vp.fneg.nxv16f16(, , i32) -declare @llvm.vp.merge.nxv16f16(, , , i32) -declare @llvm.vp.select.nxv16f16(, , , i32) - define 
@vfmacc_vv_nxv16f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv16f16: ; CHECK: # %bb.0: @@ -559,11 +534,6 @@ define @vfmacc_vf_nxv16f16_commute_ta( ret %u } -declare @llvm.vp.fma.nxv32f16(, , , , i32) -declare @llvm.vp.fneg.nxv32f16(, , i32) -declare @llvm.vp.merge.nxv32f16(, , , i32) -declare @llvm.vp.select.nxv32f16(, , , i32) - define @vfmacc_vv_nxv32f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv32f16: ; CHECK: # %bb.0: @@ -673,11 +643,6 @@ define @vfmacc_vf_nxv32f16_commute_ta( ret %u } -declare @llvm.vp.fma.nxv1f32(, , , , i32) -declare @llvm.vp.fneg.nxv1f32(, , i32) -declare @llvm.vp.merge.nxv1f32(, , , i32) -declare @llvm.vp.select.nxv1f32(, , , i32) - define @vfmacc_vv_nxv1f32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv1f32: ; CHECK: # %bb.0: @@ -784,11 +749,6 @@ define @vfmacc_vf_nxv1f32_commute_ta( % ret %u } -declare @llvm.vp.fma.nxv2f32(, , , , i32) -declare @llvm.vp.fneg.nxv2f32(, , i32) -declare @llvm.vp.merge.nxv2f32(, , , i32) -declare @llvm.vp.select.nxv2f32(, , , i32) - define @vfmacc_vv_nxv2f32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv2f32: ; CHECK: # %bb.0: @@ -895,11 +855,6 @@ define @vfmacc_vf_nxv2f32_commute_ta( % ret %u } -declare @llvm.vp.fma.nxv4f32(, , , , i32) -declare @llvm.vp.fneg.nxv4f32(, , i32) -declare @llvm.vp.merge.nxv4f32(, , , i32) -declare @llvm.vp.select.nxv4f32(, , , i32) - define @vfmacc_vv_nxv4f32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv4f32: ; CHECK: # %bb.0: @@ -1006,11 +961,6 @@ define @vfmacc_vf_nxv4f32_commute_ta( % ret %u } -declare @llvm.vp.fma.nxv8f32(, , , , i32) -declare @llvm.vp.fneg.nxv8f32(, , i32) -declare @llvm.vp.merge.nxv8f32(, , , i32) -declare @llvm.vp.select.nxv8f32(, , , i32) - define @vfmacc_vv_nxv8f32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv8f32: ; CHECK: # %bb.0: @@ -1117,11 +1067,6 @@ define @vfmacc_vf_nxv8f32_commute_ta( % ret %u } -declare 
@llvm.vp.fma.nxv16f32(, , , , i32) -declare @llvm.vp.fneg.nxv16f32(, , i32) -declare @llvm.vp.merge.nxv16f32(, , , i32) -declare @llvm.vp.select.nxv16f32(, , , i32) - define @vfmacc_vv_nxv16f32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv16f32: ; CHECK: # %bb.0: @@ -1231,11 +1176,6 @@ define @vfmacc_vf_nxv16f32_commute_ta( %u } -declare @llvm.vp.fma.nxv1f64(, , , , i32) -declare @llvm.vp.fneg.nxv1f64(, , i32) -declare @llvm.vp.merge.nxv1f64(, , , i32) -declare @llvm.vp.select.nxv1f64(, , , i32) - define @vfmacc_vv_nxv1f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv1f64: ; CHECK: # %bb.0: @@ -1342,11 +1282,6 @@ define @vfmacc_vf_nxv1f64_commute_ta( ret %u } -declare @llvm.vp.fma.nxv2f64(, , , , i32) -declare @llvm.vp.fneg.nxv2f64(, , i32) -declare @llvm.vp.merge.nxv2f64(, , , i32) -declare @llvm.vp.select.nxv2f64(, , , i32) - define @vfmacc_vv_nxv2f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv2f64: ; CHECK: # %bb.0: @@ -1453,11 +1388,6 @@ define @vfmacc_vf_nxv2f64_commute_ta( ret %u } -declare @llvm.vp.fma.nxv4f64(, , , , i32) -declare @llvm.vp.fneg.nxv4f64(, , i32) -declare @llvm.vp.merge.nxv4f64(, , , i32) -declare @llvm.vp.select.nxv4f64(, , , i32) - define @vfmacc_vv_nxv4f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv4f64: ; CHECK: # %bb.0: @@ -1564,11 +1494,6 @@ define @vfmacc_vf_nxv4f64_commute_ta( ret %u } -declare @llvm.vp.fma.nxv8f64(, , , , i32) -declare @llvm.vp.fneg.nxv8f64(, , i32) -declare @llvm.vp.merge.nxv8f64(, , , i32) -declare @llvm.vp.select.nxv8f64(, , , i32) - define @vfmacc_vv_nxv8f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmacc.ll b/llvm/test/CodeGen/RISCV/rvv/vfmacc.ll index 1f0db104df7aa..ea11cb177b49d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmacc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmacc.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 
-mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfmacc.nxv1f16.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv2f16.nxv2f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv2f16.nxv2f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv4f16.nxv4f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv4f16.nxv4f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv8f16.nxv8f16( - , - , - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfmacc_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv8f16.nxv8f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv16f16.nxv16f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv16f16.nxv16f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv1f32.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv2f32.nxv2f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: 
# %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv4f32.nxv4f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv8f32.nxv8f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv1f64.nxv1f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64( - , - , - , - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -504,12 +374,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv2f64.nxv2f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -528,13 +392,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -554,12 +411,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv4f64.nxv4f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -578,13 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -604,12 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv1f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -628,13 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv1f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # 
%entry @@ -654,12 +485,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv2f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -678,13 +503,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv2f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -704,12 +522,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv4f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -728,13 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv4f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -754,12 +559,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv8f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -778,13 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv8f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -804,12 +596,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv16f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vf_nxv16f16_f16_nxv16f16( %0, half 
%1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -828,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv16f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -854,12 +633,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv1f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -878,13 +651,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv1f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -904,12 +670,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv2f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -928,13 +688,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv2f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -954,12 +707,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv4f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -978,13 +725,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfmacc.mask.nxv4f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -1004,12 +744,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv8f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -1028,13 +762,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv8f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -1054,12 +781,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv1f64.f64( - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -1078,13 +799,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv1f64.f64( - , - double, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -1104,12 +818,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv2f64.f64( - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -1128,13 +836,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv2f64.f64( - , - double, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, 
iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -1154,12 +855,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.nxv4f64.f64( - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -1178,13 +873,6 @@ entry: ret %a } -declare @llvm.riscv.vfmacc.mask.nxv4f64.f64( - , - double, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmacc_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-bf.ll index 09fc199c29d23..6d8324a1df188 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmadd-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfmadd.nxv1bf16.nxv1bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv2bf16.nxv2bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # 
%entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv4bf16.nxv4bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv8bf16.nxv8bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv16bf16.nxv16bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16( - , - , - , - , - 
iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv1bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1bf16_bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv1bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1bf16_bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv2bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2bf16_bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv2bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2bf16_bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv4bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4bf16_bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv4bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, %3, iXLen 
%4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4bf16_bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv8bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv8bf16_bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv8bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8bf16_bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv16bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv16bf16_bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv16bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv16bf16_bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-constrained-sdnode.ll index 11aebe9ae2a96..99c9db08aac6c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmadd-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd-constrained-sdnode.ll @@ -53,7 +53,6 @@ define @vfmadd_vf_nxv1bf16( %va, %vd } - define @vfmadd_vv_nxv2bf16( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_nxv2bf16: ; CHECK: # %bb.0: @@ -90,7 +89,6 @@ define @vfmadd_vf_nxv2bf16( %va, %vd } - define @vfmadd_vv_nxv4bf16( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_nxv4bf16: ; CHECK: # %bb.0: 
@@ -127,7 +125,6 @@ define @vfmadd_vf_nxv4bf16( %va, %vd } - define @vfmadd_vv_nxv8bf16( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_nxv8bf16: ; CHECK: # %bb.0: @@ -164,7 +161,6 @@ define @vfmadd_vf_nxv8bf16( %va, %vd } - define @vfmadd_vv_nxv16bf16( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_nxv16bf16: ; CHECK: # %bb.0: @@ -217,7 +213,6 @@ define @vfmadd_vf_nxv16bf16( %va, < ret %vd } - define @vfmadd_vv_nxv32bf16( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_nxv32bf16: ; CHECK: # %bb.0: @@ -395,8 +390,6 @@ define @vfmadd_vf_nxv32bf16( %va, < ret %vd } -declare @llvm.experimental.constrained.fma.nxv1f16(, , , metadata, metadata) - define @vfmadd_vv_nxv1f16( %va, %vb, %vc) strictfp { ; ZVFH-LABEL: vfmadd_vv_nxv1f16: ; ZVFH: # %bb.0: @@ -445,8 +438,6 @@ define @vfmadd_vf_nxv1f16( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv2f16(, , , metadata, metadata) - define @vfmadd_vv_nxv2f16( %va, %vb, %vc) strictfp { ; ZVFH-LABEL: vfmadd_vv_nxv2f16: ; ZVFH: # %bb.0: @@ -495,8 +486,6 @@ define @vfmadd_vf_nxv2f16( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv4f16(, , , metadata, metadata) - define @vfmadd_vv_nxv4f16( %va, %vb, %vc) strictfp { ; ZVFH-LABEL: vfmadd_vv_nxv4f16: ; ZVFH: # %bb.0: @@ -545,8 +534,6 @@ define @vfmadd_vf_nxv4f16( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv8f16(, , , metadata, metadata) - define @vfmadd_vv_nxv8f16( %va, %vb, %vc) strictfp { ; ZVFH-LABEL: vfmadd_vv_nxv8f16: ; ZVFH: # %bb.0: @@ -595,8 +582,6 @@ define @vfmadd_vf_nxv8f16( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv16f16(, , , metadata, metadata) - define @vfmadd_vv_nxv16f16( %va, %vb, %vc) strictfp { ; ZVFH-LABEL: vfmadd_vv_nxv16f16: ; ZVFH: # %bb.0: @@ -661,8 +646,6 @@ define @vfmadd_vf_nxv16f16( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv32f16(, , , metadata, metadata) - define @vfmadd_vv_nxv32f16( %va, %vb, %vc) strictfp { ; ZVFH-LABEL: vfmadd_vv_nxv32f16: ; ZVFH: # %bb.0: @@ -853,8 +836,6 @@ 
define @vfmadd_vf_nxv32f16( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv1f32(, , , metadata, metadata) - define @vfmadd_vv_nxv1f32( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_nxv1f32: ; CHECK: # %bb.0: @@ -877,8 +858,6 @@ define @vfmadd_vf_nxv1f32( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv2f32(, , , metadata, metadata) - define @vfmadd_vv_nxv2f32( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_nxv2f32: ; CHECK: # %bb.0: @@ -901,8 +880,6 @@ define @vfmadd_vf_nxv2f32( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv4f32(, , , metadata, metadata) - define @vfmadd_vv_nxv4f32( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_nxv4f32: ; CHECK: # %bb.0: @@ -925,8 +902,6 @@ define @vfmadd_vf_nxv4f32( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv8f32(, , , metadata, metadata) - define @vfmadd_vv_nxv8f32( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_nxv8f32: ; CHECK: # %bb.0: @@ -949,8 +924,6 @@ define @vfmadd_vf_nxv8f32( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv16f32(, , , metadata, metadata) - define @vfmadd_vv_nxv16f32( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_nxv16f32: ; CHECK: # %bb.0: @@ -974,8 +947,6 @@ define @vfmadd_vf_nxv16f32( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv1f64(, , , metadata, metadata) - define @vfmadd_vv_nxv1f64( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_nxv1f64: ; CHECK: # %bb.0: @@ -998,8 +969,6 @@ define @vfmadd_vf_nxv1f64( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv2f64(, , , metadata, metadata) - define @vfmadd_vv_nxv2f64( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_nxv2f64: ; CHECK: # %bb.0: @@ -1022,8 +991,6 @@ define @vfmadd_vf_nxv2f64( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv4f64(, , , metadata, metadata) - define @vfmadd_vv_nxv4f64( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_nxv4f64: ; CHECK: # %bb.0: @@ -1046,8 +1013,6 @@ define @vfmadd_vf_nxv4f64( %va, %vd } -declare 
@llvm.experimental.constrained.fma.nxv8f64(, , , metadata, metadata) - define @vfmadd_vv_nxv8f64( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfmadd_vv_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll index c0d3b55f5d35e..b8f138f4bd52e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll @@ -61,8 +61,6 @@ define @vfmadd_vf_nxv1bf16( %va, %vd } -declare @llvm.fma.v2bf16(, , ) - define @vfmadd_vv_nxv2bf16( %va, %vb, %vc) { ; CHECK-LABEL: vfmadd_vv_nxv2bf16: ; CHECK: # %bb.0: @@ -97,8 +95,6 @@ define @vfmadd_vf_nxv2bf16( %va, %vd } -declare @llvm.fma.v4bf16(, , ) - define @vfmadd_vv_nxv4bf16( %va, %vb, %vc) { ; CHECK-LABEL: vfmadd_vv_nxv4bf16: ; CHECK: # %bb.0: @@ -133,8 +129,6 @@ define @vfmadd_vf_nxv4bf16( %va, %vd } -declare @llvm.fma.v8bf16(, , ) - define @vfmadd_vv_nxv8bf16( %va, %vb, %vc) { ; CHECK-LABEL: vfmadd_vv_nxv8bf16: ; CHECK: # %bb.0: @@ -169,8 +163,6 @@ define @vfmadd_vf_nxv8bf16( %va, %vd } -declare @llvm.fma.v16bf16(, , ) - define @vfmadd_vv_nxv16bf16( %va, %vb, %vc) { ; CHECK-LABEL: vfmadd_vv_nxv16bf16: ; CHECK: # %bb.0: @@ -205,8 +197,6 @@ define @vfmadd_vf_nxv16bf16( %va, < ret %vd } -declare @llvm.fma.v32bf16(, , ) - define @vfmadd_vv_nxv32bf16( %va, %vb, %vc) { ; ZVFH-LABEL: vfmadd_vv_nxv32bf16: ; ZVFH: # %bb.0: @@ -571,8 +561,6 @@ define @vfmadd_vf_nxv32bf16( %va, < ret %vd } -declare @llvm.fma.v1f16(, , ) - define @vfmadd_vv_nxv1f16( %va, %vb, %vc) { ; ZVFH-LABEL: vfmadd_vv_nxv1f16: ; ZVFH: # %bb.0: @@ -641,8 +629,6 @@ define @vfmadd_vf_nxv1f16( %va, %vd } -declare @llvm.fma.v2f16(, , ) - define @vfmadd_vv_nxv2f16( %va, %vb, %vc) { ; ZVFH-LABEL: vfmadd_vv_nxv2f16: ; ZVFH: # %bb.0: @@ -689,8 +675,6 @@ define @vfmadd_vf_nxv2f16( %va, %vd } -declare @llvm.fma.v4f16(, , ) - define @vfmadd_vv_nxv4f16( %va, %vb, %vc) { ; ZVFH-LABEL: vfmadd_vv_nxv4f16: ; ZVFH: # %bb.0: @@ -737,8 +721,6 @@ define @vfmadd_vf_nxv4f16( 
%va, %vd } -declare @llvm.fma.v8f16(, , ) - define @vfmadd_vv_nxv8f16( %va, %vb, %vc) { ; ZVFH-LABEL: vfmadd_vv_nxv8f16: ; ZVFH: # %bb.0: @@ -785,8 +767,6 @@ define @vfmadd_vf_nxv8f16( %va, %vd } -declare @llvm.fma.v16f16(, , ) - define @vfmadd_vv_nxv16f16( %va, %vb, %vc) { ; ZVFH-LABEL: vfmadd_vv_nxv16f16: ; ZVFH: # %bb.0: @@ -833,8 +813,6 @@ define @vfmadd_vf_nxv16f16( %va, %vd } -declare @llvm.fma.v32f16(, , ) - define @vfmadd_vv_nxv32f16( %va, %vb, %vc) { ; ZVFH-LABEL: vfmadd_vv_nxv32f16: ; ZVFH: # %bb.0: @@ -1028,8 +1006,6 @@ define @vfmadd_vf_nxv32f16( %va, %vd } -declare @llvm.fma.v1f32(, , ) - define @vfmadd_vv_nxv1f32( %va, %vb, %vc) { ; CHECK-LABEL: vfmadd_vv_nxv1f32: ; CHECK: # %bb.0: @@ -1052,8 +1028,6 @@ define @vfmadd_vf_nxv1f32( %va, %vd } -declare @llvm.fma.v2f32(, , ) - define @vfmadd_vv_nxv2f32( %va, %vb, %vc) { ; CHECK-LABEL: vfmadd_vv_nxv2f32: ; CHECK: # %bb.0: @@ -1076,8 +1050,6 @@ define @vfmadd_vf_nxv2f32( %va, %vd } -declare @llvm.fma.v4f32(, , ) - define @vfmadd_vv_nxv4f32( %va, %vb, %vc) { ; CHECK-LABEL: vfmadd_vv_nxv4f32: ; CHECK: # %bb.0: @@ -1100,8 +1072,6 @@ define @vfmadd_vf_nxv4f32( %va, %vd } -declare @llvm.fma.v8f32(, , ) - define @vfmadd_vv_nxv8f32( %va, %vb, %vc) { ; CHECK-LABEL: vfmadd_vv_nxv8f32: ; CHECK: # %bb.0: @@ -1124,8 +1094,6 @@ define @vfmadd_vf_nxv8f32( %va, %vd } -declare @llvm.fma.v16f32(, , ) - define @vfmadd_vv_nxv16f32( %va, %vb, %vc) { ; CHECK-LABEL: vfmadd_vv_nxv16f32: ; CHECK: # %bb.0: @@ -1149,8 +1117,6 @@ define @vfmadd_vf_nxv16f32( %va, %vd } -declare @llvm.fma.v1f64(, , ) - define @vfmadd_vv_nxv1f64( %va, %vb, %vc) { ; CHECK-LABEL: vfmadd_vv_nxv1f64: ; CHECK: # %bb.0: @@ -1173,8 +1139,6 @@ define @vfmadd_vf_nxv1f64( %va, %vd } -declare @llvm.fma.v2f64(, , ) - define @vfmadd_vv_nxv2f64( %va, %vb, %vc) { ; CHECK-LABEL: vfmadd_vv_nxv2f64: ; CHECK: # %bb.0: @@ -1197,8 +1161,6 @@ define @vfmadd_vf_nxv2f64( %va, %vd } -declare @llvm.fma.v4f64(, , ) - define @vfmadd_vv_nxv4f64( %va, %vb, %vc) { ; CHECK-LABEL: 
vfmadd_vv_nxv4f64: ; CHECK: # %bb.0: @@ -1221,8 +1183,6 @@ define @vfmadd_vf_nxv4f64( %va, %vd } -declare @llvm.fma.v8f64(, , ) - define @vfmadd_vv_nxv8f64( %va, %vb, %vc) { ; CHECK-LABEL: vfmadd_vv_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd.ll index fb04888a84dea..0609dce18903f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmadd.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfmadd.nxv1f16.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv2f16.nxv2f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv2f16.nxv2f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv4f16.nxv4f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfmadd_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv4f16.nxv4f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv8f16.nxv8f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv8f16.nxv8f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv16f16.nxv16f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv16f16.nxv16f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv1f32.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv2f32.nxv2f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv4f32.nxv4f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv8f32.nxv8f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv1f64.nxv1f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -504,12 +374,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv2f64.nxv2f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -528,13 +392,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -554,12 +411,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv4f64.nxv4f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -578,13 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -604,12 +448,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfmadd.nxv1f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -628,13 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv1f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -654,12 +485,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv2f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -678,13 +503,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv2f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -704,12 +522,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv4f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -728,13 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv4f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -754,12 +559,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv8f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfmadd_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -778,13 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv8f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -804,12 +596,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv16f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -828,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv16f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -854,12 +633,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv1f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -878,13 +651,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv1f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -904,12 +670,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv2f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -928,13 +688,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv2f32.f32( - , - float, - , - , - 
iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -954,12 +707,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv4f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -978,13 +725,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv4f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -1004,12 +744,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv8f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -1028,13 +762,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv8f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -1054,12 +781,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv1f64.f64( - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -1078,13 +799,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv1f64.f64( - , - double, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfmadd_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -1104,12 +818,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv2f64.f64( - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -1128,13 +836,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv2f64.f64( - , - double, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -1154,12 +855,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.nxv4f64.f64( - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -1178,13 +873,6 @@ entry: ret %a } -declare @llvm.riscv.vfmadd.mask.nxv4f64.f64( - , - double, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmadd_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax-bf.ll index a337d3061ce78..dff9309194486 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmax-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmax-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfmax.nxv1bf16.nxv1bf16( - , - , - , - iXLen); - define @intrinsic_vfmax_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv2bf16.nxv2bf16( - , - , - , - iXLen); - define @intrinsic_vfmax_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv4bf16.nxv4bf16( - , - , - , - iXLen); - define @intrinsic_vfmax_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv8bf16.nxv8bf16( - , - , - , - iXLen); - define @intrinsic_vfmax_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfmax_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv16bf16.nxv16bf16( - , - , - , - iXLen); - define @intrinsic_vfmax_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv32bf16.nxv32bf16( - , - , - , - iXLen); - define @intrinsic_vfmax_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv32bf16_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -287,12 +203,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv1bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfmax_vf_nxv1bf16_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv1bf16_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -309,14 +219,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv1bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vf_nxv1bf16_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1bf16_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv2bf16.bf16( - , - , - bfloat, - 
iXLen); - define @intrinsic_vfmax_vf_nxv2bf16_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv2bf16_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv2bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vf_nxv2bf16_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2bf16_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv4bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfmax_vf_nxv4bf16_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv4bf16_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv4bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vf_nxv4bf16_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4bf16_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv8bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfmax_vf_nxv8bf16_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv8bf16_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv8bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vf_nxv8bf16_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8bf16_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv16bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfmax_vf_nxv16bf16_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv16bf16_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 
@@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv16bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vf_nxv16bf16_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv16bf16_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv32bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfmax_vf_nxv32bf16_nxv32bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv32bf16_nxv32bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -544,14 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv32bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vf_nxv32bf16_nxv32bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv32bf16_nxv32bf16_bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax-sdnode.ll index c06836f129005..63bfe1dfad5fc 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmax-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmax-sdnode.ll @@ -44,8 +44,6 @@ define @vfmax_nxv1bf16_vf( %a, bfloat ret %v } -declare @llvm.maxnum.nxv2bf16(, ) - define @vfmax_nxv2bf16_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv2bf16_vv: ; CHECK: # %bb.0: @@ -78,8 +76,6 @@ define @vfmax_nxv2bf16_vf( %a, bfloat ret %v } -declare @llvm.maxnum.nxv4bf16(, ) - define @vfmax_nxv4bf16_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv4bf16_vv: ; CHECK: # %bb.0: @@ -112,8 +108,6 @@ define @vfmax_nxv4bf16_vf( %a, bfloat ret %v } -declare @llvm.maxnum.nxv8bf16(, ) - define @vfmax_nxv8bf16_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv8bf16_vv: ; CHECK: # %bb.0: @@ -146,8 +140,6 @@ define @vfmax_nxv8bf16_vf( %a, bfloat ret %v } -declare @llvm.maxnum.nxv16bf16(, ) - define @vfmax_nxv16bf16_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv16bf16_vv: ; CHECK: # %bb.0: @@ -180,8 +172,6 @@ define @vfmax_nxv16bf16_vf( %a, bfl ret %v } 
-declare @llvm.maxnum.nxv32bf16(, ) - define @vfmax_nxv32bf16_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv32bf16_vv: ; CHECK: # %bb.0: @@ -261,8 +251,6 @@ define @vfmax_nxv32bf16_vf( %a, bfl ret %v } -declare @llvm.maxnum.nxv1f16(, ) - define @vfmax_nxv1f16_vv( %a, %b) { ; ZVFH-LABEL: vfmax_nxv1f16_vv: ; ZVFH: # %bb.0: @@ -307,8 +295,6 @@ define @vfmax_nxv1f16_vf( %a, half %b) { ret %v } -declare @llvm.maxnum.nxv2f16(, ) - define @vfmax_nxv2f16_vv( %a, %b) { ; ZVFH-LABEL: vfmax_nxv2f16_vv: ; ZVFH: # %bb.0: @@ -353,8 +339,6 @@ define @vfmax_nxv2f16_vf( %a, half %b) { ret %v } -declare @llvm.maxnum.nxv4f16(, ) - define @vfmax_nxv4f16_vv( %a, %b) { ; ZVFH-LABEL: vfmax_nxv4f16_vv: ; ZVFH: # %bb.0: @@ -399,8 +383,6 @@ define @vfmax_nxv4f16_vf( %a, half %b) { ret %v } -declare @llvm.maxnum.nxv8f16(, ) - define @vfmax_nxv8f16_vv( %a, %b) { ; ZVFH-LABEL: vfmax_nxv8f16_vv: ; ZVFH: # %bb.0: @@ -445,8 +427,6 @@ define @vfmax_nxv8f16_vf( %a, half %b) { ret %v } -declare @llvm.maxnum.nxv16f16(, ) - define @vfmax_nxv16f16_vv( %a, %b) { ; ZVFH-LABEL: vfmax_nxv16f16_vv: ; ZVFH: # %bb.0: @@ -491,8 +471,6 @@ define @vfmax_nxv16f16_vf( %a, half %b) ret %v } -declare @llvm.maxnum.nxv32f16(, ) - define @vfmax_nxv32f16_vv( %a, %b) { ; ZVFH-LABEL: vfmax_nxv32f16_vv: ; ZVFH: # %bb.0: @@ -584,8 +562,6 @@ define @vfmax_nxv32f16_vf( %a, half %b) ret %v } -declare @llvm.maxnum.nxv1f32(, ) - define @vfmax_nxv1f32_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv1f32_vv: ; CHECK: # %bb.0: @@ -608,8 +584,6 @@ define @vfmax_nxv1f32_vf( %a, float %b) ret %v } -declare @llvm.maxnum.nxv2f32(, ) - define @vfmax_nxv2f32_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv2f32_vv: ; CHECK: # %bb.0: @@ -632,8 +606,6 @@ define @vfmax_nxv2f32_vf( %a, float %b) ret %v } -declare @llvm.maxnum.nxv4f32(, ) - define @vfmax_nxv4f32_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv4f32_vv: ; CHECK: # %bb.0: @@ -656,8 +628,6 @@ define @vfmax_nxv4f32_vf( %a, float %b) ret %v } -declare @llvm.maxnum.nxv8f32(, ) - define @vfmax_nxv8f32_vv( %a, %b) { ; 
CHECK-LABEL: vfmax_nxv8f32_vv: ; CHECK: # %bb.0: @@ -680,8 +650,6 @@ define @vfmax_nxv8f32_vf( %a, float %b) ret %v } -declare @llvm.maxnum.nxv16f32(, ) - define @vfmax_nxv16f32_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv16f32_vv: ; CHECK: # %bb.0: @@ -704,8 +672,6 @@ define @vfmax_nxv16f32_vf( %a, float ret %v } -declare @llvm.maxnum.nxv1f64(, ) - define @vfmax_nxv1f64_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv1f64_vv: ; CHECK: # %bb.0: @@ -728,8 +694,6 @@ define @vfmax_nxv1f64_vf( %a, double ret %v } -declare @llvm.maxnum.nxv2f64(, ) - define @vfmax_nxv2f64_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv2f64_vv: ; CHECK: # %bb.0: @@ -752,8 +716,6 @@ define @vfmax_nxv2f64_vf( %a, double ret %v } -declare @llvm.maxnum.nxv4f64(, ) - define @vfmax_nxv4f64_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv4f64_vv: ; CHECK: # %bb.0: @@ -776,8 +738,6 @@ define @vfmax_nxv4f64_vf( %a, double ret %v } -declare @llvm.maxnum.nxv8f64(, ) - define @vfmax_nxv8f64_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv8f64_vv: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll index 2ed6bf08b5672..394887fee67fc 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll @@ -12,8 +12,6 @@ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ ; RUN: --check-prefixes=CHECK,ZVFHMIN -declare @llvm.vp.maxnum.nxv1bf16(, , , i32) - define @vfmax_vv_nxv1bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv1bf16: ; CHECK: # %bb.0: @@ -44,8 +42,6 @@ define @vfmax_vv_nxv1bf16_unmasked( % ret %v } -declare @llvm.vp.maxnum.nxv2bf16(, , , i32) - define @vfmax_vv_nxv2bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv2bf16: ; CHECK: # %bb.0: @@ -76,8 +72,6 @@ define @vfmax_vv_nxv2bf16_unmasked( % ret %v } -declare @llvm.vp.maxnum.nxv4bf16(, , , i32) - define @vfmax_vv_nxv4bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv4bf16: ; CHECK: # %bb.0: @@ -108,8 +102,6 @@ define 
@vfmax_vv_nxv4bf16_unmasked( % ret %v } -declare @llvm.vp.maxnum.nxv8bf16(, , , i32) - define @vfmax_vv_nxv8bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv8bf16: ; CHECK: # %bb.0: @@ -140,8 +132,6 @@ define @vfmax_vv_nxv8bf16_unmasked( % ret %v } -declare @llvm.vp.maxnum.nxv16bf16(, , , i32) - define @vfmax_vv_nxv16bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv16bf16: ; CHECK: # %bb.0: @@ -172,8 +162,6 @@ define @vfmax_vv_nxv16bf16_unmasked( %v } -declare @llvm.vp.maxnum.nxv32bf16(, , , i32) - define @vfmax_vv_nxv32bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv32bf16: ; CHECK: # %bb.0: @@ -279,7 +267,6 @@ define @vfmax_vv_nxv32bf16_unmasked( @llvm.vp.maxnum.nxv32bf16( %va, %vb, splat (i1 true), i32 %evl) ret %v } -declare @llvm.vp.maxnum.nxv1f16(, , , i32) define @vfmax_vv_nxv1f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmax_vv_nxv1f16: @@ -323,8 +310,6 @@ define @vfmax_vv_nxv1f16_unmasked( %va, < ret %v } -declare @llvm.vp.maxnum.nxv2f16(, , , i32) - define @vfmax_vv_nxv2f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmax_vv_nxv2f16: ; ZVFH: # %bb.0: @@ -367,8 +352,6 @@ define @vfmax_vv_nxv2f16_unmasked( %va, < ret %v } -declare @llvm.vp.maxnum.nxv4f16(, , , i32) - define @vfmax_vv_nxv4f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmax_vv_nxv4f16: ; ZVFH: # %bb.0: @@ -411,8 +394,6 @@ define @vfmax_vv_nxv4f16_unmasked( %va, < ret %v } -declare @llvm.vp.maxnum.nxv8f16(, , , i32) - define @vfmax_vv_nxv8f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmax_vv_nxv8f16: ; ZVFH: # %bb.0: @@ -455,8 +436,6 @@ define @vfmax_vv_nxv8f16_unmasked( %va, < ret %v } -declare @llvm.vp.maxnum.nxv16f16(, , , i32) - define @vfmax_vv_nxv16f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmax_vv_nxv16f16: ; ZVFH: # %bb.0: @@ -499,8 +478,6 @@ define @vfmax_vv_nxv16f16_unmasked( %va ret %v } -declare @llvm.vp.maxnum.nxv32f16(, , , i32) - define @vfmax_vv_nxv32f16( %va, %vb, %m, i32 zeroext 
%evl) { ; ZVFH-LABEL: vfmax_vv_nxv32f16: ; ZVFH: # %bb.0: @@ -619,8 +596,6 @@ define @vfmax_vv_nxv32f16_unmasked( %va ret %v } -declare @llvm.vp.maxnum.nxv1f32(, , , i32) - define @vfmax_vv_nxv1f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv1f32: ; CHECK: # %bb.0: @@ -641,8 +616,6 @@ define @vfmax_vv_nxv1f32_unmasked( %va, ret %v } -declare @llvm.vp.maxnum.nxv2f32(, , , i32) - define @vfmax_vv_nxv2f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv2f32: ; CHECK: # %bb.0: @@ -663,8 +636,6 @@ define @vfmax_vv_nxv2f32_unmasked( %va, ret %v } -declare @llvm.vp.maxnum.nxv4f32(, , , i32) - define @vfmax_vv_nxv4f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv4f32: ; CHECK: # %bb.0: @@ -685,8 +656,6 @@ define @vfmax_vv_nxv4f32_unmasked( %va, ret %v } -declare @llvm.vp.maxnum.nxv8f32(, , , i32) - define @vfmax_vv_nxv8f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv8f32: ; CHECK: # %bb.0: @@ -707,8 +676,6 @@ define @vfmax_vv_nxv8f32_unmasked( %va, ret %v } -declare @llvm.vp.maxnum.nxv16f32(, , , i32) - define @vfmax_vv_nxv16f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv16f32: ; CHECK: # %bb.0: @@ -729,8 +696,6 @@ define @vfmax_vv_nxv16f32_unmasked( % ret %v } -declare @llvm.vp.maxnum.nxv1f64(, , , i32) - define @vfmax_vv_nxv1f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv1f64: ; CHECK: # %bb.0: @@ -751,8 +716,6 @@ define @vfmax_vv_nxv1f64_unmasked( %v ret %v } -declare @llvm.vp.maxnum.nxv2f64(, , , i32) - define @vfmax_vv_nxv2f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv2f64: ; CHECK: # %bb.0: @@ -773,8 +736,6 @@ define @vfmax_vv_nxv2f64_unmasked( %v ret %v } -declare @llvm.vp.maxnum.nxv4f64(, , , i32) - define @vfmax_vv_nxv4f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv4f64: ; CHECK: # %bb.0: @@ -795,8 +756,6 @@ define @vfmax_vv_nxv4f64_unmasked( %v ret %v } -declare @llvm.vp.maxnum.nxv8f64(, , , i32) - define 
@vfmax_vv_nxv8f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax.ll index 23eb52afcc905..166faef24271b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmax.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmax.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfmax.nxv1f16.nxv1f16( - , - , - , - iXLen); - define @intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv2f16.nxv2f16( - , - , - , - iXLen); - define @intrinsic_vfmax_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv2f16.nxv2f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv4f16.nxv4f16( - , - , - , - iXLen); - define @intrinsic_vfmax_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv4f16.nxv4f16( - , - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vfmax_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv8f16.nxv8f16( - , - , - , - iXLen); - define @intrinsic_vfmax_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv8f16.nxv8f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv16f16.nxv16f16( - , - , - , - iXLen); - define @intrinsic_vfmax_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv16f16.nxv16f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv32f16.nxv32f16( - , - , - , - iXLen); - define @intrinsic_vfmax_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv32f16.nxv32f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -287,12 +203,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfmax.nxv1f32.nxv1f32( - , - , - , - iXLen); - define @intrinsic_vfmax_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -309,14 +219,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv2f32.nxv2f32( - , - , - , - iXLen); - define @intrinsic_vfmax_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv4f32.nxv4f32( - , - , - , - iXLen); - define @intrinsic_vfmax_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv8f32.nxv8f32( - , - , - , - iXLen); - define @intrinsic_vfmax_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret 
%a } -declare @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv16f32.nxv16f32( - , - , - , - iXLen); - define @intrinsic_vfmax_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -523,12 +369,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv1f64.nxv1f64( - , - , - , - iXLen); - define @intrinsic_vfmax_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -545,14 +385,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -570,12 +402,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv2f64.nxv2f64( - , - , - , - iXLen); - define @intrinsic_vfmax_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -592,14 +418,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfmax_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv4f64.nxv4f64( - , - , - , - iXLen); - define @intrinsic_vfmax_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv8f64.nxv8f64( - , - , - , - iXLen); - define @intrinsic_vfmax_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -712,12 +502,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv1f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfmax_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -734,14 +518,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv1f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -759,12 +535,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv2f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfmax_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vfmax_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -781,14 +551,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv2f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -806,12 +568,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv4f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfmax_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -828,14 +584,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv4f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv8f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfmax_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv8f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv16f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfmax_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv16f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half 
%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv32f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfmax_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv32f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv1f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfmax_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv1f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -1041,12 +733,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv2f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfmax_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -1063,14 +749,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv2f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -1088,12 +766,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv4f32.f32( - , - , - float, - iXLen); - define 
@intrinsic_vfmax_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -1110,14 +782,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv4f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -1135,12 +799,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv8f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfmax_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1157,14 +815,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv8f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1182,12 +832,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv16f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfmax_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -1204,14 +848,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv16f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -1229,12 +865,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv1f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfmax_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1251,14 +881,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv1f64.f64( - , - 
, - double, - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1276,12 +898,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv2f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfmax_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1298,14 +914,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv2f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1323,12 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv4f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfmax_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -1345,14 +947,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv4f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -1370,12 +964,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.nxv8f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfmax_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry @@ -1392,14 +980,6 @@ entry: ret %a } -declare @llvm.riscv.vfmax.mask.nxv8f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfmax_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vfmerge-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmerge-bf.ll index 86ba7c7fb7fe6..8a50a014c64cf 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmerge-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmerge-bf.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfmerge.nxv1bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vfmerge_vfm_nxv1bf16_nxv1bf16_bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1bf16_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -28,13 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vfmerge.nxv2bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vfmerge_vfm_nxv2bf16_nxv2bf16_bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2bf16_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -52,13 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vfmerge.nxv4bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vfmerge_vfm_nxv4bf16_nxv4bf16_bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4bf16_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -76,13 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vfmerge.nxv8bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vfmerge_vfm_nxv8bf16_nxv8bf16_bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8bf16_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -100,13 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vfmerge.nxv16bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vfmerge_vfm_nxv16bf16_nxv16bf16_bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv16bf16_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -124,13 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vfmerge.nxv32bf16.bf16( - , - , - bfloat, - , - iXLen); - define 
@intrinsic_vfmerge_vfm_nxv32bf16_nxv32bf16_bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv32bf16_nxv32bf16_bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmerge.ll b/llvm/test/CodeGen/RISCV/rvv/vfmerge.ll index cd9166ddbb7a9..7eec4b24e6bf1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmerge.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmerge.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vmerge.nxv1f16.nxv1f16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -28,13 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vfmerge.nxv1f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vfmerge_vfm_nxv1f16_nxv1f16_f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -52,13 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv2f16.nxv2f16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -76,13 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vfmerge.nxv2f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vfmerge_vfm_nxv2f16_nxv2f16_f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -100,13 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv4f16.nxv4f16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -124,13 +89,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfmerge.nxv4f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vfmerge_vfm_nxv4f16_nxv4f16_f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -148,13 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv8f16.nxv8f16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -172,13 +123,6 @@ entry: ret %a } -declare @llvm.riscv.vfmerge.nxv8f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vfmerge_vfm_nxv8f16_nxv8f16_f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -196,13 +140,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv16f16.nxv16f16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -220,13 +157,6 @@ entry: ret %a } -declare @llvm.riscv.vfmerge.nxv16f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vfmerge_vfm_nxv16f16_nxv16f16_f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -244,13 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv32f16.nxv32f16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -268,13 +191,6 @@ entry: ret %a } -declare @llvm.riscv.vfmerge.nxv32f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vfmerge_vfm_nxv32f16_nxv32f16_f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -292,13 +208,6 @@ 
entry: ret %a } -declare @llvm.riscv.vmerge.nxv1f32.nxv1f32( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -316,13 +225,6 @@ entry: ret %a } -declare @llvm.riscv.vfmerge.nxv1f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vfmerge_vfm_nxv1f32_nxv1f32_f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -340,13 +242,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv2f32.nxv2f32( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -364,13 +259,6 @@ entry: ret %a } -declare @llvm.riscv.vfmerge.nxv2f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vfmerge_vfm_nxv2f32_nxv2f32_f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -388,13 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv4f32.nxv4f32( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -412,13 +293,6 @@ entry: ret %a } -declare @llvm.riscv.vfmerge.nxv4f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vfmerge_vfm_nxv4f32_nxv4f32_f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -436,13 +310,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv8f32.nxv8f32( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -460,13 +327,6 
@@ entry: ret %a } -declare @llvm.riscv.vfmerge.nxv8f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vfmerge_vfm_nxv8f32_nxv8f32_f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -484,13 +344,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv16f32.nxv16f32( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -508,13 +361,6 @@ entry: ret %a } -declare @llvm.riscv.vfmerge.nxv16f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vfmerge_vfm_nxv16f32_nxv16f32_f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -532,13 +378,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv1f64.nxv1f64( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -556,13 +395,6 @@ entry: ret %a } -declare @llvm.riscv.vfmerge.nxv1f64.f64( - , - , - double, - , - iXLen); - define @intrinsic_vfmerge_vfm_nxv1f64_nxv1f64_f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -580,13 +412,6 @@ entry: ret %a } -declare @llvm.riscv.vfmerge.nxv2f64.nxv2f64( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -604,13 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vfmerge.nxv2f64.f64( - , - , - double, - , - iXLen); - define @intrinsic_vfmerge_vfm_nxv2f64_nxv2f64_f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # 
%entry @@ -628,13 +446,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv4f64.nxv4f64( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -652,13 +463,6 @@ entry: ret %a } -declare @llvm.riscv.vfmerge.nxv4f64.f64( - , - , - double, - , - iXLen); - define @intrinsic_vfmerge_vfm_nxv4f64_nxv4f64_f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -676,13 +480,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv8f64.nxv8f64( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -700,13 +497,6 @@ entry: ret %a } -declare @llvm.riscv.vfmerge.nxv8f64.f64( - , - , - double, - , - iXLen); - define @intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin-bf.ll index 37c0cf506a6fa..d40e39c3138f7 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmin-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmin-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfmin.nxv1bf16.nxv1bf16( - , - , - , - iXLen); - define @intrinsic_vfmin_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen 
%4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv2bf16.nxv2bf16( - , - , - , - iXLen); - define @intrinsic_vfmin_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv4bf16.nxv4bf16( - , - , - , - iXLen); - define @intrinsic_vfmin_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv8bf16.nxv8bf16( - , - , - , - iXLen); - define @intrinsic_vfmin_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv16bf16.nxv16bf16( - , - , - , - iXLen); - 
define @intrinsic_vfmin_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv32bf16.nxv32bf16( - , - , - , - iXLen); - define @intrinsic_vfmin_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv32bf16_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -287,12 +203,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv1bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfmin_vf_nxv1bf16_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv1bf16_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -309,14 +219,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv1bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vf_nxv1bf16_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1bf16_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv2bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfmin_vf_nxv2bf16_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv2bf16_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ 
-356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv2bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vf_nxv2bf16_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2bf16_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv4bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfmin_vf_nxv4bf16_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv4bf16_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv4bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vf_nxv4bf16_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4bf16_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv8bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfmin_vf_nxv8bf16_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv8bf16_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv8bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vf_nxv8bf16_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8bf16_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv16bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfmin_vf_nxv16bf16_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv16bf16_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv16bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vf_nxv16bf16_nxv16bf16_bf16( %0, %1, bfloat 
%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv16bf16_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv32bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfmin_vf_nxv32bf16_nxv32bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv32bf16_nxv32bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -544,14 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv32bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vf_nxv32bf16_nxv32bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv32bf16_nxv32bf16_bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin-sdnode.ll index 98ccbf03e1841..bb435c9d0114f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmin-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmin-sdnode.ll @@ -44,8 +44,6 @@ define @vfmin_nxv1bf16_vf( %a, bfloat ret %v } -declare @llvm.minnum.nxv2bf16(, ) - define @vfmin_nxv2bf16_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv2bf16_vv: ; CHECK: # %bb.0: @@ -78,8 +76,6 @@ define @vfmin_nxv2bf16_vf( %a, bfloat ret %v } -declare @llvm.minnum.nxv4bf16(, ) - define @vfmin_nxv4bf16_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv4bf16_vv: ; CHECK: # %bb.0: @@ -112,8 +108,6 @@ define @vfmin_nxv4bf16_vf( %a, bfloat ret %v } -declare @llvm.minnum.nxv8bf16(, ) - define @vfmin_nxv8bf16_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv8bf16_vv: ; CHECK: # %bb.0: @@ -146,8 +140,6 @@ define @vfmin_nxv8bf16_vf( %a, bfloat ret %v } -declare @llvm.minnum.nxv16bf16(, ) - define @vfmin_nxv16bf16_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv16bf16_vv: ; CHECK: # %bb.0: @@ -180,8 +172,6 @@ define @vfmin_nxv16bf16_vf( %a, bfl ret %v } -declare @llvm.minnum.nxv32bf16(, ) - define @vfmin_nxv32bf16_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv32bf16_vv: ; CHECK: # %bb.0: @@ -261,8 +251,6 @@ define @vfmin_nxv32bf16_vf( %a, bfl ret 
%v } -declare @llvm.minnum.nxv1f16(, ) - define @vfmin_nxv1f16_vv( %a, %b) { ; ZVFH-LABEL: vfmin_nxv1f16_vv: ; ZVFH: # %bb.0: @@ -307,8 +295,6 @@ define @vfmin_nxv1f16_vf( %a, half %b) { ret %v } -declare @llvm.minnum.nxv2f16(, ) - define @vfmin_nxv2f16_vv( %a, %b) { ; ZVFH-LABEL: vfmin_nxv2f16_vv: ; ZVFH: # %bb.0: @@ -353,8 +339,6 @@ define @vfmin_nxv2f16_vf( %a, half %b) { ret %v } -declare @llvm.minnum.nxv4f16(, ) - define @vfmin_nxv4f16_vv( %a, %b) { ; ZVFH-LABEL: vfmin_nxv4f16_vv: ; ZVFH: # %bb.0: @@ -399,8 +383,6 @@ define @vfmin_nxv4f16_vf( %a, half %b) { ret %v } -declare @llvm.minnum.nxv8f16(, ) - define @vfmin_nxv8f16_vv( %a, %b) { ; ZVFH-LABEL: vfmin_nxv8f16_vv: ; ZVFH: # %bb.0: @@ -445,8 +427,6 @@ define @vfmin_nxv8f16_vf( %a, half %b) { ret %v } -declare @llvm.minnum.nxv16f16(, ) - define @vfmin_nxv16f16_vv( %a, %b) { ; ZVFH-LABEL: vfmin_nxv16f16_vv: ; ZVFH: # %bb.0: @@ -491,8 +471,6 @@ define @vfmin_nxv16f16_vf( %a, half %b) ret %v } -declare @llvm.minnum.nxv32f16(, ) - define @vfmin_nxv32f16_vv( %a, %b) { ; ZVFH-LABEL: vfmin_nxv32f16_vv: ; ZVFH: # %bb.0: @@ -584,8 +562,6 @@ define @vfmin_nxv32f16_vf( %a, half %b) ret %v } -declare @llvm.minnum.nxv1f32(, ) - define @vfmin_nxv1f32_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv1f32_vv: ; CHECK: # %bb.0: @@ -608,8 +584,6 @@ define @vfmin_nxv1f32_vf( %a, float %b) ret %v } -declare @llvm.minnum.nxv2f32(, ) - define @vfmin_nxv2f32_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv2f32_vv: ; CHECK: # %bb.0: @@ -632,8 +606,6 @@ define @vfmin_nxv2f32_vf( %a, float %b) ret %v } -declare @llvm.minnum.nxv4f32(, ) - define @vfmin_nxv4f32_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv4f32_vv: ; CHECK: # %bb.0: @@ -656,8 +628,6 @@ define @vfmin_nxv4f32_vf( %a, float %b) ret %v } -declare @llvm.minnum.nxv8f32(, ) - define @vfmin_nxv8f32_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv8f32_vv: ; CHECK: # %bb.0: @@ -680,8 +650,6 @@ define @vfmin_nxv8f32_vf( %a, float %b) ret %v } -declare @llvm.minnum.nxv16f32(, ) - define @vfmin_nxv16f32_vv( %a, %b) { ; 
CHECK-LABEL: vfmin_nxv16f32_vv: ; CHECK: # %bb.0: @@ -704,8 +672,6 @@ define @vfmin_nxv16f32_vf( %a, float ret %v } -declare @llvm.minnum.nxv1f64(, ) - define @vfmin_nxv1f64_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv1f64_vv: ; CHECK: # %bb.0: @@ -728,8 +694,6 @@ define @vfmin_nxv1f64_vf( %a, double ret %v } -declare @llvm.minnum.nxv2f64(, ) - define @vfmin_nxv2f64_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv2f64_vv: ; CHECK: # %bb.0: @@ -752,8 +716,6 @@ define @vfmin_nxv2f64_vf( %a, double ret %v } -declare @llvm.minnum.nxv4f64(, ) - define @vfmin_nxv4f64_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv4f64_vv: ; CHECK: # %bb.0: @@ -776,8 +738,6 @@ define @vfmin_nxv4f64_vf( %a, double ret %v } -declare @llvm.minnum.nxv8f64(, ) - define @vfmin_nxv8f64_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv8f64_vv: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll index 42e8de1b56c55..5c5542619b6ef 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll @@ -12,8 +12,6 @@ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ ; RUN: --check-prefixes=CHECK,ZVFHMIN -declare @llvm.vp.minnum.nxv1bf16(, , , i32) - define @vfmin_vv_nxv1bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv1bf16: ; CHECK: # %bb.0: @@ -44,8 +42,6 @@ define @vfmin_vv_nxv1bf16_unmasked( % ret %v } -declare @llvm.vp.minnum.nxv2bf16(, , , i32) - define @vfmin_vv_nxv2bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv2bf16: ; CHECK: # %bb.0: @@ -76,8 +72,6 @@ define @vfmin_vv_nxv2bf16_unmasked( % ret %v } -declare @llvm.vp.minnum.nxv4bf16(, , , i32) - define @vfmin_vv_nxv4bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv4bf16: ; CHECK: # %bb.0: @@ -108,8 +102,6 @@ define @vfmin_vv_nxv4bf16_unmasked( % ret %v } -declare @llvm.vp.minnum.nxv8bf16(, , , i32) - define @vfmin_vv_nxv8bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv8bf16: ; CHECK: # %bb.0: @@ 
-140,8 +132,6 @@ define @vfmin_vv_nxv8bf16_unmasked( % ret %v } -declare @llvm.vp.minnum.nxv16bf16(, , , i32) - define @vfmin_vv_nxv16bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv16bf16: ; CHECK: # %bb.0: @@ -172,8 +162,6 @@ define @vfmin_vv_nxv16bf16_unmasked( %v } -declare @llvm.vp.minnum.nxv32bf16(, , , i32) - define @vfmin_vv_nxv32bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv32bf16: ; CHECK: # %bb.0: @@ -279,7 +267,6 @@ define @vfmin_vv_nxv32bf16_unmasked( @llvm.vp.minnum.nxv32bf16( %va, %vb, splat (i1 true), i32 %evl) ret %v } -declare @llvm.vp.minnum.nxv1f16(, , , i32) define @vfmin_vv_nxv1f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmin_vv_nxv1f16: @@ -323,8 +310,6 @@ define @vfmin_vv_nxv1f16_unmasked( %va, < ret %v } -declare @llvm.vp.minnum.nxv2f16(, , , i32) - define @vfmin_vv_nxv2f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmin_vv_nxv2f16: ; ZVFH: # %bb.0: @@ -367,8 +352,6 @@ define @vfmin_vv_nxv2f16_unmasked( %va, < ret %v } -declare @llvm.vp.minnum.nxv4f16(, , , i32) - define @vfmin_vv_nxv4f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmin_vv_nxv4f16: ; ZVFH: # %bb.0: @@ -411,8 +394,6 @@ define @vfmin_vv_nxv4f16_unmasked( %va, < ret %v } -declare @llvm.vp.minnum.nxv8f16(, , , i32) - define @vfmin_vv_nxv8f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmin_vv_nxv8f16: ; ZVFH: # %bb.0: @@ -455,8 +436,6 @@ define @vfmin_vv_nxv8f16_unmasked( %va, < ret %v } -declare @llvm.vp.minnum.nxv16f16(, , , i32) - define @vfmin_vv_nxv16f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmin_vv_nxv16f16: ; ZVFH: # %bb.0: @@ -499,8 +478,6 @@ define @vfmin_vv_nxv16f16_unmasked( %va ret %v } -declare @llvm.vp.minnum.nxv32f16(, , , i32) - define @vfmin_vv_nxv32f16( %va, %vb, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmin_vv_nxv32f16: ; ZVFH: # %bb.0: @@ -619,8 +596,6 @@ define @vfmin_vv_nxv32f16_unmasked( %va ret %v } -declare @llvm.vp.minnum.nxv1f32(, , , i32) - define @vfmin_vv_nxv1f32( %va, 
%vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv1f32: ; CHECK: # %bb.0: @@ -641,8 +616,6 @@ define @vfmin_vv_nxv1f32_unmasked( %va, ret %v } -declare @llvm.vp.minnum.nxv2f32(, , , i32) - define @vfmin_vv_nxv2f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv2f32: ; CHECK: # %bb.0: @@ -663,8 +636,6 @@ define @vfmin_vv_nxv2f32_unmasked( %va, ret %v } -declare @llvm.vp.minnum.nxv4f32(, , , i32) - define @vfmin_vv_nxv4f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv4f32: ; CHECK: # %bb.0: @@ -685,8 +656,6 @@ define @vfmin_vv_nxv4f32_unmasked( %va, ret %v } -declare @llvm.vp.minnum.nxv8f32(, , , i32) - define @vfmin_vv_nxv8f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv8f32: ; CHECK: # %bb.0: @@ -707,8 +676,6 @@ define @vfmin_vv_nxv8f32_unmasked( %va, ret %v } -declare @llvm.vp.minnum.nxv16f32(, , , i32) - define @vfmin_vv_nxv16f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv16f32: ; CHECK: # %bb.0: @@ -729,8 +696,6 @@ define @vfmin_vv_nxv16f32_unmasked( % ret %v } -declare @llvm.vp.minnum.nxv1f64(, , , i32) - define @vfmin_vv_nxv1f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv1f64: ; CHECK: # %bb.0: @@ -751,8 +716,6 @@ define @vfmin_vv_nxv1f64_unmasked( %v ret %v } -declare @llvm.vp.minnum.nxv2f64(, , , i32) - define @vfmin_vv_nxv2f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv2f64: ; CHECK: # %bb.0: @@ -773,8 +736,6 @@ define @vfmin_vv_nxv2f64_unmasked( %v ret %v } -declare @llvm.vp.minnum.nxv4f64(, , , i32) - define @vfmin_vv_nxv4f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv4f64: ; CHECK: # %bb.0: @@ -795,8 +756,6 @@ define @vfmin_vv_nxv4f64_unmasked( %v ret %v } -declare @llvm.vp.minnum.nxv8f64(, , , i32) - define @vfmin_vv_nxv8f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin.ll index 
32048ca928d45..d060a24e665c0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmin.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmin.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfmin.nxv1f16.nxv1f16( - , - , - , - iXLen); - define @intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv2f16.nxv2f16( - , - , - , - iXLen); - define @intrinsic_vfmin_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv2f16.nxv2f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv4f16.nxv4f16( - , - , - , - iXLen); - define @intrinsic_vfmin_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv4f16.nxv4f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret 
%a } -declare @llvm.riscv.vfmin.nxv8f16.nxv8f16( - , - , - , - iXLen); - define @intrinsic_vfmin_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv8f16.nxv8f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv16f16.nxv16f16( - , - , - , - iXLen); - define @intrinsic_vfmin_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv16f16.nxv16f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv32f16.nxv32f16( - , - , - , - iXLen); - define @intrinsic_vfmin_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv32f16.nxv32f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -287,12 +203,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv1f32.nxv1f32( - , - , - , - iXLen); - define @intrinsic_vfmin_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # 
%bb.0: # %entry @@ -309,14 +219,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv2f32.nxv2f32( - , - , - , - iXLen); - define @intrinsic_vfmin_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv4f32.nxv4f32( - , - , - , - iXLen); - define @intrinsic_vfmin_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv8f32.nxv8f32( - , - , - , - iXLen); - define @intrinsic_vfmin_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfmin_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv16f32.nxv16f32( - , - , - , - iXLen); - define @intrinsic_vfmin_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -523,12 +369,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv1f64.nxv1f64( - , - , - , - iXLen); - define @intrinsic_vfmin_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -545,14 +385,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -570,12 +402,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv2f64.nxv2f64( - , - , - , - iXLen); - define @intrinsic_vfmin_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -592,14 +418,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv4f64.nxv4f64( - , - , - , - iXLen); - define @intrinsic_vfmin_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, 
iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv8f64.nxv8f64( - , - , - , - iXLen); - define @intrinsic_vfmin_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -712,12 +502,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv1f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfmin_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -734,14 +518,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv1f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -759,12 +535,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv2f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfmin_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -781,14 +551,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv2f16.f16( - , - , - half, - , - iXLen, - iXLen); - define 
@intrinsic_vfmin_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -806,12 +568,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv4f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfmin_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -828,14 +584,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv4f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv8f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfmin_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv8f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv16f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfmin_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv16f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv32f16.f16( - , - , - 
half, - iXLen); - define @intrinsic_vfmin_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv32f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv1f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfmin_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv1f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -1041,12 +733,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv2f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfmin_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -1063,14 +749,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv2f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -1088,12 +766,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv4f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfmin_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -1110,14 +782,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfmin.mask.nxv4f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -1135,12 +799,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv8f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfmin_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1157,14 +815,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv8f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1182,12 +832,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv16f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfmin_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -1204,14 +848,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv16f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -1229,12 +865,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv1f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfmin_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1251,14 +881,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv1f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # 
%bb.0: # %entry @@ -1276,12 +898,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv2f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfmin_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1298,14 +914,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv2f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1323,12 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv4f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfmin_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -1345,14 +947,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv4f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -1370,12 +964,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.nxv8f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfmin_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry @@ -1392,14 +980,6 @@ entry: ret %a } -declare @llvm.riscv.vfmin.mask.nxv8f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfmin_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsac-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsac-bf.ll index 948d2196f2bb4..25fc46fd23699 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmsac-bf.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/vfmsac-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfmsac.nxv1bf16.nxv1bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv2bf16.nxv2bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv4bf16.nxv4bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfmsac_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv8bf16.nxv8bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv16bf16.nxv16bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv1bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1bf16_bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv1bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1bf16_bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ 
entry: ret %a } -declare @llvm.riscv.vfmsac.nxv2bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2bf16_bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv2bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2bf16_bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv4bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4bf16_bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv4bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4bf16_bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv8bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv8bf16_bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv8bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8bf16_bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv16bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfmsac_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv16bf16_bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv16bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv16bf16_bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsac-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsac-vp.ll index 31369b69bee15..72ed38b53d2ff 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmsac-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmsac-vp.ll @@ -4,11 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare @llvm.vp.fma.nxv1f16(, , , , i32) -declare @llvm.vp.fneg.nxv1f16(, , i32) -declare @llvm.vp.merge.nxv1f16(, , , i32) -declare @llvm.vp.select.nxv1f16(, , , i32) - define @vmfsac_vv_nxv1f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vv_nxv1f16: ; CHECK: # %bb.0: @@ -123,11 +118,6 @@ define @vmfsac_vf_nxv1f16_commute_ta( %a, ret %u } -declare @llvm.vp.fma.nxv2f16(, , , , i32) -declare @llvm.vp.fneg.nxv2f16(, , i32) -declare @llvm.vp.merge.nxv2f16(, , , i32) -declare @llvm.vp.select.nxv2f16(, , , i32) - define @vmfsac_vv_nxv2f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vv_nxv2f16: ; CHECK: # %bb.0: @@ -242,11 +232,6 @@ define @vmfsac_vf_nxv2f16_commute_ta( %a, ret %u } -declare @llvm.vp.fma.nxv4f16(, , , , i32) -declare @llvm.vp.fneg.nxv4f16(, , i32) -declare @llvm.vp.merge.nxv4f16(, , , i32) -declare @llvm.vp.select.nxv4f16(, , , i32) - define @vmfsac_vv_nxv4f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vv_nxv4f16: ; CHECK: # %bb.0: @@ -361,11 +346,6 @@ define @vmfsac_vf_nxv4f16_commute_ta( %a, ret %u } -declare @llvm.vp.fma.nxv8f16(, , , , i32) 
-declare @llvm.vp.fneg.nxv8f16(, , i32) -declare @llvm.vp.merge.nxv8f16(, , , i32) -declare @llvm.vp.select.nxv8f16(, , , i32) - define @vmfsac_vv_nxv8f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vv_nxv8f16: ; CHECK: # %bb.0: @@ -480,11 +460,6 @@ define @vmfsac_vf_nxv8f16_commute_ta( %a, ret %u } -declare @llvm.vp.fma.nxv16f16(, , , , i32) -declare @llvm.vp.fneg.nxv16f16(, , i32) -declare @llvm.vp.merge.nxv16f16(, , , i32) -declare @llvm.vp.select.nxv16f16(, , , i32) - define @vmfsac_vv_nxv16f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vv_nxv16f16: ; CHECK: # %bb.0: @@ -599,11 +574,6 @@ define @vmfsac_vf_nxv16f16_commute_ta( ret %u } -declare @llvm.vp.fma.nxv32f16(, , , , i32) -declare @llvm.vp.fneg.nxv32f16(, , i32) -declare @llvm.vp.merge.nxv32f16(, , , i32) -declare @llvm.vp.select.nxv32f16(, , , i32) - define @vmfsac_vv_nxv32f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vv_nxv32f16: ; CHECK: # %bb.0: @@ -721,11 +691,6 @@ define @vmfsac_vf_nxv32f16_commute_ta( ret %u } -declare @llvm.vp.fma.nxv1f32(, , , , i32) -declare @llvm.vp.fneg.nxv1f32(, , i32) -declare @llvm.vp.merge.nxv1f32(, , , i32) -declare @llvm.vp.select.nxv1f32(, , , i32) - define @vmfsac_vv_nxv1f32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vv_nxv1f32: ; CHECK: # %bb.0: @@ -840,11 +805,6 @@ define @vmfsac_vf_nxv1f32_commute_ta( % ret %u } -declare @llvm.vp.fma.nxv2f32(, , , , i32) -declare @llvm.vp.fneg.nxv2f32(, , i32) -declare @llvm.vp.merge.nxv2f32(, , , i32) -declare @llvm.vp.select.nxv2f32(, , , i32) - define @vmfsac_vv_nxv2f32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vv_nxv2f32: ; CHECK: # %bb.0: @@ -959,11 +919,6 @@ define @vmfsac_vf_nxv2f32_commute_ta( % ret %u } -declare @llvm.vp.fma.nxv4f32(, , , , i32) -declare @llvm.vp.fneg.nxv4f32(, , i32) -declare @llvm.vp.merge.nxv4f32(, , , i32) -declare @llvm.vp.select.nxv4f32(, , , i32) - define @vmfsac_vv_nxv4f32( %a, %b, %c, %m, i32 zeroext %evl) { ; 
CHECK-LABEL: vmfsac_vv_nxv4f32: ; CHECK: # %bb.0: @@ -1078,11 +1033,6 @@ define @vmfsac_vf_nxv4f32_commute_ta( % ret %u } -declare @llvm.vp.fma.nxv8f32(, , , , i32) -declare @llvm.vp.fneg.nxv8f32(, , i32) -declare @llvm.vp.merge.nxv8f32(, , , i32) -declare @llvm.vp.select.nxv8f32(, , , i32) - define @vmfsac_vv_nxv8f32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vv_nxv8f32: ; CHECK: # %bb.0: @@ -1197,11 +1147,6 @@ define @vmfsac_vf_nxv8f32_commute_ta( % ret %u } -declare @llvm.vp.fma.nxv16f32(, , , , i32) -declare @llvm.vp.fneg.nxv16f32(, , i32) -declare @llvm.vp.merge.nxv16f32(, , , i32) -declare @llvm.vp.select.nxv16f32(, , , i32) - define @vmfsac_vv_nxv16f32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vv_nxv16f32: ; CHECK: # %bb.0: @@ -1319,11 +1264,6 @@ define @vmfsac_vf_nxv16f32_commute_ta( %u } -declare @llvm.vp.fma.nxv1f64(, , , , i32) -declare @llvm.vp.fneg.nxv1f64(, , i32) -declare @llvm.vp.merge.nxv1f64(, , , i32) -declare @llvm.vp.select.nxv1f64(, , , i32) - define @vmfsac_vv_nxv1f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vv_nxv1f64: ; CHECK: # %bb.0: @@ -1438,11 +1378,6 @@ define @vmfsac_vf_nxv1f64_commute_ta( ret %u } -declare @llvm.vp.fma.nxv2f64(, , , , i32) -declare @llvm.vp.fneg.nxv2f64(, , i32) -declare @llvm.vp.merge.nxv2f64(, , , i32) -declare @llvm.vp.select.nxv2f64(, , , i32) - define @vmfsac_vv_nxv2f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vv_nxv2f64: ; CHECK: # %bb.0: @@ -1557,11 +1492,6 @@ define @vmfsac_vf_nxv2f64_commute_ta( ret %u } -declare @llvm.vp.fma.nxv4f64(, , , , i32) -declare @llvm.vp.fneg.nxv4f64(, , i32) -declare @llvm.vp.merge.nxv4f64(, , , i32) -declare @llvm.vp.select.nxv4f64(, , , i32) - define @vmfsac_vv_nxv4f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vv_nxv4f64: ; CHECK: # %bb.0: @@ -1676,11 +1606,6 @@ define @vmfsac_vf_nxv4f64_commute_ta( ret %u } -declare @llvm.vp.fma.nxv8f64(, , , , i32) -declare @llvm.vp.fneg.nxv8f64(, , i32) 
-declare @llvm.vp.merge.nxv8f64(, , , i32) -declare @llvm.vp.select.nxv8f64(, , , i32) - define @vmfsac_vv_nxv8f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vv_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsac.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsac.ll index 319c945435402..8cac1b32cd08f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmsac.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmsac.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfmsac.nxv1f16.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv2f16.nxv2f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv2f16.nxv2f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv4f16.nxv4f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfmsac_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv4f16.nxv4f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv8f16.nxv8f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv8f16.nxv8f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv16f16.nxv16f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv16f16.nxv16f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv1f32.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv2f32.nxv2f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv4f32.nxv4f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv8f32.nxv8f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv1f64.nxv1f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -504,12 +374,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv2f64.nxv2f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -528,13 +392,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -554,12 +411,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv4f64.nxv4f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -578,13 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -604,12 +448,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfmsac.nxv1f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -628,13 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv1f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -654,12 +485,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv2f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -678,13 +503,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv2f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -704,12 +522,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv4f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -728,13 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv4f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -754,12 +559,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv8f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfmsac_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -778,13 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv8f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -804,12 +596,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv16f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -828,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv16f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -854,12 +633,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv1f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -878,13 +651,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv1f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -904,12 +670,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv2f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -928,13 +688,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv2f32.f32( - , - float, - , - , - 
iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -954,12 +707,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv4f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -978,13 +725,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv4f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -1004,12 +744,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv8f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -1028,13 +762,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv8f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -1054,12 +781,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv1f64.f64( - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -1078,13 +799,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv1f64.f64( - , - double, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfmsac_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -1104,12 +818,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv2f64.f64( - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -1128,13 +836,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv2f64.f64( - , - double, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -1154,12 +855,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.nxv4f64.f64( - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -1178,13 +873,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsac.mask.nxv4f64.f64( - , - double, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsac_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsub-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsub-bf.ll index 6838f37339e98..0902d7034b46a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmsub-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmsub-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfmsub.nxv1bf16.nxv1bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } 
-declare @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv2bf16.nxv2bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv4bf16.nxv4bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv8bf16.nxv8bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfmsub_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv16bf16.nxv16bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv1bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1bf16_bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv1bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1bf16_bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv2bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2bf16_bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv2bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2bf16_bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv4bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4bf16_bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv4bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4bf16_bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv8bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv8bf16_bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv8bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv8bf16_bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv16bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv16bf16_bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv16bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv16bf16_bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vfmsub-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsub-constrained-sdnode.ll index 229c06999388c..5f9dc1ae273bf 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmsub-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmsub-constrained-sdnode.ll @@ -11,8 +11,6 @@ ; This tests a mix of vfmsac and vfmsub by using different operand orders to ; trigger commuting in TwoAddressInstructionPass. -declare @llvm.experimental.constrained.fma.nxv1f16(, , , metadata, metadata) - define @vfmsub_vv_nxv1f16( %va, %vb, %vc) strictfp { ; ZVFH-LABEL: vfmsub_vv_nxv1f16: ; ZVFH: # %bb.0: @@ -67,8 +65,6 @@ define @vfmsub_vf_nxv1f16( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv2f16(, , , metadata, metadata) - define @vfmsub_vv_nxv2f16( %va, %vb, %vc) strictfp { ; ZVFH-LABEL: vfmsub_vv_nxv2f16: ; ZVFH: # %bb.0: @@ -123,8 +119,6 @@ define @vfmsub_vf_nxv2f16( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv4f16(, , , metadata, metadata) - define @vfmsub_vv_nxv4f16( %va, %vb, %vc) strictfp { ; ZVFH-LABEL: vfmsub_vv_nxv4f16: ; ZVFH: # %bb.0: @@ -179,8 +173,6 @@ define @vfmsub_vf_nxv4f16( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv8f16(, , , metadata, metadata) - define @vfmsub_vv_nxv8f16( %va, %vb, %vc) strictfp { ; ZVFH-LABEL: vfmsub_vv_nxv8f16: ; ZVFH: # %bb.0: @@ -235,8 +227,6 @@ define @vfmsub_vf_nxv8f16( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv16f16(, , , metadata, metadata) - define @vfmsub_vv_nxv16f16( %va, %vb, %vc) strictfp { ; ZVFH-LABEL: vfmsub_vv_nxv16f16: ; ZVFH: # %bb.0: @@ -291,8 +281,6 @@ define @vfmsub_vf_nxv16f16( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv32f16(, , , metadata, metadata) - define @vfmsub_vv_nxv32f16( %va, %vb, %vc) strictfp { ; ZVFH-LABEL: vfmsub_vv_nxv32f16: ; ZVFH: # %bb.0: @@ -469,8 +457,6 @@ define @vfmsub_vf_nxv32f16( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv1f32(, , , metadata, metadata) - define @vfmsub_vv_nxv1f32( %va, %vb, 
%vc) strictfp { ; CHECK-LABEL: vfmsub_vv_nxv1f32: ; CHECK: # %bb.0: @@ -495,8 +481,6 @@ define @vfmsub_vf_nxv1f32( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv2f32(, , , metadata, metadata) - define @vfmsub_vv_nxv2f32( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfmsub_vv_nxv2f32: ; CHECK: # %bb.0: @@ -521,8 +505,6 @@ define @vfmsub_vf_nxv2f32( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv4f32(, , , metadata, metadata) - define @vfmsub_vv_nxv4f32( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfmsub_vv_nxv4f32: ; CHECK: # %bb.0: @@ -547,8 +529,6 @@ define @vfmsub_vf_nxv4f32( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv8f32(, , , metadata, metadata) - define @vfmsub_vv_nxv8f32( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfmsub_vv_nxv8f32: ; CHECK: # %bb.0: @@ -573,8 +553,6 @@ define @vfmsub_vf_nxv8f32( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv16f32(, , , metadata, metadata) - define @vfmsub_vv_nxv16f32( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfmsub_vv_nxv16f32: ; CHECK: # %bb.0: @@ -600,8 +578,6 @@ define @vfmsub_vf_nxv16f32( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv1f64(, , , metadata, metadata) - define @vfmsub_vv_nxv1f64( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfmsub_vv_nxv1f64: ; CHECK: # %bb.0: @@ -626,8 +602,6 @@ define @vfmsub_vf_nxv1f64( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv2f64(, , , metadata, metadata) - define @vfmsub_vv_nxv2f64( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfmsub_vv_nxv2f64: ; CHECK: # %bb.0: @@ -652,8 +626,6 @@ define @vfmsub_vf_nxv2f64( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv4f64(, , , metadata, metadata) - define @vfmsub_vv_nxv4f64( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfmsub_vv_nxv4f64: ; CHECK: # %bb.0: @@ -678,8 +650,6 @@ define @vfmsub_vf_nxv4f64( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv8f64(, , , metadata, metadata) - define @vfmsub_vv_nxv8f64( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfmsub_vv_nxv8f64: ; 
CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsub-sdnode.ll index 1f99d0e3a5b4b..4a4079925cb05 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmsub-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmsub-sdnode.ll @@ -11,8 +11,6 @@ ; This tests a mix of vfmsac and vfmsub by using different operand orders to ; trigger commuting in TwoAddressInstructionPass. -declare @llvm.fma.v1f16(, , ) - define @vfmsub_vv_nxv1f16( %va, %vb, %vc) { ; CHECK-LABEL: vfmsub_vv_nxv1f16: ; CHECK: # %bb.0: @@ -37,8 +35,6 @@ define @vfmsub_vf_nxv1f16( %va, %vd } -declare @llvm.fma.v2f16(, , ) - define @vfmsub_vv_nxv2f16( %va, %vb, %vc) { ; CHECK-LABEL: vfmsub_vv_nxv2f16: ; CHECK: # %bb.0: @@ -63,8 +59,6 @@ define @vfmsub_vf_nxv2f16( %va, %vd } -declare @llvm.fma.v4f16(, , ) - define @vfmsub_vv_nxv4f16( %va, %vb, %vc) { ; CHECK-LABEL: vfmsub_vv_nxv4f16: ; CHECK: # %bb.0: @@ -89,8 +83,6 @@ define @vfmsub_vf_nxv4f16( %va, %vd } -declare @llvm.fma.v8f16(, , ) - define @vfmsub_vv_nxv8f16( %va, %vb, %vc) { ; CHECK-LABEL: vfmsub_vv_nxv8f16: ; CHECK: # %bb.0: @@ -115,8 +107,6 @@ define @vfmsub_vf_nxv8f16( %va, %vd } -declare @llvm.fma.v16f16(, , ) - define @vfmsub_vv_nxv16f16( %va, %vb, %vc) { ; CHECK-LABEL: vfmsub_vv_nxv16f16: ; CHECK: # %bb.0: @@ -141,8 +131,6 @@ define @vfmsub_vf_nxv16f16( %va, %vd } -declare @llvm.fma.v32f16(, , ) - define @vfmsub_vv_nxv32f16( %va, %vb, %vc) { ; CHECK-LABEL: vfmsub_vv_nxv32f16: ; CHECK: # %bb.0: @@ -168,8 +156,6 @@ define @vfmsub_vf_nxv32f16( %va, %vd } -declare @llvm.fma.v1f32(, , ) - define @vfmsub_vv_nxv1f32( %va, %vb, %vc) { ; CHECK-LABEL: vfmsub_vv_nxv1f32: ; CHECK: # %bb.0: @@ -194,8 +180,6 @@ define @vfmsub_vf_nxv1f32( %va, %vd } -declare @llvm.fma.v2f32(, , ) - define @vfmsub_vv_nxv2f32( %va, %vb, %vc) { ; CHECK-LABEL: vfmsub_vv_nxv2f32: ; CHECK: # %bb.0: @@ -220,8 +204,6 @@ define @vfmsub_vf_nxv2f32( %va, %vd } -declare @llvm.fma.v4f32(, , ) - define @vfmsub_vv_nxv4f32( %va, %vb, %vc) { ; CHECK-LABEL: 
vfmsub_vv_nxv4f32: ; CHECK: # %bb.0: @@ -246,8 +228,6 @@ define @vfmsub_vf_nxv4f32( %va, %vd } -declare @llvm.fma.v8f32(, , ) - define @vfmsub_vv_nxv8f32( %va, %vb, %vc) { ; CHECK-LABEL: vfmsub_vv_nxv8f32: ; CHECK: # %bb.0: @@ -272,8 +252,6 @@ define @vfmsub_vf_nxv8f32( %va, %vd } -declare @llvm.fma.v16f32(, , ) - define @vfmsub_vv_nxv16f32( %va, %vb, %vc) { ; CHECK-LABEL: vfmsub_vv_nxv16f32: ; CHECK: # %bb.0: @@ -299,8 +277,6 @@ define @vfmsub_vf_nxv16f32( %va, %vd } -declare @llvm.fma.v1f64(, , ) - define @vfmsub_vv_nxv1f64( %va, %vb, %vc) { ; CHECK-LABEL: vfmsub_vv_nxv1f64: ; CHECK: # %bb.0: @@ -325,8 +301,6 @@ define @vfmsub_vf_nxv1f64( %va, %vd } -declare @llvm.fma.v2f64(, , ) - define @vfmsub_vv_nxv2f64( %va, %vb, %vc) { ; CHECK-LABEL: vfmsub_vv_nxv2f64: ; CHECK: # %bb.0: @@ -351,8 +325,6 @@ define @vfmsub_vf_nxv2f64( %va, %vd } -declare @llvm.fma.v4f64(, , ) - define @vfmsub_vv_nxv4f64( %va, %vb, %vc) { ; CHECK-LABEL: vfmsub_vv_nxv4f64: ; CHECK: # %bb.0: @@ -377,8 +349,6 @@ define @vfmsub_vf_nxv4f64( %va, %vd } -declare @llvm.fma.v8f64(, , ) - define @vfmsub_vv_nxv8f64( %va, %vb, %vc) { ; CHECK-LABEL: vfmsub_vv_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsub.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsub.ll index 23b4479fa8c94..7b9e6c4f9c02d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmsub.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmsub.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfmsub.nxv1f16.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen 
%4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv2f16.nxv2f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv2f16.nxv2f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv4f16.nxv4f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv4f16.nxv4f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv8f16.nxv8f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv8f16.nxv8f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfmsub.nxv16f16.nxv16f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv16f16.nxv16f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv1f32.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv2f32.nxv2f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv4f32.nxv4f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) 
nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv8f32.nxv8f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv1f64.nxv1f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -504,12 +374,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv2f64.nxv2f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -528,13 +392,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -554,12 +411,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv4f64.nxv4f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -578,13 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -604,12 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv1f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -628,13 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv1f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -654,12 +485,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv2f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -678,13 +503,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv2f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -704,12 +522,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv4f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -728,13 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv4f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -754,12 +559,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv8f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -778,13 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv8f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -804,12 +596,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv16f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -828,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv16f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -854,12 +633,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv1f32.f32( - , - float, - , 
- iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -878,13 +651,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv1f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -904,12 +670,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv2f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -928,13 +688,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv2f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -954,12 +707,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv4f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -978,13 +725,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv4f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -1004,12 +744,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv8f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv8f32_f32_nxv8f32: ; 
CHECK: # %bb.0: # %entry @@ -1028,13 +762,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv8f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -1054,12 +781,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv1f64.f64( - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -1078,13 +799,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv1f64.f64( - , - double, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -1104,12 +818,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv2f64.f64( - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -1128,13 +836,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv2f64.f64( - , - double, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -1154,12 +855,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.nxv4f64.f64( - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmsub_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -1178,13 +873,6 @@ entry: ret %a } -declare @llvm.riscv.vfmsub.mask.nxv4f64.f64( - , - double, - , - , - iXLen, iXLen, iXLen); - 
define @intrinsic_vfmsub_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-bf.ll index 44bce723c39d4..609ef8fb149b0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmul-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfmul.nxv1bf16.nxv1bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfmul_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv2bf16.nxv2bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfmul_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv4bf16.nxv4bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfmul_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfmul_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv8bf16.nxv8bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfmul_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv16bf16.nxv16bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfmul_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv32bf16.nxv32bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfmul_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv32bf16_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -305,12 +227,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv1bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfmul_vf_nxv1bf16_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv1bf16_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -329,13 +245,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv1bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vf_nxv1bf16_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1bf16_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -355,12 +264,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv2bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfmul_vf_nxv2bf16_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv2bf16_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -379,13 +282,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv2bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vf_nxv2bf16_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2bf16_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -405,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv4bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfmul_vf_nxv4bf16_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv4bf16_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -429,13 +319,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv4bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vf_nxv4bf16_nxv4bf16_bf16( %0, %1, bfloat %2, 
%3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4bf16_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -455,12 +338,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv8bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfmul_vf_nxv8bf16_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv8bf16_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -479,13 +356,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv8bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vf_nxv8bf16_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8bf16_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -505,12 +375,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv16bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfmul_vf_nxv16bf16_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv16bf16_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -529,13 +393,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv16bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vf_nxv16bf16_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv16bf16_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -555,12 +412,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv32bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfmul_vf_nxv32bf16_nxv32bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv32bf16_nxv32bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -579,13 +430,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv32bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vf_nxv32bf16_nxv32bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv32bf16_nxv32bf16_bf16: ; CHECK: # %bb.0: # %entry diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vfmul-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-constrained-sdnode.ll index 990d3d4e227df..7640cd565e448 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmul-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-constrained-sdnode.ll @@ -267,7 +267,6 @@ define @vfmul_vf_nxv32bf16( %va, bf ret %vc } -declare @llvm.experimental.constrained.fmul.nxv1f16(, , metadata, metadata) define @vfmul_vv_nxv1f16( %va, %vb) strictfp { ; ZVFH-LABEL: vfmul_vv_nxv1f16: ; ZVFH: # %bb.0: # %entry @@ -315,7 +314,6 @@ define @vfmul_vf_nxv1f16( %va, half %b) s ret %vc } -declare @llvm.experimental.constrained.fmul.nxv2f16(, , metadata, metadata) define @vfmul_vv_nxv2f16( %va, %vb) strictfp { ; ZVFH-LABEL: vfmul_vv_nxv2f16: ; ZVFH: # %bb.0: # %entry @@ -363,7 +361,6 @@ define @vfmul_vf_nxv2f16( %va, half %b) s ret %vc } -declare @llvm.experimental.constrained.fmul.nxv4f16(, , metadata, metadata) define @vfmul_vv_nxv4f16( %va, %vb) strictfp { ; ZVFH-LABEL: vfmul_vv_nxv4f16: ; ZVFH: # %bb.0: # %entry @@ -411,7 +408,6 @@ define @vfmul_vf_nxv4f16( %va, half %b) s ret %vc } -declare @llvm.experimental.constrained.fmul.nxv8f16(, , metadata, metadata) define @vfmul_vv_nxv8f16( %va, %vb) strictfp { ; ZVFH-LABEL: vfmul_vv_nxv8f16: ; ZVFH: # %bb.0: # %entry @@ -459,7 +455,6 @@ define @vfmul_vf_nxv8f16( %va, half %b) s ret %vc } -declare @llvm.experimental.constrained.fmul.nxv16f16(, , metadata, metadata) define @vfmul_vv_nxv16f16( %va, %vb) strictfp { ; ZVFH-LABEL: vfmul_vv_nxv16f16: ; ZVFH: # %bb.0: # %entry @@ -507,7 +502,6 @@ define @vfmul_vf_nxv16f16( %va, half %b ret %vc } -declare @llvm.experimental.constrained.fmul.nxv32f16(, , metadata, metadata) define @vfmul_vv_nxv32f16( %va, %vb) strictfp { ; ZVFH-LABEL: vfmul_vv_nxv32f16: ; ZVFH: # %bb.0: # %entry @@ -600,7 +594,6 @@ define @vfmul_vf_nxv32f16( %va, half %b ret %vc } -declare @llvm.experimental.constrained.fmul.nxv1f32(, , metadata, metadata) define @vfmul_vv_nxv1f32( %va, %vb) strictfp { ; 
CHECK-LABEL: vfmul_vv_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -624,7 +617,6 @@ define @vfmul_vf_nxv1f32( %va, float %b ret %vc } -declare @llvm.experimental.constrained.fmul.nxv2f32(, , metadata, metadata) define @vfmul_vv_nxv2f32( %va, %vb) strictfp { ; CHECK-LABEL: vfmul_vv_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -648,7 +640,6 @@ define @vfmul_vf_nxv2f32( %va, float %b ret %vc } -declare @llvm.experimental.constrained.fmul.nxv4f32(, , metadata, metadata) define @vfmul_vv_nxv4f32( %va, %vb) strictfp { ; CHECK-LABEL: vfmul_vv_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -672,7 +663,6 @@ define @vfmul_vf_nxv4f32( %va, float %b ret %vc } -declare @llvm.experimental.constrained.fmul.nxv8f32(, , metadata, metadata) define @vfmul_vv_nxv8f32( %va, %vb) strictfp { ; CHECK-LABEL: vfmul_vv_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -696,7 +686,6 @@ define @vfmul_vf_nxv8f32( %va, float %b ret %vc } -declare @llvm.experimental.constrained.fmul.nxv16f32(, , metadata, metadata) define @vfmul_vv_nxv16f32( %va, %vb) strictfp { ; CHECK-LABEL: vfmul_vv_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -720,7 +709,6 @@ define @vfmul_vf_nxv16f32( %va, float ret %vc } -declare @llvm.experimental.constrained.fmul.nxv1f64(, , metadata, metadata) define @vfmul_vv_nxv1f64( %va, %vb) strictfp { ; CHECK-LABEL: vfmul_vv_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -744,7 +732,6 @@ define @vfmul_vf_nxv1f64( %va, double ret %vc } -declare @llvm.experimental.constrained.fmul.nxv2f64(, , metadata, metadata) define @vfmul_vv_nxv2f64( %va, %vb) strictfp { ; CHECK-LABEL: vfmul_vv_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -768,7 +755,6 @@ define @vfmul_vf_nxv2f64( %va, double ret %vc } -declare @llvm.experimental.constrained.fmul.nxv4f64(, , metadata, metadata) define @vfmul_vv_nxv4f64( %va, %vb) strictfp { ; CHECK-LABEL: vfmul_vv_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -792,7 +778,6 @@ define @vfmul_vf_nxv4f64( %va, double ret %vc } -declare @llvm.experimental.constrained.fmul.nxv8f64(, , metadata, metadata) define 
@vfmul_vv_nxv8f64( %va, %vb) strictfp { ; CHECK-LABEL: vfmul_vv_nxv8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll index 5c8e499d2f5e1..eb77b4b4dbac3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare @llvm.vp.fmul.nxv1f16(, , , i32) - define @vfmul_vv_nxv1f16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmul_vv_nxv1f16: ; ZVFH: # %bb.0: @@ -102,8 +100,6 @@ define @vfmul_vf_nxv1f16_unmasked( %va, h ret %v } -declare @llvm.vp.fmul.nxv2f16(, , , i32) - define @vfmul_vv_nxv2f16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmul_vv_nxv2f16: ; ZVFH: # %bb.0: @@ -196,8 +192,6 @@ define @vfmul_vf_nxv2f16_unmasked( %va, h ret %v } -declare @llvm.vp.fmul.nxv4f16(, , , i32) - define @vfmul_vv_nxv4f16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmul_vv_nxv4f16: ; ZVFH: # %bb.0: @@ -290,8 +284,6 @@ define @vfmul_vf_nxv4f16_unmasked( %va, h ret %v } -declare @llvm.vp.fmul.nxv8f16(, , , i32) - define @vfmul_vv_nxv8f16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmul_vv_nxv8f16: ; ZVFH: # %bb.0: @@ -384,8 +376,6 @@ define @vfmul_vf_nxv8f16_unmasked( %va, h ret %v } -declare @llvm.vp.fmul.nxv16f16(, , , i32) - define @vfmul_vv_nxv16f16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmul_vv_nxv16f16: ; ZVFH: # %bb.0: @@ -478,8 +468,6 @@ define @vfmul_vf_nxv16f16_unmasked( %va ret %v } -declare @llvm.vp.fmul.nxv32f16(, , , i32) - define @vfmul_vv_nxv32f16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmul_vv_nxv32f16: ; ZVFH: # %bb.0: @@ -735,8 +723,6 @@ define @vfmul_vf_nxv32f16_unmasked( %va ret %v } -declare @llvm.vp.fmul.nxv1f32(, , , i32) - define @vfmul_vv_nxv1f32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_nxv1f32: ; CHECK: # %bb.0: @@ 
-781,8 +767,6 @@ define @vfmul_vf_nxv1f32_unmasked( %va, ret %v } -declare @llvm.vp.fmul.nxv2f32(, , , i32) - define @vfmul_vv_nxv2f32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_nxv2f32: ; CHECK: # %bb.0: @@ -827,8 +811,6 @@ define @vfmul_vf_nxv2f32_unmasked( %va, ret %v } -declare @llvm.vp.fmul.nxv4f32(, , , i32) - define @vfmul_vv_nxv4f32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_nxv4f32: ; CHECK: # %bb.0: @@ -873,8 +855,6 @@ define @vfmul_vf_nxv4f32_unmasked( %va, ret %v } -declare @llvm.vp.fmul.nxv8f32(, , , i32) - define @vfmul_vv_nxv8f32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_nxv8f32: ; CHECK: # %bb.0: @@ -919,8 +899,6 @@ define @vfmul_vf_nxv8f32_unmasked( %va, ret %v } -declare @llvm.vp.fmul.nxv16f32(, , , i32) - define @vfmul_vv_nxv16f32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_nxv16f32: ; CHECK: # %bb.0: @@ -965,8 +943,6 @@ define @vfmul_vf_nxv16f32_unmasked( % ret %v } -declare @llvm.vp.fmul.nxv1f64(, , , i32) - define @vfmul_vv_nxv1f64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_nxv1f64: ; CHECK: # %bb.0: @@ -1011,8 +987,6 @@ define @vfmul_vf_nxv1f64_unmasked( %v ret %v } -declare @llvm.vp.fmul.nxv2f64(, , , i32) - define @vfmul_vv_nxv2f64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_nxv2f64: ; CHECK: # %bb.0: @@ -1057,8 +1031,6 @@ define @vfmul_vf_nxv2f64_unmasked( %v ret %v } -declare @llvm.vp.fmul.nxv4f64(, , , i32) - define @vfmul_vv_nxv4f64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_nxv4f64: ; CHECK: # %bb.0: @@ -1103,8 +1075,6 @@ define @vfmul_vf_nxv4f64_unmasked( %v ret %v } -declare @llvm.vp.fmul.nxv7f64(, , , i32) - define @vfmul_vv_nxv7f64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_nxv7f64: ; CHECK: # %bb.0: @@ -1115,8 +1085,6 @@ define @vfmul_vv_nxv7f64( %va, %v } -declare @llvm.vp.fmul.nxv8f64(, , , i32) - define @vfmul_vv_nxv8f64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_nxv8f64: ; CHECK: # %bb.0: diff 
--git a/llvm/test/CodeGen/RISCV/rvv/vfmul.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul.ll index 86c0ee0c629f8..8e8f2de3bb5eb 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmul.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmul.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfmul.nxv1f16.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv2f16.nxv2f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfmul_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv2f16.nxv2f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv4f16.nxv4f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfmul_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv4f16.nxv4f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind 
{ ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv8f16.nxv8f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfmul_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv8f16.nxv8f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv16f16.nxv16f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfmul_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv16f16.nxv16f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv32f16.nxv32f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfmul_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv32f16.nxv32f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -305,12 +227,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv1f32.nxv1f32( - , - , - , - 
iXLen, iXLen); - define @intrinsic_vfmul_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -329,13 +245,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -355,12 +264,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv2f32.nxv2f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfmul_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -379,13 +282,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv2f32.nxv2f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -405,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv4f32.nxv4f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfmul_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -429,13 +319,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv4f32.nxv4f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -455,12 +338,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv8f32.nxv8f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfmul_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -479,13 +356,6 @@ entry: ret %a } 
-declare @llvm.riscv.vfmul.mask.nxv8f32.nxv8f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -505,12 +375,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv16f32.nxv16f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfmul_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -529,13 +393,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -556,12 +413,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv1f64.nxv1f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfmul_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -580,13 +431,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv1f64.nxv1f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -606,12 +450,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv2f64.nxv2f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfmul_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -630,13 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv2f64.nxv2f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfmul_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -656,12 +487,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv4f64.nxv4f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfmul_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -680,13 +505,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv4f64.nxv4f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -706,12 +524,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv8f64.nxv8f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfmul_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -730,13 +542,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -757,12 +562,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv1f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfmul_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -781,13 +580,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv1f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -807,12 +599,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv2f16.f16( - , - , - half, - iXLen, iXLen); - define 
@intrinsic_vfmul_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -831,13 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv2f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -857,12 +636,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv4f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfmul_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -881,13 +654,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv4f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -907,12 +673,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv8f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfmul_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -931,13 +691,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv8f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -957,12 +710,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv16f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfmul_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -981,13 +728,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfmul.mask.nxv16f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -1007,12 +747,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv32f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfmul_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -1031,13 +765,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv32f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -1057,12 +784,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv1f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfmul_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -1081,13 +802,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv1f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -1107,12 +821,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv2f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfmul_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -1131,13 +839,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv2f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfmul_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -1157,12 +858,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv4f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfmul_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -1181,13 +876,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv4f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -1207,12 +895,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv8f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfmul_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1231,13 +913,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv8f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1257,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv16f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfmul_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -1281,13 +950,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv16f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -1307,12 +969,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv1f64.f64( - , - , - double, - iXLen, iXLen); - define 
@intrinsic_vfmul_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1331,13 +987,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv1f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1357,12 +1006,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv2f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfmul_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1381,13 +1024,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv2f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1407,12 +1043,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv4f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfmul_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -1431,13 +1061,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.mask.nxv4f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -1457,12 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vfmul.nxv8f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfmul_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry @@ -1481,13 +1098,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfmul.mask.nxv8f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfmul_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll index 871eb844ec2d0..03de2c97e685c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare @llvm.vp.fmuladd.nxv1f16(, , , , i32) - define @vfma_vv_nxv1f16( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv1f16: ; CHECK: # %bb.0: @@ -75,8 +73,6 @@ define @vfma_vf_nxv1f16_unmasked_commute( ret %v } -declare @llvm.vp.fmuladd.nxv2f16(, , , , i32) - define @vfma_vv_nxv2f16( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv2f16: ; CHECK: # %bb.0: @@ -146,8 +142,6 @@ define @vfma_vf_nxv2f16_unmasked_commute( ret %v } -declare @llvm.vp.fmuladd.nxv4f16(, , , , i32) - define @vfma_vv_nxv4f16( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv4f16: ; CHECK: # %bb.0: @@ -217,8 +211,6 @@ define @vfma_vf_nxv4f16_unmasked_commute( ret %v } -declare @llvm.vp.fmuladd.nxv8f16(, , , , i32) - define @vfma_vv_nxv8f16( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv8f16: ; CHECK: # %bb.0: @@ -288,8 +280,6 @@ define @vfma_vf_nxv8f16_unmasked_commute( ret %v } -declare @llvm.vp.fmuladd.nxv16f16(, , , , i32) - define @vfma_vv_nxv16f16( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv16f16: ; CHECK: # %bb.0: @@ -359,8 +349,6 @@ define @vfma_vf_nxv16f16_unmasked_commute( %v } -declare @llvm.vp.fmuladd.nxv32f16(, , , , i32) - define @vfma_vv_nxv32f16( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv32f16: ; CHECK: # %bb.0: @@ -432,8 +420,6 @@ define 
@vfma_vf_nxv32f16_unmasked_commute( %v } -declare @llvm.vp.fmuladd.nxv1f32(, , , , i32) - define @vfma_vv_nxv1f32( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv1f32: ; CHECK: # %bb.0: @@ -503,8 +489,6 @@ define @vfma_vf_nxv1f32_unmasked_commute( %v } -declare @llvm.vp.fmuladd.nxv2f32(, , , , i32) - define @vfma_vv_nxv2f32( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv2f32: ; CHECK: # %bb.0: @@ -574,8 +558,6 @@ define @vfma_vf_nxv2f32_unmasked_commute( %v } -declare @llvm.vp.fmuladd.nxv4f32(, , , , i32) - define @vfma_vv_nxv4f32( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv4f32: ; CHECK: # %bb.0: @@ -645,8 +627,6 @@ define @vfma_vf_nxv4f32_unmasked_commute( %v } -declare @llvm.vp.fmuladd.nxv8f32(, , , , i32) - define @vfma_vv_nxv8f32( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv8f32: ; CHECK: # %bb.0: @@ -716,8 +696,6 @@ define @vfma_vf_nxv8f32_unmasked_commute( %v } -declare @llvm.vp.fmuladd.nxv16f32(, , , , i32) - define @vfma_vv_nxv16f32( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv16f32: ; CHECK: # %bb.0: @@ -789,8 +767,6 @@ define @vfma_vf_nxv16f32_unmasked_commute( %v } -declare @llvm.vp.fmuladd.nxv1f64(, , , , i32) - define @vfma_vv_nxv1f64( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv1f64: ; CHECK: # %bb.0: @@ -860,8 +836,6 @@ define @vfma_vf_nxv1f64_unmasked_commute( %v } -declare @llvm.vp.fmuladd.nxv2f64(, , , , i32) - define @vfma_vv_nxv2f64( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv2f64: ; CHECK: # %bb.0: @@ -931,8 +905,6 @@ define @vfma_vf_nxv2f64_unmasked_commute( %v } -declare @llvm.vp.fmuladd.nxv4f64(, , , , i32) - define @vfma_vv_nxv4f64( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv4f64: ; CHECK: # %bb.0: @@ -1002,8 +974,6 @@ define @vfma_vf_nxv4f64_unmasked_commute( %v } -declare @llvm.vp.fmuladd.nxv7f64(, , , , i32) - define @vfma_vv_nxv7f64( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vfma_vv_nxv7f64: ; CHECK: # %bb.0: @@ -1027,8 +997,6 @@ define @vfma_vv_nxv7f64_unmasked( %va ret %v } -declare @llvm.vp.fmuladd.nxv8f64(, , , , i32) - define @vfma_vv_nxv8f64( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv8f64: ; CHECK: # %bb.0: @@ -1100,8 +1068,6 @@ define @vfma_vf_nxv8f64_unmasked_commute( %v } -declare @llvm.vp.fmuladd.nxv16f64(, , , , i32) - define @vfma_vv_nxv16f64( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv16f64: ; CHECK: # %bb.0: @@ -1297,8 +1263,6 @@ define @vfma_vv_nxv16f64_unmasked( ret %v } -declare @llvm.vp.fneg.nxv1f16(, , i32) - define @vfmsub_vv_nxv1f16( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv1f16: ; CHECK: # %bb.0: @@ -1688,8 +1652,6 @@ define @vfnmsub_vf_nxv1f16_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv2f16(, , i32) - define @vfmsub_vv_nxv2f16( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv2f16: ; CHECK: # %bb.0: @@ -2079,8 +2041,6 @@ define @vfnmsub_vf_nxv2f16_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv4f16(, , i32) - define @vfmsub_vv_nxv4f16( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv4f16: ; CHECK: # %bb.0: @@ -2470,8 +2430,6 @@ define @vfnmsub_vf_nxv4f16_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv8f16(, , i32) - define @vfmsub_vv_nxv8f16( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv8f16: ; CHECK: # %bb.0: @@ -2861,8 +2819,6 @@ define @vfnmsub_vf_nxv8f16_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv16f16(, , i32) - define @vfmsub_vv_nxv16f16( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv16f16: ; CHECK: # %bb.0: @@ -3252,8 +3208,6 @@ define @vfnmsub_vf_nxv16f16_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv32f16(, , i32) - define @vfmsub_vv_nxv32f16( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv32f16: ; CHECK: # %bb.0: @@ -3653,8 +3607,6 @@ define 
@vfnmsub_vf_nxv32f16_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv1f32(, , i32) - define @vfmsub_vv_nxv1f32( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv1f32: ; CHECK: # %bb.0: @@ -4044,8 +3996,6 @@ define @vfnmsub_vf_nxv1f32_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv2f32(, , i32) - define @vfmsub_vv_nxv2f32( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv2f32: ; CHECK: # %bb.0: @@ -4435,8 +4385,6 @@ define @vfnmsub_vf_nxv2f32_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv4f32(, , i32) - define @vfmsub_vv_nxv4f32( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv4f32: ; CHECK: # %bb.0: @@ -4826,8 +4774,6 @@ define @vfnmsub_vf_nxv4f32_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv8f32(, , i32) - define @vfmsub_vv_nxv8f32( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv8f32: ; CHECK: # %bb.0: @@ -5217,8 +5163,6 @@ define @vfnmsub_vf_nxv8f32_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv16f32(, , i32) - define @vfmsub_vv_nxv16f32( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv16f32: ; CHECK: # %bb.0: @@ -5618,8 +5562,6 @@ define @vfnmsub_vf_nxv16f32_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv1f64(, , i32) - define @vfmsub_vv_nxv1f64( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv1f64: ; CHECK: # %bb.0: @@ -6009,8 +5951,6 @@ define @vfnmsub_vf_nxv1f64_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv2f64(, , i32) - define @vfmsub_vv_nxv2f64( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv2f64: ; CHECK: # %bb.0: @@ -6400,8 +6340,6 @@ define @vfnmsub_vf_nxv2f64_neg_splat_unmasked_commute( %v } -declare @llvm.vp.fneg.nxv4f64(, , i32) - define @vfmsub_vv_nxv4f64( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv4f64: ; CHECK: # %bb.0: @@ -6791,8 +6729,6 @@ define @vfnmsub_vf_nxv4f64_neg_splat_unmasked_commute( %v } -declare 
@llvm.vp.fneg.nxv8f64(, , i32) - define @vfmsub_vv_nxv8f64( %va, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv-bf-s.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv-bf-s.ll index 7a63a4710c534..dd1a28d4c59ea 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmv-bf-s.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmv-bf-s.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+v,+experimental-zvfbfa -target-abi lp64d -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv32 -mattr=+d,+v,+experimental-zvfbfa -target-abi ilp32d -verify-machineinstrs < %s | FileCheck %s -declare bfloat @llvm.riscv.vfmv.f.s.nxv1bf16() - define bfloat @intrinsic_vfmv.f.s_s_nxv1bf16( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -15,8 +13,6 @@ entry: ret bfloat %a } -declare bfloat @llvm.riscv.vfmv.f.s.nxv2bf16() - define bfloat @intrinsic_vfmv.f.s_s_nxv2bf16( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -28,8 +24,6 @@ entry: ret bfloat %a } -declare bfloat @llvm.riscv.vfmv.f.s.nxv4bf16() - define bfloat @intrinsic_vfmv.f.s_s_nxv4bf16( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -41,8 +35,6 @@ entry: ret bfloat %a } -declare bfloat @llvm.riscv.vfmv.f.s.nxv8bf16() - define bfloat @intrinsic_vfmv.f.s_s_nxv8bf16( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -54,8 +46,6 @@ entry: ret bfloat %a } -declare bfloat @llvm.riscv.vfmv.f.s.nxv16bf16() - define bfloat @intrinsic_vfmv.f.s_s_nxv16bf16( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -67,8 +57,6 @@ entry: ret bfloat %a } -declare bfloat @llvm.riscv.vfmv.f.s.nxv32bf16() - define bfloat @intrinsic_vfmv.f.s_s_nxv32bf16( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv32bf16: ; CHECK: # %bb.0: # %entry diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vfmv-s-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv-s-bf.ll index a810809fca515..564160d11ddeb 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmv-s-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmv-s-bf.ll @@ -4,8 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vfmv.s.f.nxv1bf16(, bfloat, iXLen) - define @intrinsic_vfmv.s.f_f_nxv1bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -17,8 +15,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.s.f.nxv2bf16(, bfloat, iXLen) - define @intrinsic_vfmv.s.f_f_nxv2bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -30,8 +26,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.s.f.nxv4bf16(, bfloat, iXLen) - define @intrinsic_vfmv.s.f_f_nxv4bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -43,8 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.s.f.nxv8bf16(, bfloat, iXLen) - define @intrinsic_vfmv.s.f_f_nxv8bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -56,8 +48,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.s.f.nxv16bf16(, bfloat, iXLen) - define @intrinsic_vfmv.s.f_f_nxv16bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -69,8 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.s.f.nxv32bf16(, bfloat, iXLen) - define @intrinsic_vfmv.s.f_f_nxv32bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv32bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv-v-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv-v-bf.ll index f3293ddc83ef9..ce856da16ccb7 100644 --- 
a/llvm/test/CodeGen/RISCV/rvv/vfmv-v-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmv-v-bf.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfmv.v.f.nxv1bf16( - , - bfloat, - iXLen); - define @intrinsic_vfmv.v.f_f_nxv1bf16(bfloat %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -24,11 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.v.f.nxv2bf16( - , - bfloat, - iXLen); - define @intrinsic_vfmv.v.f_f_nxv2bf16(bfloat %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -44,11 +34,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.v.f.nxv4bf16( - , - bfloat, - iXLen); - define @intrinsic_vfmv.v.f_f_nxv4bf16(bfloat %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -64,11 +49,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.v.f.nxv8bf16( - , - bfloat, - iXLen); - define @intrinsic_vfmv.v.f_f_nxv8bf16(bfloat %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -84,11 +64,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.v.f.nxv16bf16( - , - bfloat, - iXLen); - define @intrinsic_vfmv.v.f_f_nxv16bf16(bfloat %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -104,11 +79,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.v.f.nxv32bf16( - , - bfloat, - iXLen); - define @intrinsic_vfmv.v.f_f_nxv32bf16(bfloat %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv32bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.f.s.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.f.s.ll index 3779b0ab18d8a..0819ec66f8f78 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmv.f.s.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmv.f.s.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+v,+zvfh -target-abi 
lp64d -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv32 -mattr=+d,+v,+zvfh -target-abi ilp32d -verify-machineinstrs < %s | FileCheck %s -declare half @llvm.riscv.vfmv.f.s.nxv1f16() - define half @intrinsic_vfmv.f.s_s_nxv1f16( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -15,8 +13,6 @@ entry: ret half %a } -declare half @llvm.riscv.vfmv.f.s.nxv2f16() - define half @intrinsic_vfmv.f.s_s_nxv2f16( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -28,8 +24,6 @@ entry: ret half %a } -declare half @llvm.riscv.vfmv.f.s.nxv4f16() - define half @intrinsic_vfmv.f.s_s_nxv4f16( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -41,8 +35,6 @@ entry: ret half %a } -declare half @llvm.riscv.vfmv.f.s.nxv8f16() - define half @intrinsic_vfmv.f.s_s_nxv8f16( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -54,8 +46,6 @@ entry: ret half %a } -declare half @llvm.riscv.vfmv.f.s.nxv16f16() - define half @intrinsic_vfmv.f.s_s_nxv16f16( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -67,8 +57,6 @@ entry: ret half %a } -declare half @llvm.riscv.vfmv.f.s.nxv32f16() - define half @intrinsic_vfmv.f.s_s_nxv32f16( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -80,8 +68,6 @@ entry: ret half %a } -declare float @llvm.riscv.vfmv.f.s.nxv1f32() - define float @intrinsic_vfmv.f.s_s_nxv1f32( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -93,8 +79,6 @@ entry: ret float %a } -declare float @llvm.riscv.vfmv.f.s.nxv2f32() - define float @intrinsic_vfmv.f.s_s_nxv2f32( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -106,8 +90,6 @@ entry: ret float %a } -declare float @llvm.riscv.vfmv.f.s.nxv4f32() - define float @intrinsic_vfmv.f.s_s_nxv4f32( %0) 
nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -119,8 +101,6 @@ entry: ret float %a } -declare float @llvm.riscv.vfmv.f.s.nxv8f32() - define float @intrinsic_vfmv.f.s_s_nxv8f32( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -132,8 +112,6 @@ entry: ret float %a } -declare float @llvm.riscv.vfmv.f.s.nxv16f32() - define float @intrinsic_vfmv.f.s_s_nxv16f32( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -145,8 +123,6 @@ entry: ret float %a } -declare double @llvm.riscv.vfmv.f.s.nxv1f64() - define double @intrinsic_vfmv.f.s_s_nxv1f64( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -158,8 +134,6 @@ entry: ret double %a } -declare double @llvm.riscv.vfmv.f.s.nxv2f64() - define double @intrinsic_vfmv.f.s_s_nxv2f64( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -171,8 +145,6 @@ entry: ret double %a } -declare double @llvm.riscv.vfmv.f.s.nxv4f64() - define double @intrinsic_vfmv.f.s_s_nxv4f64( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -184,8 +156,6 @@ entry: ret double %a } -declare double @llvm.riscv.vfmv.f.s.nxv8f64() - define double @intrinsic_vfmv.f.s_s_nxv8f64( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f.ll index 912dfe499016f..0e8ecc251d48f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f.ll @@ -4,8 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vfmv.s.f.nxv1f16(, half, iXLen) - define @intrinsic_vfmv.s.f_f_nxv1f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfmv.s.f_f_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -17,8 +15,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.s.f.nxv2f16(, half, iXLen) - define @intrinsic_vfmv.s.f_f_nxv2f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -30,8 +26,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.s.f.nxv4f16(, half, iXLen) - define @intrinsic_vfmv.s.f_f_nxv4f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -43,8 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.s.f.nxv8f16(, half, iXLen) - define @intrinsic_vfmv.s.f_f_nxv8f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -56,8 +48,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.s.f.nxv16f16(, half, iXLen) - define @intrinsic_vfmv.s.f_f_nxv16f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -69,8 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.s.f.nxv32f16(, half, iXLen) - define @intrinsic_vfmv.s.f_f_nxv32f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -82,8 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.s.f.nxv1f32(, float, iXLen) - define @intrinsic_vfmv.s.f_f_nxv1f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -95,8 +81,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.s.f.nxv2f32(, float, iXLen) - define @intrinsic_vfmv.s.f_f_nxv2f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -108,8 +92,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.s.f.nxv4f32(, float, iXLen) - define @intrinsic_vfmv.s.f_f_nxv4f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -121,8 +103,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfmv.s.f.nxv8f32(, float, iXLen) - define @intrinsic_vfmv.s.f_f_nxv8f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -134,8 +114,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.s.f.nxv16f32(, float, iXLen) - define @intrinsic_vfmv.s.f_f_nxv16f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -147,8 +125,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.s.f.nxv1f64(, double, iXLen) - define @intrinsic_vfmv.s.f_f_nxv1f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -160,8 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.s.f.nxv2f64(, double, iXLen) - define @intrinsic_vfmv.s.f_f_nxv2f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -173,8 +147,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.s.f.nxv4f64(, double, iXLen) - define @intrinsic_vfmv.s.f_f_nxv4f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -186,8 +158,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.s.f.nxv8f64(, double, iXLen) - define @intrinsic_vfmv.s.f_f_nxv8f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f.ll index 83d87a7a74b96..f7da7ac6db65b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfmv.v.f.nxv1f16( - , - half, - iXLen); - define @intrinsic_vfmv.v.f_f_nxv1f16(half %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -24,11 +19,6 @@ entry: ret 
%a } -declare @llvm.riscv.vfmv.v.f.nxv2f16( - , - half, - iXLen); - define @intrinsic_vfmv.v.f_f_nxv2f16(half %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -44,11 +34,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.v.f.nxv4f16( - , - half, - iXLen); - define @intrinsic_vfmv.v.f_f_nxv4f16(half %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -64,11 +49,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.v.f.nxv8f16( - , - half, - iXLen); - define @intrinsic_vfmv.v.f_f_nxv8f16(half %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -84,11 +64,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.v.f.nxv16f16( - , - half, - iXLen); - define @intrinsic_vfmv.v.f_f_nxv16f16(half %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -104,11 +79,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.v.f.nxv32f16( - , - half, - iXLen); - define @intrinsic_vfmv.v.f_f_nxv32f16(half %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -124,11 +94,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.v.f.nxv1f32( - , - float, - iXLen); - define @intrinsic_vfmv.v.f_f_nxv1f32(float %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -144,11 +109,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.v.f.nxv2f32( - , - float, - iXLen); - define @intrinsic_vfmv.v.f_f_nxv2f32(float %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -164,11 +124,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.v.f.nxv4f32( - , - float, - iXLen); - define @intrinsic_vfmv.v.f_f_nxv4f32(float %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -184,11 +139,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.v.f.nxv8f32( - , - float, - iXLen); - define 
@intrinsic_vfmv.v.f_f_nxv8f32(float %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -204,11 +154,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.v.f.nxv16f32( - , - float, - iXLen); - define @intrinsic_vfmv.v.f_f_nxv16f32(float %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -224,11 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.v.f.nxv1f64( - , - double, - iXLen); - define @intrinsic_vfmv.v.f_f_nxv1f64(double %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -244,11 +184,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.v.f.nxv2f64( - , - double, - iXLen); - define @intrinsic_vfmv.v.f_f_nxv2f64(double %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -264,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.v.f.nxv4f64( - , - double, - iXLen); - define @intrinsic_vfmv.v.f_f_nxv4f64(double %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -284,11 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vfmv.v.f.nxv8f64( - , - double, - iXLen); - define @intrinsic_vfmv.v.f_f_nxv8f64(double %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll index b1fd225d37aa9..9f74f5570e434 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll @@ -7,10 +7,6 @@ ; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32( - , - , - iXLen, iXLen); define @intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: 
intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32: @@ -30,12 +26,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -54,11 +44,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.f.w.nxv2f16.nxv2f32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.f.w_nxv2f16_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv2f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -77,12 +62,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.f.w.mask.nxv2f16.nxv2f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.f.w_nxv2f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv2f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -101,11 +80,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.f.w.nxv4f16.nxv4f32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.f.w_nxv4f16_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv4f16_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -124,12 +98,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.f.w.mask.nxv4f16.nxv4f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.f.w_nxv4f16_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv4f16_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -148,11 +116,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.f.w.nxv8f16.nxv8f32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.f.w_nxv8f16_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv8f16_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -171,12 +134,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.f.w.mask.nxv8f16.nxv8f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.f.w_nxv8f16_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfncvt_mask_f.f.w_nxv8f16_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -195,11 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.f.w.nxv16f16.nxv16f32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.f.w_nxv16f16_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv16f16_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -218,12 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.f.w.mask.nxv16f16.nxv16f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.f.w_nxv16f16_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv16f16_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -242,11 +188,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.f.w_nxv1f32_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv1f32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -265,12 +206,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.f.w.mask.nxv1f32.nxv1f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.f.w_nxv1f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv1f32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -289,11 +224,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.f.w_nxv2f32_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv2f32_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -312,12 +242,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.f.w_nxv2f32_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv2f32_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -336,11 +260,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.f.w_nxv4f32_nxv4f64( %0, iXLen %1) nounwind { ; 
CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv4f32_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -359,12 +278,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.f.w_nxv4f32_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv4f32_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -383,11 +296,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.f.w_nxv8f32_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv8f32_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -406,12 +314,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.f.w_nxv8f32_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv8f32_nxv8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x.ll index 9d74d6b85772a..6f793f371292b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -27,12 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -51,11 +40,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.x.w.nxv2f16.nxv2i32( - , - , - iXLen, iXLen); - define 
@intrinsic_vfncvt_f.x.w_nxv2f16_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -74,12 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.x.w.mask.nxv2f16.nxv2i32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.x.w_nxv2f16_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -98,11 +76,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.x.w.nxv4f16.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.x.w_nxv4f16_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -121,12 +94,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.x.w.mask.nxv4f16.nxv4i32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.x.w_nxv4f16_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -145,11 +112,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.x.w.nxv8f16.nxv8i32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.x.w_nxv8f16_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -168,12 +130,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.x.w.mask.nxv8f16.nxv8i32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.x.w_nxv8f16_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -192,11 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.x.w.nxv16f16.nxv16i32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.x.w_nxv16f16_nxv16i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -215,12 +166,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.x.w.mask.nxv16f16.nxv16i32( - , - , - , - iXLen, iXLen, 
iXLen); - define @intrinsic_vfncvt_mask_f.x.w_nxv16f16_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -239,11 +184,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.x.w_nxv1f32_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -262,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.x.w_nxv1f32_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -286,11 +220,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.x.w_nxv2f32_nxv2i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -309,12 +238,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.x.w.mask.nxv2f32.nxv2i64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.x.w_nxv2f32_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -333,11 +256,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.x.w_nxv4f32_nxv4i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -356,12 +274,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.x.w.mask.nxv4f32.nxv4i64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.x.w_nxv4f32_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -380,11 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64( - 
, - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.x.w_nxv8f32_nxv8i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -403,12 +310,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.x.w.mask.nxv8f32.nxv8i64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.x.w_nxv8f32_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu.ll index 19740af4ebe0a..4a1fbd05028b4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -27,12 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.xu.w_nxv1f16_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -51,11 +40,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.xu.w.nxv2f16.nxv2i32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.xu.w_nxv2f16_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -74,12 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f16.nxv2i32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.xu.w_nxv2f16_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ 
-98,11 +76,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.xu.w.nxv4f16.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.xu.w_nxv4f16_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -121,12 +94,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f16.nxv4i32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.xu.w_nxv4f16_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -145,11 +112,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.xu.w.nxv8f16.nxv8i32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.xu.w_nxv8f16_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -168,12 +130,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f16.nxv8i32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.xu.w_nxv8f16_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -192,11 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.xu.w.nxv16f16.nxv16i32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.xu.w_nxv16f16_nxv16i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -215,12 +166,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv16f16.nxv16i32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.xu.w_nxv16f16_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -239,11 +184,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.xu.w_nxv1f32_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv1f32_nxv1i64: ; 
CHECK: # %bb.0: # %entry @@ -262,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.xu.w_nxv1f32_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -286,11 +220,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.xu.w_nxv2f32_nxv2i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -309,12 +238,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.xu.w_nxv2f32_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -333,11 +256,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.xu.w_nxv4f32_nxv4i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -356,12 +274,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.xu.w_nxv4f32_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -380,11 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_f.xu.w_nxv8f32_nxv8i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -403,12 +310,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_f.xu.w_nxv8f32_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfncvt_mask_f.xu.w_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-bf-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-bf-f.ll index 7d587fd55cd83..ea2e5ffe4cba0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-bf-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-bf-f.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfncvt.rod.f.f.w.nxv1bf16.nxv1f32( - , - , - iXLen); - define @intrinsic_vfncvt_rod.f.f.w_nxv1bf16_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv1bf16_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -25,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rod.f.f.w_nxv1bf16_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv1bf16_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -48,11 +36,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.nxv2bf16.nxv2f32( - , - , - iXLen); - define @intrinsic_vfncvt_rod.f.f.w_nxv2bf16_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv2bf16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -69,13 +52,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rod.f.f.w_nxv2bf16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv2bf16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -92,11 +68,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.nxv4bf16.nxv4f32( - , - , - iXLen); - define @intrinsic_vfncvt_rod.f.f.w_nxv4bf16_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv4bf16_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -113,13 +84,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rod.f.f.w_nxv4bf16_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv4bf16_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -136,11 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.nxv8bf16.nxv8f32( - , - , - iXLen); - define @intrinsic_vfncvt_rod.f.f.w_nxv8bf16_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv8bf16_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -157,13 +116,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rod.f.f.w_nxv8bf16_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv8bf16_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -180,11 +132,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.nxv16bf16.nxv16f32( - , - , - iXLen); - define @intrinsic_vfncvt_rod.f.f.w_nxv16bf16_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv16bf16_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -201,13 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rod.f.f.w_nxv16bf16_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv16bf16_nxv16f32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f.ll index f0c2509371df8..b6063b0ac9759 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32( - , - , - iXLen); - define @intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32( %0, 
iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -25,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rod.f.f.w_nxv1f16_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv1f16_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -48,11 +36,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32( - , - , - iXLen); - define @intrinsic_vfncvt_rod.f.f.w_nxv2f16_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv2f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -69,13 +52,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rod.f.f.w_nxv2f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv2f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -92,11 +68,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32( - , - , - iXLen); - define @intrinsic_vfncvt_rod.f.f.w_nxv4f16_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv4f16_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -113,13 +84,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rod.f.f.w_nxv4f16_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv4f16_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -136,11 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32( - , - , - iXLen); - define @intrinsic_vfncvt_rod.f.f.w_nxv8f16_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv8f16_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -157,13 +116,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32( - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vfncvt_mask_rod.f.f.w_nxv8f16_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv8f16_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -180,11 +132,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32( - , - , - iXLen); - define @intrinsic_vfncvt_rod.f.f.w_nxv16f16_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv16f16_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -201,13 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rod.f.f.w_nxv16f16_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv16f16_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -224,11 +164,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64( - , - , - iXLen); - define @intrinsic_vfncvt_rod.f.f.w_nxv1f32_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv1f32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -245,13 +180,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rod.f.f.w_nxv1f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv1f32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -268,11 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64( - , - , - iXLen); - define @intrinsic_vfncvt_rod.f.f.w_nxv2f32_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv2f32_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -289,13 +212,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rod.f.f.w_nxv2f32_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv2f32_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -312,11 +228,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64( - , - , - iXLen); - define @intrinsic_vfncvt_rod.f.f.w_nxv4f32_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv4f32_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -333,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rod.f.f.w_nxv4f32_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv4f32_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -356,11 +260,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64( - , - , - iXLen); - define @intrinsic_vfncvt_rod.f.f.w_nxv8f32_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv8f32_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -377,13 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rod.f.f.w_nxv8f32_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv8f32_nxv8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-bf.ll index ee9e3d1b9f630..1b14b87114a86 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-bf.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1bf16( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -25,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i8_nxv1bf16( %0, %1, %2, iXLen %3) 
nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i8_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -48,11 +36,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2bf16( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.x.f.w_nxv2i8_nxv2bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv2i8_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -69,13 +52,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i8_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i8_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -92,11 +68,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4bf16( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.x.f.w_nxv4i8_nxv4bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv4i8_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -113,13 +84,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i8_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i8_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -136,11 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8bf16( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.x.f.w_nxv8i8_nxv8bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv8i8_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -157,13 +116,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i8_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i8_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -180,11 +132,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16bf16( - , - , - iXLen); - define 
@intrinsic_vfncvt_rtz.x.f.w_nxv16i8_nxv16bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv16i8_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -201,13 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i8_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i8_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -224,11 +164,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32bf16( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.x.f.w_nxv32i8_nxv32bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv32i8_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -245,13 +180,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv32i8_nxv32bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv32i8_nxv32bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f.ll index a71af7fe9e64a..4920539bb2ac1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -25,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i8_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i8_nxv1f16: ; 
CHECK: # %bb.0: # %entry @@ -48,11 +36,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2f16( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.x.f.w_nxv2i8_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv2i8_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -69,13 +52,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i8_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i8_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -92,11 +68,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4f16( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.x.f.w_nxv4i8_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv4i8_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -113,13 +84,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i8_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i8_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -136,11 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.x.f.w_nxv8i8_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv8i8_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -157,13 +116,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i8_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i8_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -180,11 +132,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.x.f.w_nxv16i8_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: 
intrinsic_vfncvt_rtz.x.f.w_nxv16i8_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -201,13 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i8_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i8_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -224,11 +164,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.x.f.w_nxv32i8_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv32i8_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -245,13 +180,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv32i8_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv32i8_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -268,11 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.x.f.w_nxv1i16_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i16_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -289,13 +212,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i16_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i16_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -312,11 +228,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.x.f.w_nxv2i16_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv2i16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -333,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32( - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -356,11 +260,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.x.f.w_nxv4i16_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv4i16_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -377,13 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i16_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i16_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -400,11 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.x.f.w_nxv8i16_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv8i16_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -421,13 +308,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i16_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i16_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -444,11 +324,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.x.f.w_nxv16i16_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv16i16_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -465,13 +340,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i16_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i16_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -488,11 +356,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.x.f.w_nxv1i32_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -509,13 +372,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -532,11 +388,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.x.f.w_nxv2i32_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv2i32_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -553,13 +404,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i32_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i32_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -576,11 +420,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.x.f.w_nxv4i32_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv4i32_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -597,13 +436,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i32_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i32_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -620,11 +452,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.x.f.w_nxv8i32_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv8i32_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -641,13 
+468,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i32_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i32_nxv8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-bf.ll index 521f7274dc5c9..fac3724dfdc92 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-bf.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1bf16( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -25,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i8_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i8_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -48,11 +36,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2bf16( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.xu.f.w_nxv2i8_nxv2bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv2i8_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -69,13 +52,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i8_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i8_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -92,11 +68,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4bf16( - , - , - iXLen); - define 
@intrinsic_vfncvt_rtz.xu.f.w_nxv4i8_nxv4bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv4i8_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -113,13 +84,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i8_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i8_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -136,11 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8bf16( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.xu.f.w_nxv8i8_nxv8bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv8i8_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -157,13 +116,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i8_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i8_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -180,11 +132,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16bf16( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.xu.f.w_nxv16i8_nxv16bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv16i8_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -201,13 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i8_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i8_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -224,11 +164,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32bf16( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.xu.f.w_nxv32i8_nxv32bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv32i8_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -245,13 +180,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv32i8_nxv32bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv32i8_nxv32bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f.ll index c92909eb587e9..c5dd8af11b79c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -25,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i8_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -48,11 +36,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2f16( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.xu.f.w_nxv2i8_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv2i8_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -69,13 +52,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i8_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i8_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -92,11 +68,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4f16( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.xu.f.w_nxv4i8_nxv4f16( %0, iXLen %1) nounwind { ; 
CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv4i8_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -113,13 +84,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i8_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i8_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -136,11 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.xu.f.w_nxv8i8_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv8i8_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -157,13 +116,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i8_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i8_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -180,11 +132,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.xu.f.w_nxv16i8_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv16i8_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -201,13 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i8_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i8_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -224,11 +164,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.xu.f.w_nxv32i8_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv32i8_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -245,13 +180,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16( - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vfncvt_mask_rtz.xu.f.w_nxv32i8_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv32i8_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -268,11 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.xu.f.w_nxv1i16_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i16_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -289,13 +212,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i16_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i16_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -312,11 +228,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.xu.f.w_nxv2i16_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv2i16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -333,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -356,11 +260,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.xu.f.w_nxv4i16_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv4i16_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -377,13 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i16_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i16_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -400,11 +292,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.xu.f.w_nxv8i16_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv8i16_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -421,13 +308,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i16_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i16_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -444,11 +324,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.xu.f.w_nxv16i16_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv16i16_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -465,13 +340,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i16_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i16_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -488,11 +356,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.xu.f.w_nxv1i32_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -509,13 +372,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -532,11 +388,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.xu.f.w_nxv2i32_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv2i32_nxv2f64: ; 
CHECK: # %bb.0: # %entry @@ -553,13 +404,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i32_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i32_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -576,11 +420,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.xu.f.w_nxv4i32_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv4i32_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -597,13 +436,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i32_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i32_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -620,11 +452,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64( - , - , - iXLen); - define @intrinsic_vfncvt_rtz.xu.f.w_nxv8i32_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv8i32_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -641,13 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i32_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i32_nxv8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-bf.ll index ab9ebade287e6..a8e437a3e27d7 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-bf.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16( - , - , - iXLen, 
iXLen); - define @intrinsic_vfncvt_x.f.w_nxv1i8_nxv1bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i8_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -27,12 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_x.f.w_nxv1i8_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv1i8_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -51,11 +40,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_x.f.w_nxv2i8_nxv2bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv2i8_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -74,12 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_x.f.w_nxv2i8_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv2i8_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -98,11 +76,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_x.f.w_nxv4i8_nxv4bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv4i8_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -121,12 +94,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_x.f.w_nxv4i8_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv4i8_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -145,11 +112,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_x.f.w_nxv8i8_nxv8bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv8i8_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -168,12 +130,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16( - , - , - , - iXLen, iXLen, 
iXLen); - define @intrinsic_vfncvt_mask_x.f.w_nxv8i8_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv8i8_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -192,11 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_x.f.w_nxv16i8_nxv16bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv16i8_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -215,12 +166,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_x.f.w_nxv16i8_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv16i8_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -239,11 +184,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_x.f.w_nxv32i8_nxv32bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv32i8_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -262,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_x.f.w_nxv32i8_nxv32bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv32i8_nxv32bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f.ll index ee51b752b85f4..31adaa34d8588 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -27,12 +22,6 @@ entry: 
ret %a } -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_x.f.w_nxv1i8_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -51,11 +40,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2f16( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_x.f.w_nxv2i8_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv2i8_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -74,12 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_x.f.w_nxv2i8_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv2i8_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -98,11 +76,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4f16( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_x.f.w_nxv4i8_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv4i8_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -121,12 +94,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_x.f.w_nxv4i8_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv4i8_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -145,11 +112,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8f16( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_x.f.w_nxv8i8_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv8i8_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -168,12 +130,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_x.f.w_nxv8i8_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv8i8_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -192,11 +148,6 @@ entry: 
ret %a } -declare @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16f16( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_x.f.w_nxv16i8_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv16i8_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -215,12 +166,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_x.f.w_nxv16i8_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv16i8_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -239,11 +184,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32f16( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_x.f.w_nxv32i8_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv32i8_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -262,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_x.f.w_nxv32i8_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv32i8_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -286,11 +220,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_x.f.w_nxv1i16_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i16_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -309,12 +238,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_x.f.w_nxv1i16_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv1i16_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -333,11 +256,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -356,12 +274,6 @@ 
entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_x.f.w_nxv2i16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv2i16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -380,11 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_x.f.w_nxv4i16_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv4i16_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -403,12 +310,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_x.f.w_nxv4i16_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv4i16_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -427,11 +328,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_x.f.w_nxv8i16_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv8i16_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -450,12 +346,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_x.f.w_nxv8i16_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv8i16_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -474,11 +364,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_x.f.w_nxv16i16_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv16i16_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -497,12 +382,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_x.f.w_nxv16i16_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv16i16_nxv16f32: ; CHECK: 
# %bb.0: # %entry @@ -521,11 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_x.f.w_nxv1i32_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -544,12 +418,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_x.f.w_nxv1i32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv1i32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -568,11 +436,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_x.f.w_nxv2i32_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv2i32_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -591,12 +454,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_x.f.w_nxv2i32_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv2i32_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -615,11 +472,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_x.f.w_nxv4i32_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv4i32_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -638,12 +490,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_x.f.w_nxv4i32_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv4i32_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -662,11 +508,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_x.f.w_nxv8i32_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv8i32_nxv8f64: ; CHECK: # 
%bb.0: # %entry @@ -685,12 +526,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_x.f.w_nxv8i32_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv8i32_nxv8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-bf.ll index 61c6803ce12bd..c9acf9c524537 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-bf.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -27,12 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -51,11 +40,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_xu.f.w_nxv2i8_nxv2bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv2i8_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -74,12 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_xu.f.w_nxv2i8_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i8_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -98,11 +76,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16( - , - , - iXLen, iXLen); - define 
@intrinsic_vfncvt_xu.f.w_nxv4i8_nxv4bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv4i8_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -121,12 +94,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_xu.f.w_nxv4i8_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i8_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -145,11 +112,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_xu.f.w_nxv8i8_nxv8bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv8i8_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -168,12 +130,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_xu.f.w_nxv8i8_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i8_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -192,11 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_xu.f.w_nxv16i8_nxv16bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv16i8_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -215,12 +166,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_xu.f.w_nxv16i8_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv16i8_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -239,11 +184,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_xu.f.w_nxv32i8_nxv32bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv32i8_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -262,12 +202,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_xu.f.w_nxv32i8_nxv32bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv32i8_nxv32bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f.ll index 1035ec9f643d5..92cbf536b23da 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -27,12 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -51,11 +40,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2f16( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_xu.f.w_nxv2i8_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv2i8_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -74,12 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_xu.f.w_nxv2i8_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i8_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -98,11 +76,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4f16( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_xu.f.w_nxv4i8_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: 
intrinsic_vfncvt_xu.f.w_nxv4i8_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -121,12 +94,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_xu.f.w_nxv4i8_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i8_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -145,11 +112,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_xu.f.w_nxv8i8_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv8i8_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -168,12 +130,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_xu.f.w_nxv8i8_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i8_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -192,11 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_xu.f.w_nxv16i8_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv16i8_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -215,12 +166,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_xu.f.w_nxv16i8_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv16i8_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -239,11 +184,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_xu.f.w_nxv32i8_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv32i8_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -262,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_xu.f.w_nxv32i8_nxv32f16( %0, 
%1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv32i8_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -286,11 +220,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_xu.f.w_nxv1i16_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i16_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -309,12 +238,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_xu.f.w_nxv1i16_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i16_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -333,11 +256,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_xu.f.w_nxv2i16_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv2i16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -356,12 +274,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_xu.f.w_nxv2i16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -380,11 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_xu.f.w_nxv4i16_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv4i16_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -403,12 +310,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_xu.f.w_nxv4i16_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i16_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -427,11 +328,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32( - , - , - iXLen, iXLen); - define 
@intrinsic_vfncvt_xu.f.w_nxv8i16_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv8i16_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -450,12 +346,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_xu.f.w_nxv8i16_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i16_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -474,11 +364,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_xu.f.w_nxv16i16_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv16i16_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -497,12 +382,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_xu.f.w_nxv16i16_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv16i16_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -521,11 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_xu.f.w_nxv1i32_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -544,12 +418,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_xu.f.w_nxv1i32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -568,11 +436,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_xu.f.w_nxv2i32_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv2i32_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -591,12 +454,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64( - , 
- , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_xu.f.w_nxv2i32_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i32_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -615,11 +472,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_xu.f.w_nxv4i32_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv4i32_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -638,12 +490,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_xu.f.w_nxv4i32_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i32_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -662,11 +508,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64( - , - , - iXLen, iXLen); - define @intrinsic_vfncvt_xu.f.w_nxv8i32_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv8i32_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -685,12 +526,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvt_mask_xu.f.w_nxv8i32_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i32_nxv8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvtbf16-f-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvtbf16-f-f.ll index 5eae28aeac882..42669083a33ac 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvtbf16-f-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvtbf16-f-f.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfbfmin \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfncvtbf16.f.f.w.nxv1bf16.nxv1f32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvtbf16_f.f.w_nxv1bf16_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvtbf16_f.f.w_nxv1bf16_nxv1f32: ; 
CHECK: # %bb.0: # %entry @@ -25,12 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvtbf16.f.f.w.mask.nxv1bf16.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvtbf16_mask_f.f.w_nxv1bf16_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvtbf16_mask_f.f.w_nxv1bf16_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -47,11 +36,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvtbf16.f.f.w.nxv2bf16.nxv2f32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvtbf16_f.f.w_nxv2bf16_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvtbf16_f.f.w_nxv2bf16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -68,12 +52,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvtbf16.f.f.w.mask.nxv2bf16.nxv2f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvtbf16_mask_f.f.w_nxv2bf16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvtbf16_mask_f.f.w_nxv2bf16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -90,11 +68,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvtbf16.f.f.w.nxv4bf16.nxv4f32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvtbf16_f.f.w_nxv4bf16_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvtbf16_f.f.w_nxv4bf16_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -111,12 +84,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvtbf16.f.f.w.mask.nxv4bf16.nxv4f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvtbf16_mask_f.f.w_nxv4bf16_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvtbf16_mask_f.f.w_nxv4bf16_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -133,11 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvtbf16.f.f.w.nxv8bf16.nxv8f32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvtbf16_f.f.w_nxv8bf16_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvtbf16_f.f.w_nxv8bf16_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -154,12 +116,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvtbf16.f.f.w.mask.nxv8bf16.nxv8f32( - , - , - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfncvtbf16_mask_f.f.w_nxv8bf16_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvtbf16_mask_f.f.w_nxv8bf16_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -176,11 +132,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvtbf16.f.f.w.nxv16bf16.nxv16f32( - , - , - iXLen, iXLen); - define @intrinsic_vfncvtbf16_f.f.w_nxv16bf16_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvtbf16_f.f.w_nxv16bf16_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -197,12 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vfncvtbf16.f.f.w.mask.nxv16bf16.nxv16f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfncvtbf16_mask_f.f.w_nxv16bf16_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfncvtbf16_mask_f.f.w_nxv16bf16_nxv16f32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll index 9bd24c44b1b90..96fbe3f6ff025 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll @@ -300,8 +300,6 @@ define @vfneg_vv_nxv32bf16_unmasked( %v } -declare @llvm.vp.fneg.nxv1f16(, , i32) - define @vfneg_vv_nxv1f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfneg_vv_nxv1f16: ; ZVFH: # %bb.0: @@ -350,8 +348,6 @@ define @vfneg_vv_nxv1f16_unmasked( %va, i ret %v } -declare @llvm.vp.fneg.nxv2f16(, , i32) - define @vfneg_vv_nxv2f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfneg_vv_nxv2f16: ; ZVFH: # %bb.0: @@ -400,8 +396,6 @@ define @vfneg_vv_nxv2f16_unmasked( %va, i ret %v } -declare @llvm.vp.fneg.nxv4f16(, , i32) - define @vfneg_vv_nxv4f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfneg_vv_nxv4f16: ; ZVFH: # %bb.0: @@ -450,8 +444,6 @@ define @vfneg_vv_nxv4f16_unmasked( %va, i ret %v } -declare @llvm.vp.fneg.nxv8f16(, , i32) - define @vfneg_vv_nxv8f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfneg_vv_nxv8f16: ; ZVFH: # %bb.0: @@ -500,8 +492,6 @@ define @vfneg_vv_nxv8f16_unmasked( %va, i ret %v } -declare @llvm.vp.fneg.nxv16f16(, , i32) - 
define @vfneg_vv_nxv16f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfneg_vv_nxv16f16: ; ZVFH: # %bb.0: @@ -550,8 +540,6 @@ define @vfneg_vv_nxv16f16_unmasked( %va ret %v } -declare @llvm.vp.fneg.nxv32f16(, , i32) - define @vfneg_vv_nxv32f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfneg_vv_nxv32f16: ; ZVFH: # %bb.0: @@ -600,8 +588,6 @@ define @vfneg_vv_nxv32f16_unmasked( %va ret %v } -declare @llvm.vp.fneg.nxv1f32(, , i32) - define @vfneg_vv_nxv1f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_nxv1f32: ; CHECK: # %bb.0: @@ -622,8 +608,6 @@ define @vfneg_vv_nxv1f32_unmasked( %va, ret %v } -declare @llvm.vp.fneg.nxv2f32(, , i32) - define @vfneg_vv_nxv2f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_nxv2f32: ; CHECK: # %bb.0: @@ -644,8 +628,6 @@ define @vfneg_vv_nxv2f32_unmasked( %va, ret %v } -declare @llvm.vp.fneg.nxv4f32(, , i32) - define @vfneg_vv_nxv4f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_nxv4f32: ; CHECK: # %bb.0: @@ -666,8 +648,6 @@ define @vfneg_vv_nxv4f32_unmasked( %va, ret %v } -declare @llvm.vp.fneg.nxv8f32(, , i32) - define @vfneg_vv_nxv8f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_nxv8f32: ; CHECK: # %bb.0: @@ -688,8 +668,6 @@ define @vfneg_vv_nxv8f32_unmasked( %va, ret %v } -declare @llvm.vp.fneg.nxv16f32(, , i32) - define @vfneg_vv_nxv16f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_nxv16f32: ; CHECK: # %bb.0: @@ -710,8 +688,6 @@ define @vfneg_vv_nxv16f32_unmasked( % ret %v } -declare @llvm.vp.fneg.nxv1f64(, , i32) - define @vfneg_vv_nxv1f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_nxv1f64: ; CHECK: # %bb.0: @@ -732,8 +708,6 @@ define @vfneg_vv_nxv1f64_unmasked( %v ret %v } -declare @llvm.vp.fneg.nxv2f64(, , i32) - define @vfneg_vv_nxv2f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_nxv2f64: ; CHECK: # %bb.0: @@ -754,8 +728,6 @@ define @vfneg_vv_nxv2f64_unmasked( %v ret %v } -declare @llvm.vp.fneg.nxv4f64(, , i32) - define @vfneg_vv_nxv4f64( %va, %m, i32 zeroext 
%evl) { ; CHECK-LABEL: vfneg_vv_nxv4f64: ; CHECK: # %bb.0: @@ -776,8 +748,6 @@ define @vfneg_vv_nxv4f64_unmasked( %v ret %v } -declare @llvm.vp.fneg.nxv7f64(, , i32) - define @vfneg_vv_nxv7f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_nxv7f64: ; CHECK: # %bb.0: @@ -798,8 +768,6 @@ define @vfneg_vv_nxv7f64_unmasked( %v ret %v } -declare @llvm.vp.fneg.nxv8f64(, , i32) - define @vfneg_vv_nxv8f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_nxv8f64: ; CHECK: # %bb.0: @@ -821,7 +789,6 @@ define @vfneg_vv_nxv8f64_unmasked( %v } ; Test splitting. -declare @llvm.vp.fneg.nxv16f64(, , i32) define @vfneg_vv_nxv16f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_nxv16f64: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-bf.ll index 4b4091ba7acbe..373c29721ce92 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfnmacc.nxv1bf16.nxv1bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv2bf16.nxv2bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ 
-78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv4bf16.nxv4bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv8bf16.nxv8bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv16bf16.nxv16bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16( - , - , - , - 
, - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv1bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1bf16_bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1bf16_bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv2bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2bf16_bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2bf16_bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv4bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4bf16_bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vf_nxv4bf16_bf16_nxv4bf16( %0, 
bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4bf16_bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv8bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv8bf16_bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv8bf16_bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv16bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv16bf16_bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv16bf16_bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-vp.ll index 3b5cbb685a424..7f6fb030b13be 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-vp.ll @@ -4,11 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare @llvm.vp.fma.nxv1f16(, , , , i32) -declare @llvm.vp.fneg.nxv1f16(, , i32) -declare @llvm.vp.merge.nxv1f16(, , , i32) -declare @llvm.vp.select.nxv1f16(, , , i32) - define @vfnmacc_vv_nxv1f16( %a, %b, %c, %m, 
i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv1f16: ; CHECK: # %bb.0: @@ -131,11 +126,6 @@ define @vfnmacc_vf_nxv1f16_commute_ta( %a ret %u } -declare @llvm.vp.fma.nxv2f16(, , , , i32) -declare @llvm.vp.fneg.nxv2f16(, , i32) -declare @llvm.vp.merge.nxv2f16(, , , i32) -declare @llvm.vp.select.nxv2f16(, , , i32) - define @vfnmacc_vv_nxv2f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv2f16: ; CHECK: # %bb.0: @@ -258,11 +248,6 @@ define @vfnmacc_vf_nxv2f16_commute_ta( %a ret %u } -declare @llvm.vp.fma.nxv4f16(, , , , i32) -declare @llvm.vp.fneg.nxv4f16(, , i32) -declare @llvm.vp.merge.nxv4f16(, , , i32) -declare @llvm.vp.select.nxv4f16(, , , i32) - define @vfnmacc_vv_nxv4f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv4f16: ; CHECK: # %bb.0: @@ -385,11 +370,6 @@ define @vfnmacc_vf_nxv4f16_commute_ta( %a ret %u } -declare @llvm.vp.fma.nxv8f16(, , , , i32) -declare @llvm.vp.fneg.nxv8f16(, , i32) -declare @llvm.vp.merge.nxv8f16(, , , i32) -declare @llvm.vp.select.nxv8f16(, , , i32) - define @vfnmacc_vv_nxv8f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv8f16: ; CHECK: # %bb.0: @@ -512,11 +492,6 @@ define @vfnmacc_vf_nxv8f16_commute_ta( %a ret %u } -declare @llvm.vp.fma.nxv16f16(, , , , i32) -declare @llvm.vp.fneg.nxv16f16(, , i32) -declare @llvm.vp.merge.nxv16f16(, , , i32) -declare @llvm.vp.select.nxv16f16(, , , i32) - define @vfnmacc_vv_nxv16f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv16f16: ; CHECK: # %bb.0: @@ -639,11 +614,6 @@ define @vfnmacc_vf_nxv16f16_commute_ta( ret %u } -declare @llvm.vp.fma.nxv32f16(, , , , i32) -declare @llvm.vp.fneg.nxv32f16(, , i32) -declare @llvm.vp.merge.nxv32f16(, , , i32) -declare @llvm.vp.select.nxv32f16(, , , i32) - define @vfnmacc_vv_nxv32f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv32f16: ; CHECK: # %bb.0: @@ -769,11 +739,6 @@ define @vfnmacc_vf_nxv32f16_commute_ta( ret %u } -declare @llvm.vp.fma.nxv1f32(, , , , 
i32) -declare @llvm.vp.fneg.nxv1f32(, , i32) -declare @llvm.vp.merge.nxv1f32(, , , i32) -declare @llvm.vp.select.nxv1f32(, , , i32) - define @vfnmacc_vv_nxv1f32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv1f32: ; CHECK: # %bb.0: @@ -896,11 +861,6 @@ define @vfnmacc_vf_nxv1f32_commute_ta( ret %u } -declare @llvm.vp.fma.nxv2f32(, , , , i32) -declare @llvm.vp.fneg.nxv2f32(, , i32) -declare @llvm.vp.merge.nxv2f32(, , , i32) -declare @llvm.vp.select.nxv2f32(, , , i32) - define @vfnmacc_vv_nxv2f32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv2f32: ; CHECK: # %bb.0: @@ -1023,11 +983,6 @@ define @vfnmacc_vf_nxv2f32_commute_ta( ret %u } -declare @llvm.vp.fma.nxv4f32(, , , , i32) -declare @llvm.vp.fneg.nxv4f32(, , i32) -declare @llvm.vp.merge.nxv4f32(, , , i32) -declare @llvm.vp.select.nxv4f32(, , , i32) - define @vfnmacc_vv_nxv4f32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv4f32: ; CHECK: # %bb.0: @@ -1150,11 +1105,6 @@ define @vfnmacc_vf_nxv4f32_commute_ta( ret %u } -declare @llvm.vp.fma.nxv8f32(, , , , i32) -declare @llvm.vp.fneg.nxv8f32(, , i32) -declare @llvm.vp.merge.nxv8f32(, , , i32) -declare @llvm.vp.select.nxv8f32(, , , i32) - define @vfnmacc_vv_nxv8f32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv8f32: ; CHECK: # %bb.0: @@ -1277,11 +1227,6 @@ define @vfnmacc_vf_nxv8f32_commute_ta( ret %u } -declare @llvm.vp.fma.nxv16f32(, , , , i32) -declare @llvm.vp.fneg.nxv16f32(, , i32) -declare @llvm.vp.merge.nxv16f32(, , , i32) -declare @llvm.vp.select.nxv16f32(, , , i32) - define @vfnmacc_vv_nxv16f32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv16f32: ; CHECK: # %bb.0: @@ -1407,11 +1352,6 @@ define @vfnmacc_vf_nxv16f32_commute_ta( %u } -declare @llvm.vp.fma.nxv1f64(, , , , i32) -declare @llvm.vp.fneg.nxv1f64(, , i32) -declare @llvm.vp.merge.nxv1f64(, , , i32) -declare @llvm.vp.select.nxv1f64(, , , i32) - define @vfnmacc_vv_nxv1f64( %a, %b, %c, %m, i32 zeroext %evl) { ; 
CHECK-LABEL: vfnmacc_vv_nxv1f64: ; CHECK: # %bb.0: @@ -1534,11 +1474,6 @@ define @vfnmacc_vf_nxv1f64_commute_ta( %u } -declare @llvm.vp.fma.nxv2f64(, , , , i32) -declare @llvm.vp.fneg.nxv2f64(, , i32) -declare @llvm.vp.merge.nxv2f64(, , , i32) -declare @llvm.vp.select.nxv2f64(, , , i32) - define @vfnmacc_vv_nxv2f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv2f64: ; CHECK: # %bb.0: @@ -1661,11 +1596,6 @@ define @vfnmacc_vf_nxv2f64_commute_ta( %u } -declare @llvm.vp.fma.nxv4f64(, , , , i32) -declare @llvm.vp.fneg.nxv4f64(, , i32) -declare @llvm.vp.merge.nxv4f64(, , , i32) -declare @llvm.vp.select.nxv4f64(, , , i32) - define @vfnmacc_vv_nxv4f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv4f64: ; CHECK: # %bb.0: @@ -1788,11 +1718,6 @@ define @vfnmacc_vf_nxv4f64_commute_ta( %u } -declare @llvm.vp.fma.nxv8f64(, , , , i32) -declare @llvm.vp.fneg.nxv8f64(, , i32) -declare @llvm.vp.merge.nxv8f64(, , , i32) -declare @llvm.vp.select.nxv8f64(, , , i32) - define @vfnmacc_vv_nxv8f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmacc.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmacc.ll index 31df27853cb3c..5c2ebce184d6b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfnmacc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmacc.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfnmacc.nxv1f16.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfnmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv2f16.nxv2f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv2f16.nxv2f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv4f16.nxv4f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv4f16.nxv4f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv8f16.nxv8f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv8f16.nxv8f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfnmacc.nxv16f16.nxv16f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv16f16.nxv16f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv1f32.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv2f32.nxv2f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv4f32.nxv4f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, 
%2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv8f32.nxv8f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv1f64.nxv1f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -504,12 +374,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv2f64.nxv2f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -528,13 +392,6 @@ entry: ret %a } 
-declare @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -554,12 +411,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv4f64.nxv4f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -578,13 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -604,12 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv1f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -628,13 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv1f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -654,12 +485,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv2f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -678,13 +503,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv2f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, 
%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -704,12 +522,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv4f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -728,13 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv4f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -754,12 +559,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv8f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -778,13 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv8f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -804,12 +596,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv16f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -828,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv16f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -854,12 +633,6 @@ entry: ret %a } 
-declare @llvm.riscv.vfnmacc.nxv1f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -878,13 +651,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv1f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -904,12 +670,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv2f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -928,13 +688,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv2f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -954,12 +707,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv4f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -978,13 +725,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv4f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -1004,12 +744,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv8f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, iXLen 
%3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -1028,13 +762,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv8f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -1054,12 +781,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv1f64.f64( - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -1078,13 +799,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv1f64.f64( - , - double, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -1104,12 +818,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv2f64.f64( - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -1128,13 +836,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.mask.nxv2f64.f64( - , - double, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -1154,12 +855,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmacc.nxv4f64.f64( - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -1178,13 +873,6 @@ entry: ret %a } 
-declare @llvm.riscv.vfnmacc.mask.nxv4f64.f64( - , - double, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmacc_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-bf.ll index 2bb6bf5ae9e26..66b347d4b661e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfnmadd.nxv1bf16.nxv1bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv2bf16.nxv2bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv4bf16.nxv4bf16( - , - , - , 
- iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv8bf16.nxv8bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv16bf16.nxv16bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv1bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat 
%1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1bf16_bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1bf16_bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv2bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2bf16_bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2bf16_bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv4bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4bf16_bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4bf16_bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv8bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv8bf16_bf16_nxv8bf16: ; CHECK: # 
%bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv8bf16_bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv16bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv16bf16_bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv16bf16_bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll index d774289e3eebb..16ff3b719a927 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll @@ -11,8 +11,6 @@ ; This tests a mix of vfnmacc and vfnmadd by using different operand orders to ; trigger commuting in TwoAddressInstructionPass. 
-declare @llvm.experimental.constrained.fma.nxv1f16(, , , metadata, metadata) - define @vfnmsub_vv_nxv1f16( %va, %vb, %vc) strictfp { ; ZVFH-LABEL: vfnmsub_vv_nxv1f16: ; ZVFH: # %bb.0: @@ -71,8 +69,6 @@ define @vfnmsub_vf_nxv1f16( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv2f16(, , , metadata, metadata) - define @vfnmsub_vv_nxv2f16( %va, %vb, %vc) strictfp { ; ZVFH-LABEL: vfnmsub_vv_nxv2f16: ; ZVFH: # %bb.0: @@ -131,8 +127,6 @@ define @vfnmsub_vf_nxv2f16( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv4f16(, , , metadata, metadata) - define @vfnmsub_vv_nxv4f16( %va, %vb, %vc) strictfp { ; ZVFH-LABEL: vfnmsub_vv_nxv4f16: ; ZVFH: # %bb.0: @@ -191,8 +185,6 @@ define @vfnmsub_vf_nxv4f16( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv8f16(, , , metadata, metadata) - define @vfnmsub_vv_nxv8f16( %va, %vb, %vc) strictfp { ; ZVFH-LABEL: vfnmsub_vv_nxv8f16: ; ZVFH: # %bb.0: @@ -251,8 +243,6 @@ define @vfnmsub_vf_nxv8f16( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv16f16(, , , metadata, metadata) - define @vfnmsub_vv_nxv16f16( %va, %vb, %vc) strictfp { ; ZVFH-LABEL: vfnmsub_vv_nxv16f16: ; ZVFH: # %bb.0: @@ -311,8 +301,6 @@ define @vfnmsub_vf_nxv16f16( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv32f16(, , , metadata, metadata) - define @vfnmsub_vv_nxv32f16( %va, %vb, %vc) strictfp { ; ZVFH-LABEL: vfnmsub_vv_nxv32f16: ; ZVFH: # %bb.0: @@ -500,8 +488,6 @@ define @vfnmsub_vf_nxv32f16( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv1f32(, , , metadata, metadata) - define @vfnmsub_vv_nxv1f32( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_nxv1f32: ; CHECK: # %bb.0: @@ -528,8 +514,6 @@ define @vfnmsub_vf_nxv1f32( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv2f32(, , , metadata, metadata) - define @vfnmsub_vv_nxv2f32( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_nxv2f32: ; CHECK: # %bb.0: @@ -556,8 +540,6 @@ define @vfnmsub_vf_nxv2f32( %va, %vd } -declare 
@llvm.experimental.constrained.fma.nxv4f32(, , , metadata, metadata) - define @vfnmsub_vv_nxv4f32( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_nxv4f32: ; CHECK: # %bb.0: @@ -584,8 +566,6 @@ define @vfnmsub_vf_nxv4f32( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv8f32(, , , metadata, metadata) - define @vfnmsub_vv_nxv8f32( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_nxv8f32: ; CHECK: # %bb.0: @@ -612,8 +592,6 @@ define @vfnmsub_vf_nxv8f32( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv16f32(, , , metadata, metadata) - define @vfnmsub_vv_nxv16f32( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_nxv16f32: ; CHECK: # %bb.0: @@ -641,8 +619,6 @@ define @vfnmsub_vf_nxv16f32( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv1f64(, , , metadata, metadata) - define @vfnmsub_vv_nxv1f64( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_nxv1f64: ; CHECK: # %bb.0: @@ -669,8 +645,6 @@ define @vfnmsub_vf_nxv1f64( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv2f64(, , , metadata, metadata) - define @vfnmsub_vv_nxv2f64( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_nxv2f64: ; CHECK: # %bb.0: @@ -697,8 +671,6 @@ define @vfnmsub_vf_nxv2f64( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv4f64(, , , metadata, metadata) - define @vfnmsub_vv_nxv4f64( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_nxv4f64: ; CHECK: # %bb.0: @@ -725,8 +697,6 @@ define @vfnmsub_vf_nxv4f64( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv8f64(, , , metadata, metadata) - define @vfnmsub_vv_nxv8f64( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-sdnode.ll index 07c85bc67339b..b0f5599cad740 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-sdnode.ll @@ -7,8 +7,6 @@ ; This tests a mix of vfnmacc and vfnmadd by using different operand 
orders to ; trigger commuting in TwoAddressInstructionPass. -declare @llvm.fma.v1f16(, , ) - define @vfnmsub_vv_nxv1f16( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv1f16: ; CHECK: # %bb.0: @@ -35,8 +33,6 @@ define @vfnmsub_vf_nxv1f16( %va, %vd } -declare @llvm.fma.v2f16(, , ) - define @vfnmsub_vv_nxv2f16( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv2f16: ; CHECK: # %bb.0: @@ -63,8 +59,6 @@ define @vfnmsub_vf_nxv2f16( %va, %vd } -declare @llvm.fma.v4f16(, , ) - define @vfnmsub_vv_nxv4f16( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv4f16: ; CHECK: # %bb.0: @@ -91,8 +85,6 @@ define @vfnmsub_vf_nxv4f16( %va, %vd } -declare @llvm.fma.v8f16(, , ) - define @vfnmsub_vv_nxv8f16( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv8f16: ; CHECK: # %bb.0: @@ -119,8 +111,6 @@ define @vfnmsub_vf_nxv8f16( %va, %vd } -declare @llvm.fma.v16f16(, , ) - define @vfnmsub_vv_nxv16f16( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv16f16: ; CHECK: # %bb.0: @@ -147,8 +137,6 @@ define @vfnmsub_vf_nxv16f16( %va, %vd } -declare @llvm.fma.v32f16(, , ) - define @vfnmsub_vv_nxv32f16( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv32f16: ; CHECK: # %bb.0: @@ -176,8 +164,6 @@ define @vfnmsub_vf_nxv32f16( %va, %vd } -declare @llvm.fma.v1f32(, , ) - define @vfnmsub_vv_nxv1f32( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv1f32: ; CHECK: # %bb.0: @@ -204,8 +190,6 @@ define @vfnmsub_vf_nxv1f32( %va, %vd } -declare @llvm.fma.v2f32(, , ) - define @vfnmsub_vv_nxv2f32( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv2f32: ; CHECK: # %bb.0: @@ -232,8 +216,6 @@ define @vfnmsub_vf_nxv2f32( %va, %vd } -declare @llvm.fma.v4f32(, , ) - define @vfnmsub_vv_nxv4f32( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv4f32: ; CHECK: # %bb.0: @@ -260,8 +242,6 @@ define @vfnmsub_vf_nxv4f32( %va, %vd } -declare @llvm.fma.v8f32(, , ) - define @vfnmsub_vv_nxv8f32( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv8f32: ; CHECK: # %bb.0: @@ -288,8 +268,6 @@ define @vfnmsub_vf_nxv8f32( %va, %vd } -declare @llvm.fma.v16f32(, , ) - 
define @vfnmsub_vv_nxv16f32( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv16f32: ; CHECK: # %bb.0: @@ -317,8 +295,6 @@ define @vfnmsub_vf_nxv16f32( %va, %vd } -declare @llvm.fma.v1f64(, , ) - define @vfnmsub_vv_nxv1f64( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv1f64: ; CHECK: # %bb.0: @@ -345,8 +321,6 @@ define @vfnmsub_vf_nxv1f64( %va, %vd } -declare @llvm.fma.v2f64(, , ) - define @vfnmsub_vv_nxv2f64( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv2f64: ; CHECK: # %bb.0: @@ -373,8 +347,6 @@ define @vfnmsub_vf_nxv2f64( %va, %vd } -declare @llvm.fma.v4f64(, , ) - define @vfnmsub_vv_nxv4f64( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv4f64: ; CHECK: # %bb.0: @@ -401,8 +373,6 @@ define @vfnmsub_vf_nxv4f64( %va, %vd } -declare @llvm.fma.v8f64(, , ) - define @vfnmsub_vv_nxv8f64( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd.ll index 6f41ed177beac..55f6aa2e2eb38 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfnmadd.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmadd.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfnmadd.nxv1f16.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv2f16.nxv2f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, 
iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv2f16.nxv2f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv4f16.nxv4f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv4f16.nxv4f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv8f16.nxv8f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv8f16.nxv8f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv16f16.nxv16f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } 
-declare @llvm.riscv.vfnmadd.mask.nxv16f16.nxv16f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv1f32.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv2f32.nxv2f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv4f32.nxv4f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32( - , - , - , - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfnmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv8f32.nxv8f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv1f64.nxv1f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -504,12 +374,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv2f64.nxv2f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -528,13 +392,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64: 
; CHECK: # %bb.0: # %entry @@ -554,12 +411,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv4f64.nxv4f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -578,13 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -604,12 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv1f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -628,13 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv1f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -654,12 +485,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv2f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -678,13 +503,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv2f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -704,12 +522,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv4f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfnmadd_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -728,13 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv4f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -754,12 +559,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv8f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -778,13 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv8f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -804,12 +596,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv16f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -828,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv16f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -854,12 +633,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv1f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # 
%entry @@ -878,13 +651,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv1f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -904,12 +670,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv2f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -928,13 +688,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv2f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -954,12 +707,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv4f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -978,13 +725,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv4f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -1004,12 +744,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv8f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -1028,13 +762,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv8f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfnmadd_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -1054,12 +781,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv1f64.f64( - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -1078,13 +799,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv1f64.f64( - , - double, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -1104,12 +818,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv2f64.f64( - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -1128,13 +836,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv2f64.f64( - , - double, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -1154,12 +855,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.nxv4f64.f64( - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -1178,13 +873,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmadd.mask.nxv4f64.f64( - , - double, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmadd_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfnmadd_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-bf.ll index cfbaafa00c043..221df7095e8c0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfnmsac.nxv1bf16.nxv1bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv2bf16.nxv2bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv4bf16.nxv4bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry 
@@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv8bf16.nxv8bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv16bf16.nxv16bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv1bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1bf16_bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16( - , 
- bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1bf16_bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv2bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2bf16_bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2bf16_bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv4bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4bf16_bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4bf16_bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv8bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv8bf16_bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfnmsac_mask_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv8bf16_bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv16bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv16bf16_bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv16bf16_bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-vp.ll index edeb554bc6d35..37b223be1150c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-vp.ll @@ -4,11 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare @llvm.vp.fma.nxv1f16(, , , , i32) -declare @llvm.vp.fneg.nxv1f16(, , i32) -declare @llvm.vp.merge.nxv1f16(, , , i32) -declare @llvm.vp.select.nxv1f16(, , , i32) - define @vfnmsac_vv_nxv1f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv1f16: ; CHECK: # %bb.0: @@ -123,11 +118,6 @@ define @vfnmsac_vf_nxv1f16_commute_ta( %a ret %u } -declare @llvm.vp.fma.nxv2f16(, , , , i32) -declare @llvm.vp.fneg.nxv2f16(, , i32) -declare @llvm.vp.merge.nxv2f16(, , , i32) -declare @llvm.vp.select.nxv2f16(, , , i32) - define @vfnmsac_vv_nxv2f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv2f16: ; CHECK: # %bb.0: @@ -242,11 +232,6 @@ define @vfnmsac_vf_nxv2f16_commute_ta( %a ret %u } -declare @llvm.vp.fma.nxv4f16(, , , , i32) -declare 
@llvm.vp.fneg.nxv4f16(, , i32) -declare @llvm.vp.merge.nxv4f16(, , , i32) -declare @llvm.vp.select.nxv4f16(, , , i32) - define @vfnmsac_vv_nxv4f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv4f16: ; CHECK: # %bb.0: @@ -361,11 +346,6 @@ define @vfnmsac_vf_nxv4f16_commute_ta( %a ret %u } -declare @llvm.vp.fma.nxv8f16(, , , , i32) -declare @llvm.vp.fneg.nxv8f16(, , i32) -declare @llvm.vp.merge.nxv8f16(, , , i32) -declare @llvm.vp.select.nxv8f16(, , , i32) - define @vfnmsac_vv_nxv8f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv8f16: ; CHECK: # %bb.0: @@ -480,11 +460,6 @@ define @vfnmsac_vf_nxv8f16_commute_ta( %a ret %u } -declare @llvm.vp.fma.nxv16f16(, , , , i32) -declare @llvm.vp.fneg.nxv16f16(, , i32) -declare @llvm.vp.merge.nxv16f16(, , , i32) -declare @llvm.vp.select.nxv16f16(, , , i32) - define @vfnmsac_vv_nxv16f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv16f16: ; CHECK: # %bb.0: @@ -599,11 +574,6 @@ define @vfnmsac_vf_nxv16f16_commute_ta( ret %u } -declare @llvm.vp.fma.nxv32f16(, , , , i32) -declare @llvm.vp.fneg.nxv32f16(, , i32) -declare @llvm.vp.merge.nxv32f16(, , , i32) -declare @llvm.vp.select.nxv32f16(, , , i32) - define @vfnmsac_vv_nxv32f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv32f16: ; CHECK: # %bb.0: @@ -721,11 +691,6 @@ define @vfnmsac_vf_nxv32f16_commute_ta( ret %u } -declare @llvm.vp.fma.nxv1f32(, , , , i32) -declare @llvm.vp.fneg.nxv1f32(, , i32) -declare @llvm.vp.merge.nxv1f32(, , , i32) -declare @llvm.vp.select.nxv1f32(, , , i32) - define @vfnmsac_vv_nxv1f32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv1f32: ; CHECK: # %bb.0: @@ -840,11 +805,6 @@ define @vfnmsac_vf_nxv1f32_commute_ta( ret %u } -declare @llvm.vp.fma.nxv2f32(, , , , i32) -declare @llvm.vp.fneg.nxv2f32(, , i32) -declare @llvm.vp.merge.nxv2f32(, , , i32) -declare @llvm.vp.select.nxv2f32(, , , i32) - define @vfnmsac_vv_nxv2f32( %a, %b, %c, %m, i32 zeroext %evl) { ; 
CHECK-LABEL: vfnmsac_vv_nxv2f32: ; CHECK: # %bb.0: @@ -959,11 +919,6 @@ define @vfnmsac_vf_nxv2f32_commute_ta( ret %u } -declare @llvm.vp.fma.nxv4f32(, , , , i32) -declare @llvm.vp.fneg.nxv4f32(, , i32) -declare @llvm.vp.merge.nxv4f32(, , , i32) -declare @llvm.vp.select.nxv4f32(, , , i32) - define @vfnmsac_vv_nxv4f32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv4f32: ; CHECK: # %bb.0: @@ -1078,11 +1033,6 @@ define @vfnmsac_vf_nxv4f32_commute_ta( ret %u } -declare @llvm.vp.fma.nxv8f32(, , , , i32) -declare @llvm.vp.fneg.nxv8f32(, , i32) -declare @llvm.vp.merge.nxv8f32(, , , i32) -declare @llvm.vp.select.nxv8f32(, , , i32) - define @vfnmsac_vv_nxv8f32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv8f32: ; CHECK: # %bb.0: @@ -1197,11 +1147,6 @@ define @vfnmsac_vf_nxv8f32_commute_ta( ret %u } -declare @llvm.vp.fma.nxv16f32(, , , , i32) -declare @llvm.vp.fneg.nxv16f32(, , i32) -declare @llvm.vp.merge.nxv16f32(, , , i32) -declare @llvm.vp.select.nxv16f32(, , , i32) - define @vfnmsac_vv_nxv16f32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv16f32: ; CHECK: # %bb.0: @@ -1319,11 +1264,6 @@ define @vfnmsac_vf_nxv16f32_commute_ta( %u } -declare @llvm.vp.fma.nxv1f64(, , , , i32) -declare @llvm.vp.fneg.nxv1f64(, , i32) -declare @llvm.vp.merge.nxv1f64(, , , i32) -declare @llvm.vp.select.nxv1f64(, , , i32) - define @vfnmsac_vv_nxv1f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv1f64: ; CHECK: # %bb.0: @@ -1438,11 +1378,6 @@ define @vfnmsac_vf_nxv1f64_commute_ta( %u } -declare @llvm.vp.fma.nxv2f64(, , , , i32) -declare @llvm.vp.fneg.nxv2f64(, , i32) -declare @llvm.vp.merge.nxv2f64(, , , i32) -declare @llvm.vp.select.nxv2f64(, , , i32) - define @vfnmsac_vv_nxv2f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv2f64: ; CHECK: # %bb.0: @@ -1557,11 +1492,6 @@ define @vfnmsac_vf_nxv2f64_commute_ta( %u } -declare @llvm.vp.fma.nxv4f64(, , , , i32) -declare @llvm.vp.fneg.nxv4f64(, , i32) 
-declare @llvm.vp.merge.nxv4f64(, , , i32) -declare @llvm.vp.select.nxv4f64(, , , i32) - define @vfnmsac_vv_nxv4f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv4f64: ; CHECK: # %bb.0: @@ -1676,11 +1606,6 @@ define @vfnmsac_vf_nxv4f64_commute_ta( %u } -declare @llvm.vp.fma.nxv8f64(, , , , i32) -declare @llvm.vp.fneg.nxv8f64(, , i32) -declare @llvm.vp.merge.nxv8f64(, , , i32) -declare @llvm.vp.select.nxv8f64(, , , i32) - define @vfnmsac_vv_nxv8f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsac.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsac.ll index 50497d92764a5..f874e05465a09 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfnmsac.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsac.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfnmsac.nxv1f16.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv2f16.nxv2f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv2f16.nxv2f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, 
%1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv4f16.nxv4f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv4f16.nxv4f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv8f16.nxv8f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv8f16.nxv8f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv16f16.nxv16f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv16f16.nxv16f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -254,12 
+189,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv1f32.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv2f32.nxv2f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv4f32.nxv4f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv8f32.nxv8f32( - , - , - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfnmsac_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv1f64.nxv1f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -504,12 +374,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv2f64.nxv2f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -528,13 +392,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -554,12 +411,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv4f64.nxv4f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # 
%bb.0: # %entry @@ -578,13 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -604,12 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv1f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -628,13 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv1f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -654,12 +485,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv2f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -678,13 +503,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv2f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -704,12 +522,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv4f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -728,13 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv4f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfnmsac_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -754,12 +559,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv8f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -778,13 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv8f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -804,12 +596,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv16f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -828,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv16f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -854,12 +633,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv1f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -878,13 +651,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv1f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f32_f32_nxv1f32: ; 
CHECK: # %bb.0: # %entry @@ -904,12 +670,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv2f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -928,13 +688,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv2f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -954,12 +707,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv4f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -978,13 +725,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv4f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -1004,12 +744,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv8f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -1028,13 +762,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv8f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -1054,12 +781,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv1f64.f64( - , - double, - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfnmsac_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -1078,13 +799,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv1f64.f64( - , - double, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -1104,12 +818,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv2f64.f64( - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -1128,13 +836,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv2f64.f64( - , - double, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -1154,12 +855,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.nxv4f64.f64( - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -1178,13 +873,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsac.mask.nxv4f64.f64( - , - double, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsac_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-bf.ll index 5ebbb90c4c5a2..d400de99b49f3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 
's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfnmsub.nxv1bf16.nxv1bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv2bf16.nxv2bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv4bf16.nxv4bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -154,12 
+115,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv8bf16.nxv8bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv16bf16.nxv16bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv1bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1bf16_bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1bf16_bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv2bf16.bf16( - , - bfloat, - , 
- iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2bf16_bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2bf16_bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv4bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4bf16_bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4bf16_bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv8bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv8bf16_bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8bf16_bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv16bf16.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, 
iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv16bf16_bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv16bf16_bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll index 96c28e4c6e0e2..68af72da4126f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll @@ -11,8 +11,6 @@ ; This tests a mix of vfnmsac and vfnmsub by using different operand orders to ; trigger commuting in TwoAddressInstructionPass. -declare @llvm.experimental.constrained.fma.nxv1f16(, , , metadata, metadata) - define @vfnmsub_vv_nxv1f16( %va, %vb, %vc) strictfp { ; ZVFH-LABEL: vfnmsub_vv_nxv1f16: ; ZVFH: # %bb.0: @@ -67,8 +65,6 @@ define @vfnmsub_vf_nxv1f16( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv2f16(, , , metadata, metadata) - define @vfnmsub_vv_nxv2f16( %va, %vb, %vc) strictfp { ; ZVFH-LABEL: vfnmsub_vv_nxv2f16: ; ZVFH: # %bb.0: @@ -123,8 +119,6 @@ define @vfnmsub_vf_nxv2f16( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv4f16(, , , metadata, metadata) - define @vfnmsub_vv_nxv4f16( %va, %vb, %vc) strictfp { ; ZVFH-LABEL: vfnmsub_vv_nxv4f16: ; ZVFH: # %bb.0: @@ -179,8 +173,6 @@ define @vfnmsub_vf_nxv4f16( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv8f16(, , , metadata, metadata) - define @vfnmsub_vv_nxv8f16( %va, %vb, %vc) strictfp { ; ZVFH-LABEL: vfnmsub_vv_nxv8f16: ; ZVFH: # %bb.0: @@ -235,8 +227,6 @@ define @vfnmsub_vf_nxv8f16( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv16f16(, , , metadata, metadata) - define @vfnmsub_vv_nxv16f16( %va, %vb, %vc) strictfp 
{ ; ZVFH-LABEL: vfnmsub_vv_nxv16f16: ; ZVFH: # %bb.0: @@ -291,8 +281,6 @@ define @vfnmsub_vf_nxv16f16( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv32f16(, , , metadata, metadata) - define @vfnmsub_vv_nxv32f16( %va, %vb, %vc) strictfp { ; ZVFH-LABEL: vfnmsub_vv_nxv32f16: ; ZVFH: # %bb.0: @@ -446,8 +434,6 @@ define @vfnmsub_vf_nxv32f16( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv1f32(, , , metadata, metadata) - define @vfnmsub_vv_nxv1f32( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_nxv1f32: ; CHECK: # %bb.0: @@ -472,8 +458,6 @@ define @vfnmsub_vf_nxv1f32( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv2f32(, , , metadata, metadata) - define @vfnmsub_vv_nxv2f32( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_nxv2f32: ; CHECK: # %bb.0: @@ -498,8 +482,6 @@ define @vfnmsub_vf_nxv2f32( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv4f32(, , , metadata, metadata) - define @vfnmsub_vv_nxv4f32( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_nxv4f32: ; CHECK: # %bb.0: @@ -524,8 +506,6 @@ define @vfnmsub_vf_nxv4f32( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv8f32(, , , metadata, metadata) - define @vfnmsub_vv_nxv8f32( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_nxv8f32: ; CHECK: # %bb.0: @@ -550,8 +530,6 @@ define @vfnmsub_vf_nxv8f32( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv16f32(, , , metadata, metadata) - define @vfnmsub_vv_nxv16f32( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_nxv16f32: ; CHECK: # %bb.0: @@ -577,8 +555,6 @@ define @vfnmsub_vf_nxv16f32( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv1f64(, , , metadata, metadata) - define @vfnmsub_vv_nxv1f64( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_nxv1f64: ; CHECK: # %bb.0: @@ -603,8 +579,6 @@ define @vfnmsub_vf_nxv1f64( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv2f64(, , , metadata, metadata) - define @vfnmsub_vv_nxv2f64( %va, %vb, %vc) strictfp { ; CHECK-LABEL: 
vfnmsub_vv_nxv2f64: ; CHECK: # %bb.0: @@ -629,8 +603,6 @@ define @vfnmsub_vf_nxv2f64( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv4f64(, , , metadata, metadata) - define @vfnmsub_vv_nxv4f64( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_nxv4f64: ; CHECK: # %bb.0: @@ -655,8 +627,6 @@ define @vfnmsub_vf_nxv4f64( %va, %vd } -declare @llvm.experimental.constrained.fma.nxv8f64(, , , metadata, metadata) - define @vfnmsub_vv_nxv8f64( %va, %vb, %vc) strictfp { ; CHECK-LABEL: vfnmsub_vv_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-sdnode.ll index a356da80e1639..c78dfb26d53d0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-sdnode.ll @@ -7,8 +7,6 @@ ; This tests a mix of vfnmsac and vfnmsub by using different operand orders to ; trigger commuting in TwoAddressInstructionPass. -declare @llvm.fma.v1f16(, , ) - define @vfnmsub_vv_nxv1f16( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv1f16: ; CHECK: # %bb.0: @@ -33,8 +31,6 @@ define @vfnmsub_vf_nxv1f16( %va, %vd } -declare @llvm.fma.v2f16(, , ) - define @vfnmsub_vv_nxv2f16( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv2f16: ; CHECK: # %bb.0: @@ -59,8 +55,6 @@ define @vfnmsub_vf_nxv2f16( %va, %vd } -declare @llvm.fma.v4f16(, , ) - define @vfnmsub_vv_nxv4f16( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv4f16: ; CHECK: # %bb.0: @@ -85,8 +79,6 @@ define @vfnmsub_vf_nxv4f16( %va, %vd } -declare @llvm.fma.v8f16(, , ) - define @vfnmsub_vv_nxv8f16( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv8f16: ; CHECK: # %bb.0: @@ -111,8 +103,6 @@ define @vfnmsub_vf_nxv8f16( %va, %vd } -declare @llvm.fma.v16f16(, , ) - define @vfnmsub_vv_nxv16f16( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv16f16: ; CHECK: # %bb.0: @@ -137,8 +127,6 @@ define @vfnmsub_vf_nxv16f16( %va, %vd } -declare @llvm.fma.v32f16(, , ) - define @vfnmsub_vv_nxv32f16( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv32f16: ; 
CHECK: # %bb.0: @@ -164,8 +152,6 @@ define @vfnmsub_vf_nxv32f16( %va, %vd } -declare @llvm.fma.v1f32(, , ) - define @vfnmsub_vv_nxv1f32( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv1f32: ; CHECK: # %bb.0: @@ -190,8 +176,6 @@ define @vfnmsub_vf_nxv1f32( %va, %vd } -declare @llvm.fma.v2f32(, , ) - define @vfnmsub_vv_nxv2f32( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv2f32: ; CHECK: # %bb.0: @@ -216,8 +200,6 @@ define @vfnmsub_vf_nxv2f32( %va, %vd } -declare @llvm.fma.v4f32(, , ) - define @vfnmsub_vv_nxv4f32( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv4f32: ; CHECK: # %bb.0: @@ -242,8 +224,6 @@ define @vfnmsub_vf_nxv4f32( %va, %vd } -declare @llvm.fma.v8f32(, , ) - define @vfnmsub_vv_nxv8f32( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv8f32: ; CHECK: # %bb.0: @@ -268,8 +248,6 @@ define @vfnmsub_vf_nxv8f32( %va, %vd } -declare @llvm.fma.v16f32(, , ) - define @vfnmsub_vv_nxv16f32( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv16f32: ; CHECK: # %bb.0: @@ -295,8 +273,6 @@ define @vfnmsub_vf_nxv16f32( %va, %vd } -declare @llvm.fma.v1f64(, , ) - define @vfnmsub_vv_nxv1f64( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv1f64: ; CHECK: # %bb.0: @@ -321,8 +297,6 @@ define @vfnmsub_vf_nxv1f64( %va, %vd } -declare @llvm.fma.v2f64(, , ) - define @vfnmsub_vv_nxv2f64( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv2f64: ; CHECK: # %bb.0: @@ -347,8 +321,6 @@ define @vfnmsub_vf_nxv2f64( %va, %vd } -declare @llvm.fma.v4f64(, , ) - define @vfnmsub_vv_nxv4f64( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv4f64: ; CHECK: # %bb.0: @@ -373,8 +345,6 @@ define @vfnmsub_vf_nxv4f64( %va, %vd } -declare @llvm.fma.v8f64(, , ) - define @vfnmsub_vv_nxv8f64( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub.ll index c5d5bb1fe0b3e..263b0161c04fe 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfnmsub.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsub.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' 
%s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfnmsub.nxv1f16.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv2f16.nxv2f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv2f16.nxv2f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv4f16.nxv4f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv4f16.nxv4f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv8f16.nxv8f16( - , - , - 
, - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv8f16.nxv8f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv16f16.nxv16f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv16f16.nxv16f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv1f32.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv2f32.nxv2f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfnmsub_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv4f32.nxv4f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv8f32.nxv8f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv1f64.nxv1f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -504,12 +374,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv2f64.nxv2f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -528,13 +392,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -554,12 +411,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv4f64.nxv4f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -578,13 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -604,12 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv1f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -628,13 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv1f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, 
%3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -654,12 +485,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv2f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -678,13 +503,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv2f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -704,12 +522,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv4f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -728,13 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv4f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -754,12 +559,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv8f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -778,13 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv8f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -804,12 +596,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfnmsub.nxv16f16.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -828,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv16f16.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -854,12 +633,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv1f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -878,13 +651,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv1f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -904,12 +670,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv2f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -928,13 +688,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv2f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -954,12 +707,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv4f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, iXLen %3) 
nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -978,13 +725,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv4f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -1004,12 +744,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv8f32.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -1028,13 +762,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv8f32.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -1054,12 +781,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv1f64.f64( - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -1078,13 +799,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv1f64.f64( - , - double, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -1104,12 +818,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv2f64.f64( - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -1128,13 +836,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfnmsub.mask.nxv2f64.f64( - , - double, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -1154,12 +855,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.nxv4f64.f64( - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -1178,13 +873,6 @@ entry: ret %a } -declare @llvm.riscv.vfnmsub.mask.nxv4f64.f64( - , - double, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfnmsub_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfpext-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfpext-constrained-sdnode.ll index 9c77a6818bcb2..5ee8876ce73ee 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfpext-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfpext-constrained-sdnode.ll @@ -4,7 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v,+zvfbfmin -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare @llvm.experimental.constrained.fpext.nxv1f32.nxv1f16(, metadata) define @vfpext_nxv1f16_nxv1f32( %va) strictfp { ; CHECK-LABEL: vfpext_nxv1f16_nxv1f32: ; CHECK: # %bb.0: @@ -16,7 +15,6 @@ define @vfpext_nxv1f16_nxv1f32( %va) str ret %evec } -declare @llvm.experimental.constrained.fpext.nxv1f64.nxv1f16(, metadata) define @vfpext_nxv1f16_nxv1f64( %va) strictfp { ; CHECK-LABEL: vfpext_nxv1f16_nxv1f64: ; CHECK: # %bb.0: @@ -29,7 +27,6 @@ define @vfpext_nxv1f16_nxv1f64( %va) st ret %evec } -declare @llvm.experimental.constrained.fpext.nxv2f32.nxv2f16(, metadata) define @vfpext_nxv2f16_nxv2f32( %va) strictfp { ; CHECK-LABEL: vfpext_nxv2f16_nxv2f32: ; 
CHECK: # %bb.0: @@ -41,7 +38,6 @@ define @vfpext_nxv2f16_nxv2f32( %va) str ret %evec } -declare @llvm.experimental.constrained.fpext.nxv2f64.nxv2f16(, metadata) define @vfpext_nxv2f16_nxv2f64( %va) strictfp { ; CHECK-LABEL: vfpext_nxv2f16_nxv2f64: ; CHECK: # %bb.0: @@ -54,7 +50,6 @@ define @vfpext_nxv2f16_nxv2f64( %va) st ret %evec } -declare @llvm.experimental.constrained.fpext.nxv4f32.nxv4f16(, metadata) define @vfpext_nxv4f16_nxv4f32( %va) strictfp { ; CHECK-LABEL: vfpext_nxv4f16_nxv4f32: ; CHECK: # %bb.0: @@ -66,7 +61,6 @@ define @vfpext_nxv4f16_nxv4f32( %va) str ret %evec } -declare @llvm.experimental.constrained.fpext.nxv4f64.nxv4f16(, metadata) define @vfpext_nxv4f16_nxv4f64( %va) strictfp { ; CHECK-LABEL: vfpext_nxv4f16_nxv4f64: ; CHECK: # %bb.0: @@ -79,7 +73,6 @@ define @vfpext_nxv4f16_nxv4f64( %va) st ret %evec } -declare @llvm.experimental.constrained.fpext.nxv8f32.nxv8f16(, metadata) define @vfpext_nxv8f16_nxv8f32( %va) strictfp { ; CHECK-LABEL: vfpext_nxv8f16_nxv8f32: ; CHECK: # %bb.0: @@ -91,7 +84,6 @@ define @vfpext_nxv8f16_nxv8f32( %va) str ret %evec } -declare @llvm.experimental.constrained.fpext.nxv8f64.nxv8f16(, metadata) define @vfpext_nxv8f16_nxv8f64( %va) strictfp { ; CHECK-LABEL: vfpext_nxv8f16_nxv8f64: ; CHECK: # %bb.0: @@ -104,7 +96,6 @@ define @vfpext_nxv8f16_nxv8f64( %va) st ret %evec } -declare @llvm.experimental.constrained.fpext.nxv1f64.nxv1f32(, metadata) define @vfpext_nxv1f32_nxv1f64( %va) strictfp { ; CHECK-LABEL: vfpext_nxv1f32_nxv1f64: ; CHECK: # %bb.0: @@ -116,7 +107,6 @@ define @vfpext_nxv1f32_nxv1f64( %va) s ret %evec } -declare @llvm.experimental.constrained.fpext.nxv2f64.nxv2f32(, metadata) define @vfpext_nxv2f32_nxv2f64( %va) strictfp { ; CHECK-LABEL: vfpext_nxv2f32_nxv2f64: ; CHECK: # %bb.0: @@ -128,7 +118,6 @@ define @vfpext_nxv2f32_nxv2f64( %va) s ret %evec } -declare @llvm.experimental.constrained.fpext.nxv4f64.nxv4f32(, metadata) define @vfpext_nxv4f32_nxv4f64( %va) strictfp { ; CHECK-LABEL: vfpext_nxv4f32_nxv4f64: ; 
CHECK: # %bb.0: @@ -140,7 +129,6 @@ define @vfpext_nxv4f32_nxv4f64( %va) s ret %evec } -declare @llvm.experimental.constrained.fpext.nxv8f64.nxv8f32(, metadata) define @vfpext_nxv8f32_nxv8f64( %va) strictfp { ; CHECK-LABEL: vfpext_nxv8f32_nxv8f64: ; CHECK: # %bb.0: @@ -152,7 +140,6 @@ define @vfpext_nxv8f32_nxv8f64( %va) s ret %evec } -declare @llvm.experimental.constrained.fpext.nxv1f32.nxv1bf16(, metadata) define @vfpext_nxv1bf16_nxv1f32( %va) strictfp { ; CHECK-LABEL: vfpext_nxv1bf16_nxv1f32: ; CHECK: # %bb.0: @@ -164,7 +151,6 @@ define @vfpext_nxv1bf16_nxv1f32( %va) ret %evec } -declare @llvm.experimental.constrained.fpext.nxv1f64.nxv1bf16(, metadata) define @vfpext_nxv1bf16_nxv1f64( %va) strictfp { ; CHECK-LABEL: vfpext_nxv1bf16_nxv1f64: ; CHECK: # %bb.0: @@ -177,7 +163,6 @@ define @vfpext_nxv1bf16_nxv1f64( %va) ret %evec } -declare @llvm.experimental.constrained.fpext.nxv2f32.nxv2bf16(, metadata) define @vfpext_nxv2bf16_nxv2f32( %va) strictfp { ; CHECK-LABEL: vfpext_nxv2bf16_nxv2f32: ; CHECK: # %bb.0: @@ -189,7 +174,6 @@ define @vfpext_nxv2bf16_nxv2f32( %va) ret %evec } -declare @llvm.experimental.constrained.fpext.nxv2f64.nxv2bf16(, metadata) define @vfpext_nxv2bf16_nxv2f64( %va) strictfp { ; CHECK-LABEL: vfpext_nxv2bf16_nxv2f64: ; CHECK: # %bb.0: @@ -202,7 +186,6 @@ define @vfpext_nxv2bf16_nxv2f64( %va) ret %evec } -declare @llvm.experimental.constrained.fpext.nxv4f32.nxv4bf16(, metadata) define @vfpext_nxv4bf16_nxv4f32( %va) strictfp { ; CHECK-LABEL: vfpext_nxv4bf16_nxv4f32: ; CHECK: # %bb.0: @@ -214,7 +197,6 @@ define @vfpext_nxv4bf16_nxv4f32( %va) ret %evec } -declare @llvm.experimental.constrained.fpext.nxv4f64.nxv4bf16(, metadata) define @vfpext_nxv4bf16_nxv4f64( %va) strictfp { ; CHECK-LABEL: vfpext_nxv4bf16_nxv4f64: ; CHECK: # %bb.0: @@ -227,7 +209,6 @@ define @vfpext_nxv4bf16_nxv4f64( %va) ret %evec } -declare @llvm.experimental.constrained.fpext.nxv8f32.nxv8bf16(, metadata) define @vfpext_nxv8bf16_nxv8f32( %va) strictfp { ; CHECK-LABEL: 
vfpext_nxv8bf16_nxv8f32: ; CHECK: # %bb.0: @@ -239,7 +220,6 @@ define @vfpext_nxv8bf16_nxv8f32( %va) ret %evec } -declare @llvm.experimental.constrained.fpext.nxv8f64.nxv8bf16(, metadata) define @vfpext_nxv8bf16_nxv8f64( %va) strictfp { ; CHECK-LABEL: vfpext_nxv8bf16_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll index 137b616d86fcc..458795db7965d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+v,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s -declare @llvm.vp.fpext.nxv2f32.nxv2f16(, , i32) - define @vfpext_nxv2f16_nxv2f32( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vfpext_nxv2f16_nxv2f32: ; CHECK: # %bb.0: @@ -28,8 +26,6 @@ define @vfpext_nxv2f16_nxv2f32_unmasked( ret %v } -declare @llvm.vp.fpext.nxv2f64.nxv2f16(, , i32) - define @vfpext_nxv2f16_nxv2f64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vfpext_nxv2f16_nxv2f64: ; CHECK: # %bb.0: @@ -54,8 +50,6 @@ define @vfpext_nxv2f16_nxv2f64_unmasked( %v } -declare @llvm.vp.fpext.nxv2f64.nxv2f32(, , i32) - define @vfpext_nxv2f32_nxv2f64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vfpext_nxv2f32_nxv2f64: ; CHECK: # %bb.0: @@ -78,8 +72,6 @@ define @vfpext_nxv2f32_nxv2f64_unmasked( %v } -declare @llvm.vp.fpext.nxv7f64.nxv7f32(, , i32) - define @vfpext_nxv7f32_nxv7f64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vfpext_nxv7f32_nxv7f64: ; CHECK: # %bb.0: @@ -91,8 +83,6 @@ define @vfpext_nxv7f32_nxv7f64( %a, %v } -declare @llvm.vp.fpext.nxv32f32.nxv32f16(, , i32) - define @vfpext_nxv32f16_nxv32f32( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vfpext_nxv32f16_nxv32f32: ; CHECK: # %bb.0: @@ -121,8 +111,6 @@ define @vfpext_nxv32f16_nxv32f32( %a, ret %v } -declare @llvm.vp.fpext.nxv2f32.nxv2bf16(, , i32) - define @vfpext_nxv2bf16_nxv2f32( %a, 
%m, i32 zeroext %vl) { ; CHECK-LABEL: vfpext_nxv2bf16_nxv2f32: ; CHECK: # %bb.0: @@ -145,8 +133,6 @@ define @vfpext_nxv2bf16_nxv2f32_unmasked( %v } -declare @llvm.vp.fpext.nxv2f64.nxv2bf16(, , i32) - define @vfpext_nxv2bf16_nxv2f64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vfpext_nxv2bf16_nxv2f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptoi-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfptoi-constrained-sdnode.ll index 9e7f4ede29f54..efcdc1e24b0b3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfptoi-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfptoi-constrained-sdnode.ll @@ -4,7 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare @llvm.experimental.constrained.fptosi.nxv1i1.nxv1f16(, metadata) define @vfptosi_nxv1f16_nxv1i1( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv1f16_nxv1i1: ; CHECK: # %bb.0: @@ -17,7 +16,6 @@ define @vfptosi_nxv1f16_nxv1i1( %va) strict ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv1i1.nxv1f16(, metadata) define @vfptoui_nxv1f16_nxv1i1( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv1f16_nxv1i1: ; CHECK: # %bb.0: @@ -30,7 +28,6 @@ define @vfptoui_nxv1f16_nxv1i1( %va) strict ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv1i7.nxv1f16(, metadata) define @vfptosi_nxv1f16_nxv1i7( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv1f16_nxv1i7: ; CHECK: # %bb.0: @@ -42,7 +39,6 @@ define @vfptosi_nxv1f16_nxv1i7( %va) strict ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv1i7.nxv1f16(, metadata) define @vfptoui_nxv1f16_nxv1i7( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv1f16_nxv1i7: ; CHECK: # %bb.0: @@ -54,7 +50,6 @@ define @vfptoui_nxv1f16_nxv1i7( %va) strict ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv1i8.nxv1f16(, metadata) define @vfptosi_nxv1f16_nxv1i8( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv1f16_nxv1i8: ; CHECK: # %bb.0: @@ -66,7 +61,6 @@ define @vfptosi_nxv1f16_nxv1i8( %va) strict 
ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv1i8.nxv1f16(, metadata) define @vfptoui_nxv1f16_nxv1i8( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv1f16_nxv1i8: ; CHECK: # %bb.0: @@ -78,7 +72,6 @@ define @vfptoui_nxv1f16_nxv1i8( %va) strict ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv1i16.nxv1f16(, metadata) define @vfptosi_nxv1f16_nxv1i16( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv1f16_nxv1i16: ; CHECK: # %bb.0: @@ -89,7 +82,6 @@ define @vfptosi_nxv1f16_nxv1i16( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv1i16.nxv1f16(, metadata) define @vfptoui_nxv1f16_nxv1i16( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv1f16_nxv1i16: ; CHECK: # %bb.0: @@ -100,7 +92,6 @@ define @vfptoui_nxv1f16_nxv1i16( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv1i32.nxv1f16(, metadata) define @vfptosi_nxv1f16_nxv1i32( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv1f16_nxv1i32: ; CHECK: # %bb.0: @@ -112,7 +103,6 @@ define @vfptosi_nxv1f16_nxv1i32( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv1i32.nxv1f16(, metadata) define @vfptoui_nxv1f16_nxv1i32( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv1f16_nxv1i32: ; CHECK: # %bb.0: @@ -124,7 +114,6 @@ define @vfptoui_nxv1f16_nxv1i32( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv1i64.nxv1f16(, metadata) define @vfptosi_nxv1f16_nxv1i64( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv1f16_nxv1i64: ; CHECK: # %bb.0: @@ -137,7 +126,6 @@ define @vfptosi_nxv1f16_nxv1i64( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv1i64.nxv1f16(, metadata) define @vfptoui_nxv1f16_nxv1i64( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv1f16_nxv1i64: ; CHECK: # %bb.0: @@ -150,7 +138,6 @@ define @vfptoui_nxv1f16_nxv1i64( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv2i1.nxv2f16(, metadata) define @vfptosi_nxv2f16_nxv2i1( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv2f16_nxv2i1: ; CHECK: # %bb.0: @@ -163,7 +150,6 
@@ define @vfptosi_nxv2f16_nxv2i1( %va) strict ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv2i1.nxv2f16(, metadata) define @vfptoui_nxv2f16_nxv2i1( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv2f16_nxv2i1: ; CHECK: # %bb.0: @@ -176,7 +162,6 @@ define @vfptoui_nxv2f16_nxv2i1( %va) strict ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv2i8.nxv2f16(, metadata) define @vfptosi_nxv2f16_nxv2i8( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv2f16_nxv2i8: ; CHECK: # %bb.0: @@ -188,7 +173,6 @@ define @vfptosi_nxv2f16_nxv2i8( %va) strict ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv2i8.nxv2f16(, metadata) define @vfptoui_nxv2f16_nxv2i8( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv2f16_nxv2i8: ; CHECK: # %bb.0: @@ -200,7 +184,6 @@ define @vfptoui_nxv2f16_nxv2i8( %va) strict ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv2i16.nxv2f16(, metadata) define @vfptosi_nxv2f16_nxv2i16( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv2f16_nxv2i16: ; CHECK: # %bb.0: @@ -211,7 +194,6 @@ define @vfptosi_nxv2f16_nxv2i16( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv2i16.nxv2f16(, metadata) define @vfptoui_nxv2f16_nxv2i16( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv2f16_nxv2i16: ; CHECK: # %bb.0: @@ -222,7 +204,6 @@ define @vfptoui_nxv2f16_nxv2i16( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv2i32.nxv2f16(, metadata) define @vfptosi_nxv2f16_nxv2i32( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv2f16_nxv2i32: ; CHECK: # %bb.0: @@ -234,7 +215,6 @@ define @vfptosi_nxv2f16_nxv2i32( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv2i32.nxv2f16(, metadata) define @vfptoui_nxv2f16_nxv2i32( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv2f16_nxv2i32: ; CHECK: # %bb.0: @@ -246,7 +226,6 @@ define @vfptoui_nxv2f16_nxv2i32( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv2i64.nxv2f16(, metadata) define @vfptosi_nxv2f16_nxv2i64( %va) strictfp { ; CHECK-LABEL: 
vfptosi_nxv2f16_nxv2i64: ; CHECK: # %bb.0: @@ -259,7 +238,6 @@ define @vfptosi_nxv2f16_nxv2i64( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv2i64.nxv2f16(, metadata) define @vfptoui_nxv2f16_nxv2i64( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv2f16_nxv2i64: ; CHECK: # %bb.0: @@ -272,7 +250,6 @@ define @vfptoui_nxv2f16_nxv2i64( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv4i1.nxv4f16(, metadata) define @vfptosi_nxv4f16_nxv4i1( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv4f16_nxv4i1: ; CHECK: # %bb.0: @@ -285,7 +262,6 @@ define @vfptosi_nxv4f16_nxv4i1( %va) strict ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv4i1.nxv4f16(, metadata) define @vfptoui_nxv4f16_nxv4i1( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv4f16_nxv4i1: ; CHECK: # %bb.0: @@ -298,7 +274,6 @@ define @vfptoui_nxv4f16_nxv4i1( %va) strict ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv4i8.nxv4f16(, metadata) define @vfptosi_nxv4f16_nxv4i8( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv4f16_nxv4i8: ; CHECK: # %bb.0: @@ -310,7 +285,6 @@ define @vfptosi_nxv4f16_nxv4i8( %va) strict ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv4i8.nxv4f16(, metadata) define @vfptoui_nxv4f16_nxv4i8( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv4f16_nxv4i8: ; CHECK: # %bb.0: @@ -322,7 +296,6 @@ define @vfptoui_nxv4f16_nxv4i8( %va) strict ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv4i16.nxv4f16(, metadata) define @vfptosi_nxv4f16_nxv4i16( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv4f16_nxv4i16: ; CHECK: # %bb.0: @@ -333,7 +306,6 @@ define @vfptosi_nxv4f16_nxv4i16( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv4i16.nxv4f16(, metadata) define @vfptoui_nxv4f16_nxv4i16( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv4f16_nxv4i16: ; CHECK: # %bb.0: @@ -344,7 +316,6 @@ define @vfptoui_nxv4f16_nxv4i16( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv4i32.nxv4f16(, metadata) define 
@vfptosi_nxv4f16_nxv4i32( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv4f16_nxv4i32: ; CHECK: # %bb.0: @@ -356,7 +327,6 @@ define @vfptosi_nxv4f16_nxv4i32( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv4i32.nxv4f16(, metadata) define @vfptoui_nxv4f16_nxv4i32( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv4f16_nxv4i32: ; CHECK: # %bb.0: @@ -368,7 +338,6 @@ define @vfptoui_nxv4f16_nxv4i32( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv4i64.nxv4f16(, metadata) define @vfptosi_nxv4f16_nxv4i64( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv4f16_nxv4i64: ; CHECK: # %bb.0: @@ -381,7 +350,6 @@ define @vfptosi_nxv4f16_nxv4i64( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv4i64.nxv4f16(, metadata) define @vfptoui_nxv4f16_nxv4i64( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv4f16_nxv4i64: ; CHECK: # %bb.0: @@ -394,7 +362,6 @@ define @vfptoui_nxv4f16_nxv4i64( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv8i1.nxv8f16(, metadata) define @vfptosi_nxv8f16_nxv8i1( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv8f16_nxv8i1: ; CHECK: # %bb.0: @@ -407,7 +374,6 @@ define @vfptosi_nxv8f16_nxv8i1( %va) strict ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv8i1.nxv8f16(, metadata) define @vfptoui_nxv8f16_nxv8i1( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv8f16_nxv8i1: ; CHECK: # %bb.0: @@ -420,7 +386,6 @@ define @vfptoui_nxv8f16_nxv8i1( %va) strict ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv8i8.nxv8f16(, metadata) define @vfptosi_nxv8f16_nxv8i8( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv8f16_nxv8i8: ; CHECK: # %bb.0: @@ -432,7 +397,6 @@ define @vfptosi_nxv8f16_nxv8i8( %va) strict ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv8i8.nxv8f16(, metadata) define @vfptoui_nxv8f16_nxv8i8( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv8f16_nxv8i8: ; CHECK: # %bb.0: @@ -444,7 +408,6 @@ define @vfptoui_nxv8f16_nxv8i8( %va) strict ret %evec } -declare 
@llvm.experimental.constrained.fptosi.nxv8i16.nxv8f16(, metadata) define @vfptosi_nxv8f16_nxv8i16( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv8f16_nxv8i16: ; CHECK: # %bb.0: @@ -455,7 +418,6 @@ define @vfptosi_nxv8f16_nxv8i16( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv8i16.nxv8f16(, metadata) define @vfptoui_nxv8f16_nxv8i16( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv8f16_nxv8i16: ; CHECK: # %bb.0: @@ -466,7 +428,6 @@ define @vfptoui_nxv8f16_nxv8i16( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv8i32.nxv8f16(, metadata) define @vfptosi_nxv8f16_nxv8i32( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv8f16_nxv8i32: ; CHECK: # %bb.0: @@ -478,7 +439,6 @@ define @vfptosi_nxv8f16_nxv8i32( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv8i32.nxv8f16(, metadata) define @vfptoui_nxv8f16_nxv8i32( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv8f16_nxv8i32: ; CHECK: # %bb.0: @@ -490,7 +450,6 @@ define @vfptoui_nxv8f16_nxv8i32( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv8i64.nxv8f16(, metadata) define @vfptosi_nxv8f16_nxv8i64( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv8f16_nxv8i64: ; CHECK: # %bb.0: @@ -503,7 +462,6 @@ define @vfptosi_nxv8f16_nxv8i64( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv8i64.nxv8f16(, metadata) define @vfptoui_nxv8f16_nxv8i64( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv8f16_nxv8i64: ; CHECK: # %bb.0: @@ -516,7 +474,6 @@ define @vfptoui_nxv8f16_nxv8i64( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv16i1.nxv16f16(, metadata) define @vfptosi_nxv16f16_nxv16i1( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv16f16_nxv16i1: ; CHECK: # %bb.0: @@ -529,7 +486,6 @@ define @vfptosi_nxv16f16_nxv16i1( %va) st ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv16i1.nxv16f16(, metadata) define @vfptoui_nxv16f16_nxv16i1( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv16f16_nxv16i1: ; CHECK: # %bb.0: @@ -542,7 +498,6 @@ 
define @vfptoui_nxv16f16_nxv16i1( %va) st ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv16i8.nxv16f16(, metadata) define @vfptosi_nxv16f16_nxv16i8( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv16f16_nxv16i8: ; CHECK: # %bb.0: @@ -554,7 +509,6 @@ define @vfptosi_nxv16f16_nxv16i8( %va) st ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv16i8.nxv16f16(, metadata) define @vfptoui_nxv16f16_nxv16i8( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv16f16_nxv16i8: ; CHECK: # %bb.0: @@ -566,7 +520,6 @@ define @vfptoui_nxv16f16_nxv16i8( %va) st ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv16i16.nxv16f16(, metadata) define @vfptosi_nxv16f16_nxv16i16( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv16f16_nxv16i16: ; CHECK: # %bb.0: @@ -577,7 +530,6 @@ define @vfptosi_nxv16f16_nxv16i16( %va) ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv16i16.nxv16f16(, metadata) define @vfptoui_nxv16f16_nxv16i16( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv16f16_nxv16i16: ; CHECK: # %bb.0: @@ -588,7 +540,6 @@ define @vfptoui_nxv16f16_nxv16i16( %va) ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv16i32.nxv16f16(, metadata) define @vfptosi_nxv16f16_nxv16i32( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv16f16_nxv16i32: ; CHECK: # %bb.0: @@ -600,7 +551,6 @@ define @vfptosi_nxv16f16_nxv16i32( %va) ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv16i32.nxv16f16(, metadata) define @vfptoui_nxv16f16_nxv16i32( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv16f16_nxv16i32: ; CHECK: # %bb.0: @@ -612,7 +562,6 @@ define @vfptoui_nxv16f16_nxv16i32( %va) ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv32i1.nxv32f16(, metadata) define @vfptosi_nxv32f16_nxv32i1( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv32f16_nxv32i1: ; CHECK: # %bb.0: @@ -625,7 +574,6 @@ define @vfptosi_nxv32f16_nxv32i1( %va) st ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv32i1.nxv32f16(, metadata) define @vfptoui_nxv32f16_nxv32i1( %va) strictfp { ; 
CHECK-LABEL: vfptoui_nxv32f16_nxv32i1: ; CHECK: # %bb.0: @@ -638,7 +586,6 @@ define @vfptoui_nxv32f16_nxv32i1( %va) st ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv32i8.nxv32f16(, metadata) define @vfptosi_nxv32f16_nxv32i8( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv32f16_nxv32i8: ; CHECK: # %bb.0: @@ -650,7 +597,6 @@ define @vfptosi_nxv32f16_nxv32i8( %va) st ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv32i8.nxv32f16(, metadata) define @vfptoui_nxv32f16_nxv32i8( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv32f16_nxv32i8: ; CHECK: # %bb.0: @@ -662,7 +608,6 @@ define @vfptoui_nxv32f16_nxv32i8( %va) st ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv32i16.nxv32f16(, metadata) define @vfptosi_nxv32f16_nxv32i16( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv32f16_nxv32i16: ; CHECK: # %bb.0: @@ -673,7 +618,6 @@ define @vfptosi_nxv32f16_nxv32i16( %va) ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv32i16.nxv32f16(, metadata) define @vfptoui_nxv32f16_nxv32i16( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv32f16_nxv32i16: ; CHECK: # %bb.0: @@ -684,7 +628,6 @@ define @vfptoui_nxv32f16_nxv32i16( %va) ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv1i1.nxv1f32(, metadata) define @vfptosi_nxv1f32_nxv1i1( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv1f32_nxv1i1: ; CHECK: # %bb.0: @@ -697,7 +640,6 @@ define @vfptosi_nxv1f32_nxv1i1( %va) stric ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv1i1.nxv1f32(, metadata) define @vfptoui_nxv1f32_nxv1i1( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv1f32_nxv1i1: ; CHECK: # %bb.0: @@ -710,7 +652,6 @@ define @vfptoui_nxv1f32_nxv1i1( %va) stric ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv1i8.nxv1f32(, metadata) define @vfptosi_nxv1f32_nxv1i8( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv1f32_nxv1i8: ; CHECK: # %bb.0: @@ -723,7 +664,6 @@ define @vfptosi_nxv1f32_nxv1i8( %va) stric ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv1i8.nxv1f32(, 
metadata) define @vfptoui_nxv1f32_nxv1i8( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv1f32_nxv1i8: ; CHECK: # %bb.0: @@ -736,7 +676,6 @@ define @vfptoui_nxv1f32_nxv1i8( %va) stric ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv1i16.nxv1f32(, metadata) define @vfptosi_nxv1f32_nxv1i16( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv1f32_nxv1i16: ; CHECK: # %bb.0: @@ -748,7 +687,6 @@ define @vfptosi_nxv1f32_nxv1i16( %va) str ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv1i16.nxv1f32(, metadata) define @vfptoui_nxv1f32_nxv1i16( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv1f32_nxv1i16: ; CHECK: # %bb.0: @@ -760,7 +698,6 @@ define @vfptoui_nxv1f32_nxv1i16( %va) str ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv1i32.nxv1f32(, metadata) define @vfptosi_nxv1f32_nxv1i32( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv1f32_nxv1i32: ; CHECK: # %bb.0: @@ -771,7 +708,6 @@ define @vfptosi_nxv1f32_nxv1i32( %va) str ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv1i32.nxv1f32(, metadata) define @vfptoui_nxv1f32_nxv1i32( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv1f32_nxv1i32: ; CHECK: # %bb.0: @@ -782,7 +718,6 @@ define @vfptoui_nxv1f32_nxv1i32( %va) str ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv1i64.nxv1f32(, metadata) define @vfptosi_nxv1f32_nxv1i64( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv1f32_nxv1i64: ; CHECK: # %bb.0: @@ -794,7 +729,6 @@ define @vfptosi_nxv1f32_nxv1i64( %va) str ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv1i64.nxv1f32(, metadata) define @vfptoui_nxv1f32_nxv1i64( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv1f32_nxv1i64: ; CHECK: # %bb.0: @@ -806,7 +740,6 @@ define @vfptoui_nxv1f32_nxv1i64( %va) str ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv2i1.nxv2f32(, metadata) define @vfptosi_nxv2f32_nxv2i1( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv2f32_nxv2i1: ; CHECK: # %bb.0: @@ -819,7 +752,6 @@ define @vfptosi_nxv2f32_nxv2i1( %va) stric ret %evec } -declare 
@llvm.experimental.constrained.fptoui.nxv2i1.nxv2f32(, metadata) define @vfptoui_nxv2f32_nxv2i1( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv2f32_nxv2i1: ; CHECK: # %bb.0: @@ -832,7 +764,6 @@ define @vfptoui_nxv2f32_nxv2i1( %va) stric ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv2i8.nxv2f32(, metadata) define @vfptosi_nxv2f32_nxv2i8( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv2f32_nxv2i8: ; CHECK: # %bb.0: @@ -845,7 +776,6 @@ define @vfptosi_nxv2f32_nxv2i8( %va) stric ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv2i8.nxv2f32(, metadata) define @vfptoui_nxv2f32_nxv2i8( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv2f32_nxv2i8: ; CHECK: # %bb.0: @@ -858,7 +788,6 @@ define @vfptoui_nxv2f32_nxv2i8( %va) stric ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv2i16.nxv2f32(, metadata) define @vfptosi_nxv2f32_nxv2i16( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv2f32_nxv2i16: ; CHECK: # %bb.0: @@ -870,7 +799,6 @@ define @vfptosi_nxv2f32_nxv2i16( %va) str ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv2i16.nxv2f32(, metadata) define @vfptoui_nxv2f32_nxv2i16( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv2f32_nxv2i16: ; CHECK: # %bb.0: @@ -882,7 +810,6 @@ define @vfptoui_nxv2f32_nxv2i16( %va) str ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv2i32.nxv2f32(, metadata) define @vfptosi_nxv2f32_nxv2i32( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv2f32_nxv2i32: ; CHECK: # %bb.0: @@ -893,7 +820,6 @@ define @vfptosi_nxv2f32_nxv2i32( %va) str ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv2i32.nxv2f32(, metadata) define @vfptoui_nxv2f32_nxv2i32( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv2f32_nxv2i32: ; CHECK: # %bb.0: @@ -904,7 +830,6 @@ define @vfptoui_nxv2f32_nxv2i32( %va) str ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv2i64.nxv2f32(, metadata) define @vfptosi_nxv2f32_nxv2i64( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv2f32_nxv2i64: ; CHECK: # %bb.0: @@ -916,7 +841,6 @@ define 
@vfptosi_nxv2f32_nxv2i64( %va) str ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv2i64.nxv2f32(, metadata) define @vfptoui_nxv2f32_nxv2i64( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv2f32_nxv2i64: ; CHECK: # %bb.0: @@ -928,7 +852,6 @@ define @vfptoui_nxv2f32_nxv2i64( %va) str ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv4i1.nxv4f32(, metadata) define @vfptosi_nxv4f32_nxv4i1( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv4f32_nxv4i1: ; CHECK: # %bb.0: @@ -941,7 +864,6 @@ define @vfptosi_nxv4f32_nxv4i1( %va) stric ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv4i1.nxv4f32(, metadata) define @vfptoui_nxv4f32_nxv4i1( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv4f32_nxv4i1: ; CHECK: # %bb.0: @@ -954,7 +876,6 @@ define @vfptoui_nxv4f32_nxv4i1( %va) stric ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv4i8.nxv4f32(, metadata) define @vfptosi_nxv4f32_nxv4i8( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv4f32_nxv4i8: ; CHECK: # %bb.0: @@ -967,7 +888,6 @@ define @vfptosi_nxv4f32_nxv4i8( %va) stric ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv4i8.nxv4f32(, metadata) define @vfptoui_nxv4f32_nxv4i8( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv4f32_nxv4i8: ; CHECK: # %bb.0: @@ -980,7 +900,6 @@ define @vfptoui_nxv4f32_nxv4i8( %va) stric ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv4i16.nxv4f32(, metadata) define @vfptosi_nxv4f32_nxv4i16( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv4f32_nxv4i16: ; CHECK: # %bb.0: @@ -992,7 +911,6 @@ define @vfptosi_nxv4f32_nxv4i16( %va) str ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv4i16.nxv4f32(, metadata) define @vfptoui_nxv4f32_nxv4i16( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv4f32_nxv4i16: ; CHECK: # %bb.0: @@ -1004,7 +922,6 @@ define @vfptoui_nxv4f32_nxv4i16( %va) str ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv4i32.nxv4f32(, metadata) define @vfptosi_nxv4f32_nxv4i32( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv4f32_nxv4i32: ; 
CHECK: # %bb.0: @@ -1015,7 +932,6 @@ define @vfptosi_nxv4f32_nxv4i32( %va) str ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv4i32.nxv4f32(, metadata) define @vfptoui_nxv4f32_nxv4i32( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv4f32_nxv4i32: ; CHECK: # %bb.0: @@ -1026,7 +942,6 @@ define @vfptoui_nxv4f32_nxv4i32( %va) str ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv4i64.nxv4f32(, metadata) define @vfptosi_nxv4f32_nxv4i64( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv4f32_nxv4i64: ; CHECK: # %bb.0: @@ -1038,7 +953,6 @@ define @vfptosi_nxv4f32_nxv4i64( %va) str ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv4i64.nxv4f32(, metadata) define @vfptoui_nxv4f32_nxv4i64( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv4f32_nxv4i64: ; CHECK: # %bb.0: @@ -1050,7 +964,6 @@ define @vfptoui_nxv4f32_nxv4i64( %va) str ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv8i1.nxv8f32(, metadata) define @vfptosi_nxv8f32_nxv8i1( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv8f32_nxv8i1: ; CHECK: # %bb.0: @@ -1063,7 +976,6 @@ define @vfptosi_nxv8f32_nxv8i1( %va) stric ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv8i1.nxv8f32(, metadata) define @vfptoui_nxv8f32_nxv8i1( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv8f32_nxv8i1: ; CHECK: # %bb.0: @@ -1076,7 +988,6 @@ define @vfptoui_nxv8f32_nxv8i1( %va) stric ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv8i8.nxv8f32(, metadata) define @vfptosi_nxv8f32_nxv8i8( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv8f32_nxv8i8: ; CHECK: # %bb.0: @@ -1089,7 +1000,6 @@ define @vfptosi_nxv8f32_nxv8i8( %va) stric ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv8i8.nxv8f32(, metadata) define @vfptoui_nxv8f32_nxv8i8( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv8f32_nxv8i8: ; CHECK: # %bb.0: @@ -1102,7 +1012,6 @@ define @vfptoui_nxv8f32_nxv8i8( %va) stric ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv8i16.nxv8f32(, metadata) define @vfptosi_nxv8f32_nxv8i16( %va) 
strictfp { ; CHECK-LABEL: vfptosi_nxv8f32_nxv8i16: ; CHECK: # %bb.0: @@ -1114,7 +1023,6 @@ define @vfptosi_nxv8f32_nxv8i16( %va) str ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv8i16.nxv8f32(, metadata) define @vfptoui_nxv8f32_nxv8i16( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv8f32_nxv8i16: ; CHECK: # %bb.0: @@ -1126,7 +1034,6 @@ define @vfptoui_nxv8f32_nxv8i16( %va) str ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv8i32.nxv8f32(, metadata) define @vfptosi_nxv8f32_nxv8i32( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv8f32_nxv8i32: ; CHECK: # %bb.0: @@ -1137,7 +1044,6 @@ define @vfptosi_nxv8f32_nxv8i32( %va) str ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv8i32.nxv8f32(, metadata) define @vfptoui_nxv8f32_nxv8i32( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv8f32_nxv8i32: ; CHECK: # %bb.0: @@ -1148,7 +1054,6 @@ define @vfptoui_nxv8f32_nxv8i32( %va) str ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv8i64.nxv8f32(, metadata) define @vfptosi_nxv8f32_nxv8i64( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv8f32_nxv8i64: ; CHECK: # %bb.0: @@ -1160,7 +1065,6 @@ define @vfptosi_nxv8f32_nxv8i64( %va) str ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv8i64.nxv8f32(, metadata) define @vfptoui_nxv8f32_nxv8i64( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv8f32_nxv8i64: ; CHECK: # %bb.0: @@ -1172,7 +1076,6 @@ define @vfptoui_nxv8f32_nxv8i64( %va) str ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv16i1.nxv16f32(, metadata) define @vfptosi_nxv16f32_nxv16i1( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv16f32_nxv16i1: ; CHECK: # %bb.0: @@ -1185,7 +1088,6 @@ define @vfptosi_nxv16f32_nxv16i1( %va) s ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv16i1.nxv16f32(, metadata) define @vfptoui_nxv16f32_nxv16i1( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv16f32_nxv16i1: ; CHECK: # %bb.0: @@ -1198,7 +1100,6 @@ define @vfptoui_nxv16f32_nxv16i1( %va) s ret %evec } -declare 
@llvm.experimental.constrained.fptosi.nxv16i8.nxv16f32(, metadata) define @vfptosi_nxv16f32_nxv16i8( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv16f32_nxv16i8: ; CHECK: # %bb.0: @@ -1211,7 +1112,6 @@ define @vfptosi_nxv16f32_nxv16i8( %va) s ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv16i8.nxv16f32(, metadata) define @vfptoui_nxv16f32_nxv16i8( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv16f32_nxv16i8: ; CHECK: # %bb.0: @@ -1224,7 +1124,6 @@ define @vfptoui_nxv16f32_nxv16i8( %va) s ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv16i16.nxv16f32(, metadata) define @vfptosi_nxv16f32_nxv16i16( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv16f32_nxv16i16: ; CHECK: # %bb.0: @@ -1236,7 +1135,6 @@ define @vfptosi_nxv16f32_nxv16i16( %va) ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv16i16.nxv16f32(, metadata) define @vfptoui_nxv16f32_nxv16i16( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv16f32_nxv16i16: ; CHECK: # %bb.0: @@ -1248,7 +1146,6 @@ define @vfptoui_nxv16f32_nxv16i16( %va) ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv16i32.nxv16f32(, metadata) define @vfptosi_nxv16f32_nxv16i32( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv16f32_nxv16i32: ; CHECK: # %bb.0: @@ -1259,7 +1156,6 @@ define @vfptosi_nxv16f32_nxv16i32( %va) ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv16i32.nxv16f32(, metadata) define @vfptoui_nxv16f32_nxv16i32( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv16f32_nxv16i32: ; CHECK: # %bb.0: @@ -1270,7 +1166,6 @@ define @vfptoui_nxv16f32_nxv16i32( %va) ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv1i1.nxv1f64(, metadata) define @vfptosi_nxv1f64_nxv1i1( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv1f64_nxv1i1: ; CHECK: # %bb.0: @@ -1283,7 +1178,6 @@ define @vfptosi_nxv1f64_nxv1i1( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv1i1.nxv1f64(, metadata) define @vfptoui_nxv1f64_nxv1i1( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv1f64_nxv1i1: ; CHECK: # %bb.0: @@ 
-1296,7 +1190,6 @@ define @vfptoui_nxv1f64_nxv1i1( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv1i8.nxv1f64(, metadata) define @vfptosi_nxv1f64_nxv1i8( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv1f64_nxv1i8: ; CHECK: # %bb.0: @@ -1311,7 +1204,6 @@ define @vfptosi_nxv1f64_nxv1i8( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv1i8.nxv1f64(, metadata) define @vfptoui_nxv1f64_nxv1i8( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv1f64_nxv1i8: ; CHECK: # %bb.0: @@ -1326,7 +1218,6 @@ define @vfptoui_nxv1f64_nxv1i8( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv1i16.nxv1f64(, metadata) define @vfptosi_nxv1f64_nxv1i16( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv1f64_nxv1i16: ; CHECK: # %bb.0: @@ -1339,7 +1230,6 @@ define @vfptosi_nxv1f64_nxv1i16( %va) st ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv1i16.nxv1f64(, metadata) define @vfptoui_nxv1f64_nxv1i16( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv1f64_nxv1i16: ; CHECK: # %bb.0: @@ -1352,7 +1242,6 @@ define @vfptoui_nxv1f64_nxv1i16( %va) st ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv1i32.nxv1f64(, metadata) define @vfptosi_nxv1f64_nxv1i32( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv1f64_nxv1i32: ; CHECK: # %bb.0: @@ -1364,7 +1253,6 @@ define @vfptosi_nxv1f64_nxv1i32( %va) st ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv1i32.nxv1f64(, metadata) define @vfptoui_nxv1f64_nxv1i32( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv1f64_nxv1i32: ; CHECK: # %bb.0: @@ -1376,7 +1264,6 @@ define @vfptoui_nxv1f64_nxv1i32( %va) st ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv1i64.nxv1f64(, metadata) define @vfptosi_nxv1f64_nxv1i64( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv1f64_nxv1i64: ; CHECK: # %bb.0: @@ -1387,7 +1274,6 @@ define @vfptosi_nxv1f64_nxv1i64( %va) st ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv1i64.nxv1f64(, metadata) define @vfptoui_nxv1f64_nxv1i64( %va) strictfp { ; 
CHECK-LABEL: vfptoui_nxv1f64_nxv1i64: ; CHECK: # %bb.0: @@ -1398,7 +1284,6 @@ define @vfptoui_nxv1f64_nxv1i64( %va) st ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv2i1.nxv2f64(, metadata) define @vfptosi_nxv2f64_nxv2i1( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv2f64_nxv2i1: ; CHECK: # %bb.0: @@ -1411,7 +1296,6 @@ define @vfptosi_nxv2f64_nxv2i1( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv2i1.nxv2f64(, metadata) define @vfptoui_nxv2f64_nxv2i1( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv2f64_nxv2i1: ; CHECK: # %bb.0: @@ -1424,7 +1308,6 @@ define @vfptoui_nxv2f64_nxv2i1( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv2i8.nxv2f64(, metadata) define @vfptosi_nxv2f64_nxv2i8( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv2f64_nxv2i8: ; CHECK: # %bb.0: @@ -1439,7 +1322,6 @@ define @vfptosi_nxv2f64_nxv2i8( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv2i8.nxv2f64(, metadata) define @vfptoui_nxv2f64_nxv2i8( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv2f64_nxv2i8: ; CHECK: # %bb.0: @@ -1454,7 +1336,6 @@ define @vfptoui_nxv2f64_nxv2i8( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv2i16.nxv2f64(, metadata) define @vfptosi_nxv2f64_nxv2i16( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv2f64_nxv2i16: ; CHECK: # %bb.0: @@ -1467,7 +1348,6 @@ define @vfptosi_nxv2f64_nxv2i16( %va) st ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv2i16.nxv2f64(, metadata) define @vfptoui_nxv2f64_nxv2i16( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv2f64_nxv2i16: ; CHECK: # %bb.0: @@ -1480,7 +1360,6 @@ define @vfptoui_nxv2f64_nxv2i16( %va) st ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv2i32.nxv2f64(, metadata) define @vfptosi_nxv2f64_nxv2i32( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv2f64_nxv2i32: ; CHECK: # %bb.0: @@ -1492,7 +1371,6 @@ define @vfptosi_nxv2f64_nxv2i32( %va) st ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv2i32.nxv2f64(, metadata) 
define @vfptoui_nxv2f64_nxv2i32( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv2f64_nxv2i32: ; CHECK: # %bb.0: @@ -1504,7 +1382,6 @@ define @vfptoui_nxv2f64_nxv2i32( %va) st ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv2i64.nxv2f64(, metadata) define @vfptosi_nxv2f64_nxv2i64( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv2f64_nxv2i64: ; CHECK: # %bb.0: @@ -1515,7 +1392,6 @@ define @vfptosi_nxv2f64_nxv2i64( %va) st ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv2i64.nxv2f64(, metadata) define @vfptoui_nxv2f64_nxv2i64( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv2f64_nxv2i64: ; CHECK: # %bb.0: @@ -1526,7 +1402,6 @@ define @vfptoui_nxv2f64_nxv2i64( %va) st ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv4i1.nxv4f64(, metadata) define @vfptosi_nxv4f64_nxv4i1( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv4f64_nxv4i1: ; CHECK: # %bb.0: @@ -1539,7 +1414,6 @@ define @vfptosi_nxv4f64_nxv4i1( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv4i1.nxv4f64(, metadata) define @vfptoui_nxv4f64_nxv4i1( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv4f64_nxv4i1: ; CHECK: # %bb.0: @@ -1552,7 +1426,6 @@ define @vfptoui_nxv4f64_nxv4i1( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv4i8.nxv4f64(, metadata) define @vfptosi_nxv4f64_nxv4i8( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv4f64_nxv4i8: ; CHECK: # %bb.0: @@ -1567,7 +1440,6 @@ define @vfptosi_nxv4f64_nxv4i8( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv4i8.nxv4f64(, metadata) define @vfptoui_nxv4f64_nxv4i8( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv4f64_nxv4i8: ; CHECK: # %bb.0: @@ -1582,7 +1454,6 @@ define @vfptoui_nxv4f64_nxv4i8( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv4i16.nxv4f64(, metadata) define @vfptosi_nxv4f64_nxv4i16( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv4f64_nxv4i16: ; CHECK: # %bb.0: @@ -1595,7 +1466,6 @@ define @vfptosi_nxv4f64_nxv4i16( %va) st ret %evec } -declare 
@llvm.experimental.constrained.fptoui.nxv4i16.nxv4f64(, metadata) define @vfptoui_nxv4f64_nxv4i16( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv4f64_nxv4i16: ; CHECK: # %bb.0: @@ -1608,7 +1478,6 @@ define @vfptoui_nxv4f64_nxv4i16( %va) st ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv4i32.nxv4f64(, metadata) define @vfptosi_nxv4f64_nxv4i32( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv4f64_nxv4i32: ; CHECK: # %bb.0: @@ -1620,7 +1489,6 @@ define @vfptosi_nxv4f64_nxv4i32( %va) st ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv4i32.nxv4f64(, metadata) define @vfptoui_nxv4f64_nxv4i32( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv4f64_nxv4i32: ; CHECK: # %bb.0: @@ -1632,7 +1500,6 @@ define @vfptoui_nxv4f64_nxv4i32( %va) st ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv4i64.nxv4f64(, metadata) define @vfptosi_nxv4f64_nxv4i64( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv4f64_nxv4i64: ; CHECK: # %bb.0: @@ -1643,7 +1510,6 @@ define @vfptosi_nxv4f64_nxv4i64( %va) st ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv4i64.nxv4f64(, metadata) define @vfptoui_nxv4f64_nxv4i64( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv4f64_nxv4i64: ; CHECK: # %bb.0: @@ -1654,7 +1520,6 @@ define @vfptoui_nxv4f64_nxv4i64( %va) st ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv8i1.nxv8f64(, metadata) define @vfptosi_nxv8f64_nxv8i1( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv8f64_nxv8i1: ; CHECK: # %bb.0: @@ -1667,7 +1532,6 @@ define @vfptosi_nxv8f64_nxv8i1( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv8i1.nxv8f64(, metadata) define @vfptoui_nxv8f64_nxv8i1( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv8f64_nxv8i1: ; CHECK: # %bb.0: @@ -1680,7 +1544,6 @@ define @vfptoui_nxv8f64_nxv8i1( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv8i8.nxv8f64(, metadata) define @vfptosi_nxv8f64_nxv8i8( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv8f64_nxv8i8: ; CHECK: # %bb.0: @@ -1695,7 +1558,6 @@ define 
@vfptosi_nxv8f64_nxv8i8( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv8i8.nxv8f64(, metadata) define @vfptoui_nxv8f64_nxv8i8( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv8f64_nxv8i8: ; CHECK: # %bb.0: @@ -1710,7 +1572,6 @@ define @vfptoui_nxv8f64_nxv8i8( %va) stri ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv8i16.nxv8f64(, metadata) define @vfptosi_nxv8f64_nxv8i16( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv8f64_nxv8i16: ; CHECK: # %bb.0: @@ -1723,7 +1584,6 @@ define @vfptosi_nxv8f64_nxv8i16( %va) st ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv8i16.nxv8f64(, metadata) define @vfptoui_nxv8f64_nxv8i16( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv8f64_nxv8i16: ; CHECK: # %bb.0: @@ -1736,7 +1596,6 @@ define @vfptoui_nxv8f64_nxv8i16( %va) st ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv8i32.nxv8f64(, metadata) define @vfptosi_nxv8f64_nxv8i32( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv8f64_nxv8i32: ; CHECK: # %bb.0: @@ -1748,7 +1607,6 @@ define @vfptosi_nxv8f64_nxv8i32( %va) st ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv8i32.nxv8f64(, metadata) define @vfptoui_nxv8f64_nxv8i32( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv8f64_nxv8i32: ; CHECK: # %bb.0: @@ -1760,7 +1618,6 @@ define @vfptoui_nxv8f64_nxv8i32( %va) st ret %evec } -declare @llvm.experimental.constrained.fptosi.nxv8i64.nxv8f64(, metadata) define @vfptosi_nxv8f64_nxv8i64( %va) strictfp { ; CHECK-LABEL: vfptosi_nxv8f64_nxv8i64: ; CHECK: # %bb.0: @@ -1771,7 +1628,6 @@ define @vfptosi_nxv8f64_nxv8i64( %va) st ret %evec } -declare @llvm.experimental.constrained.fptoui.nxv8i64.nxv8f64(, metadata) define @vfptoui_nxv8f64_nxv8i64( %va) strictfp { ; CHECK-LABEL: vfptoui_nxv8f64_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp-mask.ll index 33decd8aa1b91..7924bc83d824c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp-mask.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp-mask.ll @@ -30,8 +30,6 @@ define @vfptosi_nxv2i1_nxv2bf16_unmasked( ret %v } -declare @llvm.vp.fptosi.nxv2i1.nxv2f16(, , i32) - define @vfptosi_nxv2i1_nxv2f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfptosi_nxv2i1_nxv2f16: ; ZVFH: # %bb.0: @@ -72,8 +70,6 @@ define @vfptosi_nxv2i1_nxv2f16_unmasked( %v ret %v } -declare @llvm.vp.fptosi.nxv2i1.nxv2f32(, , i32) - define @vfptosi_nxv2i1_nxv2f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_nxv2i1_nxv2f32: ; CHECK: # %bb.0: @@ -96,8 +92,6 @@ define @vfptosi_nxv2i1_nxv2f32_unmasked( % ret %v } -declare @llvm.vp.fptosi.nxv2i1.nxv2f64(, , i32) - define @vfptosi_nxv2i1_nxv2f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_nxv2i1_nxv2f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll index f94f709626443..7127d10e67dbc 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll @@ -113,8 +113,6 @@ define @vfptosi_nxv2i64_nxv2bf16_unmasked( %v } -declare @llvm.vp.fptosi.v4i7.v4f16(, , i32) - define @vfptosi_v4i7_v4f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfptosi_v4i7_v4f16: ; ZVFH: # %bb.0: @@ -135,8 +133,6 @@ define @vfptosi_v4i7_v4f16( %va, %v } -declare @llvm.vp.fptosi.nxv2i8.nxv2f16(, , i32) - define @vfptosi_nxv2i8_nxv2f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfptosi_nxv2i8_nxv2f16: ; ZVFH: # %bb.0: @@ -177,8 +173,6 @@ define @vfptosi_nxv2i8_nxv2f16_unmasked( %v ret %v } -declare @llvm.vp.fptosi.nxv2i16.nxv2f16(, , i32) - define @vfptosi_nxv2i16_nxv2f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfptosi_nxv2i16_nxv2f16: ; ZVFH: # %bb.0: @@ -213,8 +207,6 @@ define @vfptosi_nxv2i16_nxv2f16_unmasked( ret %v } -declare @llvm.vp.fptosi.nxv2i32.nxv2f16(, , i32) - define @vfptosi_nxv2i32_nxv2f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfptosi_nxv2i32_nxv2f16: ; ZVFH: # %bb.0: @@ -253,8 +245,6 @@ define @vfptosi_nxv2i32_nxv2f16_unmasked( ret 
%v } -declare @llvm.vp.fptosi.nxv2i64.nxv2f16(, , i32) - define @vfptosi_nxv2i64_nxv2f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfptosi_nxv2i64_nxv2f16: ; ZVFH: # %bb.0: @@ -287,8 +277,6 @@ define @vfptosi_nxv2i64_nxv2f16_unmasked( ret %v } -declare @llvm.vp.fptosi.nxv2i8.nxv2f32(, , i32) - define @vfptosi_nxv2i8_nxv2f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_nxv2i8_nxv2f32: ; CHECK: # %bb.0: @@ -313,8 +301,6 @@ define @vfptosi_nxv2i8_nxv2f32_unmasked( % ret %v } -declare @llvm.vp.fptosi.nxv2i16.nxv2f32(, , i32) - define @vfptosi_nxv2i16_nxv2f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_nxv2i16_nxv2f32: ; CHECK: # %bb.0: @@ -337,8 +323,6 @@ define @vfptosi_nxv2i16_nxv2f32_unmasked( ret %v } -declare @llvm.vp.fptosi.nxv2i32.nxv2f32(, , i32) - define @vfptosi_nxv2i32_nxv2f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_nxv2i32_nxv2f32: ; CHECK: # %bb.0: @@ -359,8 +343,6 @@ define @vfptosi_nxv2i32_nxv2f32_unmasked( ret %v } -declare @llvm.vp.fptosi.nxv2i64.nxv2f32(, , i32) - define @vfptosi_nxv2i64_nxv2f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_nxv2i64_nxv2f32: ; CHECK: # %bb.0: @@ -383,8 +365,6 @@ define @vfptosi_nxv2i64_nxv2f32_unmasked( ret %v } -declare @llvm.vp.fptosi.nxv2i8.nxv2f64(, , i32) - define @vfptosi_nxv2i8_nxv2f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_nxv2i8_nxv2f64: ; CHECK: # %bb.0: @@ -413,8 +393,6 @@ define @vfptosi_nxv2i8_nxv2f64_unmasked( ret %v } -declare @llvm.vp.fptosi.nxv2i16.nxv2f64(, , i32) - define @vfptosi_nxv2i16_nxv2f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_nxv2i16_nxv2f64: ; CHECK: # %bb.0: @@ -439,8 +417,6 @@ define @vfptosi_nxv2i16_nxv2f64_unmasked( %v } -declare @llvm.vp.fptosi.nxv2i32.nxv2f64(, , i32) - define @vfptosi_nxv2i32_nxv2f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_nxv2i32_nxv2f64: ; CHECK: # %bb.0: @@ -463,8 +439,6 @@ define @vfptosi_nxv2i32_nxv2f64_unmasked( %v } -declare @llvm.vp.fptosi.nxv2i64.nxv2f64(, , i32) - define 
@vfptosi_nxv2i64_nxv2f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_nxv2i64_nxv2f64: ; CHECK: # %bb.0: @@ -485,8 +459,6 @@ define @vfptosi_nxv2i64_nxv2f64_unmasked( %v } -declare @llvm.vp.fptosi.nxv32i16.nxv32f32(, , i32) - define @vfptosi_nxv32i16_nxv32f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_nxv32i16_nxv32f32: ; CHECK: # %bb.0: @@ -515,8 +487,6 @@ define @vfptosi_nxv32i16_nxv32f32( %va, ret %v } -declare @llvm.vp.fptosi.nxv32i32.nxv32f32(, , i32) - define @vfptosi_nxv32i32_nxv32f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_nxv32i32_nxv32f32: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp-mask.ll index 59c6791c12f79..dba3f6cec2da0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp-mask.ll @@ -30,8 +30,6 @@ define @vfptoui_nxv2i1_nxv2bf16_unmasked( ret %v } -declare @llvm.vp.fptoui.nxv2i1.nxv2f16(, , i32) - define @vfptoui_nxv2i1_nxv2f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfptoui_nxv2i1_nxv2f16: ; ZVFH: # %bb.0: @@ -72,8 +70,6 @@ define @vfptoui_nxv2i1_nxv2f16_unmasked( %v ret %v } -declare @llvm.vp.fptoui.nxv2i1.nxv2f32(, , i32) - define @vfptoui_nxv2i1_nxv2f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_nxv2i1_nxv2f32: ; CHECK: # %bb.0: @@ -96,8 +92,6 @@ define @vfptoui_nxv2i1_nxv2f32_unmasked( % ret %v } -declare @llvm.vp.fptoui.nxv2i1.nxv2f64(, , i32) - define @vfptoui_nxv2i1_nxv2f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_nxv2i1_nxv2f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll index 7aae383049deb..07b58ed057508 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll @@ -113,8 +113,6 @@ define @vfptoui_nxv2i64_nxv2bf16_unmasked( %v } -declare @llvm.vp.fptoui.v4i7.v4f16(, , i32) - define @vfptoui_v4i7_v4f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: 
vfptoui_v4i7_v4f16: ; ZVFH: # %bb.0: @@ -135,8 +133,6 @@ define @vfptoui_v4i7_v4f16( %va, %v } -declare @llvm.vp.fptoui.nxv2i8.nxv2f16(, , i32) - define @vfptoui_nxv2i8_nxv2f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfptoui_nxv2i8_nxv2f16: ; ZVFH: # %bb.0: @@ -177,8 +173,6 @@ define @vfptoui_nxv2i8_nxv2f16_unmasked( %v ret %v } -declare @llvm.vp.fptoui.nxv2i16.nxv2f16(, , i32) - define @vfptoui_nxv2i16_nxv2f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfptoui_nxv2i16_nxv2f16: ; ZVFH: # %bb.0: @@ -213,8 +207,6 @@ define @vfptoui_nxv2i16_nxv2f16_unmasked( ret %v } -declare @llvm.vp.fptoui.nxv2i32.nxv2f16(, , i32) - define @vfptoui_nxv2i32_nxv2f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfptoui_nxv2i32_nxv2f16: ; ZVFH: # %bb.0: @@ -253,8 +245,6 @@ define @vfptoui_nxv2i32_nxv2f16_unmasked( ret %v } -declare @llvm.vp.fptoui.nxv2i64.nxv2f16(, , i32) - define @vfptoui_nxv2i64_nxv2f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfptoui_nxv2i64_nxv2f16: ; ZVFH: # %bb.0: @@ -287,8 +277,6 @@ define @vfptoui_nxv2i64_nxv2f16_unmasked( ret %v } -declare @llvm.vp.fptoui.nxv2i8.nxv2f32(, , i32) - define @vfptoui_nxv2i8_nxv2f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_nxv2i8_nxv2f32: ; CHECK: # %bb.0: @@ -313,8 +301,6 @@ define @vfptoui_nxv2i8_nxv2f32_unmasked( % ret %v } -declare @llvm.vp.fptoui.nxv2i16.nxv2f32(, , i32) - define @vfptoui_nxv2i16_nxv2f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_nxv2i16_nxv2f32: ; CHECK: # %bb.0: @@ -337,8 +323,6 @@ define @vfptoui_nxv2i16_nxv2f32_unmasked( ret %v } -declare @llvm.vp.fptoui.nxv2i32.nxv2f32(, , i32) - define @vfptoui_nxv2i32_nxv2f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_nxv2i32_nxv2f32: ; CHECK: # %bb.0: @@ -359,8 +343,6 @@ define @vfptoui_nxv2i32_nxv2f32_unmasked( ret %v } -declare @llvm.vp.fptoui.nxv2i64.nxv2f32(, , i32) - define @vfptoui_nxv2i64_nxv2f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_nxv2i64_nxv2f32: ; CHECK: # %bb.0: @@ -383,8 +365,6 @@ define 
@vfptoui_nxv2i64_nxv2f32_unmasked( ret %v } -declare @llvm.vp.fptoui.nxv2i8.nxv2f64(, , i32) - define @vfptoui_nxv2i8_nxv2f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_nxv2i8_nxv2f64: ; CHECK: # %bb.0: @@ -413,8 +393,6 @@ define @vfptoui_nxv2i8_nxv2f64_unmasked( ret %v } -declare @llvm.vp.fptoui.nxv2i16.nxv2f64(, , i32) - define @vfptoui_nxv2i16_nxv2f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_nxv2i16_nxv2f64: ; CHECK: # %bb.0: @@ -439,8 +417,6 @@ define @vfptoui_nxv2i16_nxv2f64_unmasked( %v } -declare @llvm.vp.fptoui.nxv2i32.nxv2f64(, , i32) - define @vfptoui_nxv2i32_nxv2f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_nxv2i32_nxv2f64: ; CHECK: # %bb.0: @@ -463,8 +439,6 @@ define @vfptoui_nxv2i32_nxv2f64_unmasked( %v } -declare @llvm.vp.fptoui.nxv2i64.nxv2f64(, , i32) - define @vfptoui_nxv2i64_nxv2f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_nxv2i64_nxv2f64: ; CHECK: # %bb.0: @@ -485,8 +459,6 @@ define @vfptoui_nxv2i64_nxv2f64_unmasked( %v } -declare @llvm.vp.fptoui.nxv32i16.nxv32f32(, , i32) - define @vfptoui_nxv32i16_nxv32f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_nxv32i16_nxv32f32: ; CHECK: # %bb.0: @@ -515,8 +487,6 @@ define @vfptoui_nxv32i16_nxv32f32( %va, ret %v } -declare @llvm.vp.fptoui.nxv32i32.nxv32f32(, , i32) - define @vfptoui_nxv32i32_nxv32f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_nxv32i32_nxv32f32: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-constrained-sdnode.ll index 566920d577ce1..b77589762017a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-constrained-sdnode.ll @@ -8,7 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v,+zvfbfmin -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare @llvm.experimental.constrained.fptrunc.nxv1f32.nxv1f64(, metadata, metadata) define @vfptrunc_nxv1f64_nxv1f32( %va) 
strictfp { ; CHECK-LABEL: vfptrunc_nxv1f64_nxv1f32: ; CHECK: # %bb.0: @@ -20,7 +19,6 @@ define @vfptrunc_nxv1f64_nxv1f32( %va) ret %evec } -declare @llvm.experimental.constrained.fptrunc.nxv1f16.nxv1f64(, metadata, metadata) define @vfptrunc_nxv1f64_nxv1f16( %va) strictfp { ; CHECK-LABEL: vfptrunc_nxv1f64_nxv1f16: ; CHECK: # %bb.0: @@ -33,7 +31,6 @@ define @vfptrunc_nxv1f64_nxv1f16( %va) ret %evec } -declare @llvm.experimental.constrained.fptrunc.nxv1f16.nxv1f32(, metadata, metadata) define @vfptrunc_nxv1f32_nxv1f16( %va) strictfp { ; CHECK-LABEL: vfptrunc_nxv1f32_nxv1f16: ; CHECK: # %bb.0: @@ -45,7 +42,6 @@ define @vfptrunc_nxv1f32_nxv1f16( %va) s ret %evec } -declare @llvm.experimental.constrained.fptrunc.nxv2f32.nxv2f64(, metadata, metadata) define @vfptrunc_nxv2f64_nxv2f32( %va) strictfp { ; CHECK-LABEL: vfptrunc_nxv2f64_nxv2f32: ; CHECK: # %bb.0: @@ -57,7 +53,6 @@ define @vfptrunc_nxv2f64_nxv2f32( %va) ret %evec } -declare @llvm.experimental.constrained.fptrunc.nxv2f16.nxv2f64(, metadata, metadata) define @vfptrunc_nxv2f64_nxv2f16( %va) strictfp { ; CHECK-LABEL: vfptrunc_nxv2f64_nxv2f16: ; CHECK: # %bb.0: @@ -70,7 +65,6 @@ define @vfptrunc_nxv2f64_nxv2f16( %va) ret %evec } -declare @llvm.experimental.constrained.fptrunc.nxv2f16.nxv2f32(, metadata, metadata) define @vfptrunc_nxv2f32_nxv2f16( %va) strictfp { ; CHECK-LABEL: vfptrunc_nxv2f32_nxv2f16: ; CHECK: # %bb.0: @@ -82,7 +76,6 @@ define @vfptrunc_nxv2f32_nxv2f16( %va) s ret %evec } -declare @llvm.experimental.constrained.fptrunc.nxv4f32.nxv4f64(, metadata, metadata) define @vfptrunc_nxv4f64_nxv4f32( %va) strictfp { ; CHECK-LABEL: vfptrunc_nxv4f64_nxv4f32: ; CHECK: # %bb.0: @@ -94,7 +87,6 @@ define @vfptrunc_nxv4f64_nxv4f32( %va) ret %evec } -declare @llvm.experimental.constrained.fptrunc.nxv4f16.nxv4f64(, metadata, metadata) define @vfptrunc_nxv4f64_nxv4f16( %va) strictfp { ; CHECK-LABEL: vfptrunc_nxv4f64_nxv4f16: ; CHECK: # %bb.0: @@ -107,7 +99,6 @@ define @vfptrunc_nxv4f64_nxv4f16( %va) ret %evec } 
-declare @llvm.experimental.constrained.fptrunc.nxv4f16.nxv4f32(, metadata, metadata) define @vfptrunc_nxv4f32_nxv4f16( %va) strictfp { ; CHECK-LABEL: vfptrunc_nxv4f32_nxv4f16: ; CHECK: # %bb.0: @@ -119,7 +110,6 @@ define @vfptrunc_nxv4f32_nxv4f16( %va) s ret %evec } -declare @llvm.experimental.constrained.fptrunc.nxv8f32.nxv8f64(, metadata, metadata) define @vfptrunc_nxv8f64_nxv8f32( %va) strictfp { ; CHECK-LABEL: vfptrunc_nxv8f64_nxv8f32: ; CHECK: # %bb.0: @@ -131,7 +121,6 @@ define @vfptrunc_nxv8f64_nxv8f32( %va) ret %evec } -declare @llvm.experimental.constrained.fptrunc.nxv8f16.nxv8f64(, metadata, metadata) define @vfptrunc_nxv8f64_nxv8f16( %va) strictfp { ; CHECK-LABEL: vfptrunc_nxv8f64_nxv8f16: ; CHECK: # %bb.0: @@ -144,7 +133,6 @@ define @vfptrunc_nxv8f64_nxv8f16( %va) ret %evec } -declare @llvm.experimental.constrained.fptrunc.nxv8f16.nxv8f32(, metadata, metadata) define @vfptrunc_nxv8f32_nxv8f16( %va) strictfp { ; CHECK-LABEL: vfptrunc_nxv8f32_nxv8f16: ; CHECK: # %bb.0: @@ -156,7 +144,6 @@ define @vfptrunc_nxv8f32_nxv8f16( %va) s ret %evec } -declare @llvm.experimental.constrained.fptrunc.nxv1bf16.nxv1f64(, metadata, metadata) define @vfptrunc_nxv1f64_nxv1bf16( %va) strictfp { ; CHECK-LABEL: vfptrunc_nxv1f64_nxv1bf16: ; CHECK: # %bb.0: @@ -169,7 +156,6 @@ define @vfptrunc_nxv1f64_nxv1bf16( %v ret %evec } -declare @llvm.experimental.constrained.fptrunc.nxv1bf16.nxv1f32(, metadata, metadata) define @vfptrunc_nxv1f32_nxv1bf16( %va) strictfp { ; CHECK-LABEL: vfptrunc_nxv1f32_nxv1bf16: ; CHECK: # %bb.0: @@ -181,7 +167,6 @@ define @vfptrunc_nxv1f32_nxv1bf16( %va ret %evec } -declare @llvm.experimental.constrained.fptrunc.nxv2bf16.nxv2f64(, metadata, metadata) define @vfptrunc_nxv2f64_nxv2bf16( %va) strictfp { ; CHECK-LABEL: vfptrunc_nxv2f64_nxv2bf16: ; CHECK: # %bb.0: @@ -194,7 +179,6 @@ define @vfptrunc_nxv2f64_nxv2bf16( %v ret %evec } -declare @llvm.experimental.constrained.fptrunc.nxv2bf16.nxv2f32(, metadata, metadata) define @vfptrunc_nxv2f32_nxv2bf16( %va) 
strictfp { ; CHECK-LABEL: vfptrunc_nxv2f32_nxv2bf16: ; CHECK: # %bb.0: @@ -206,7 +190,6 @@ define @vfptrunc_nxv2f32_nxv2bf16( %va ret %evec } -declare @llvm.experimental.constrained.fptrunc.nxv4bf16.nxv4f64(, metadata, metadata) define @vfptrunc_nxv4f64_nxv4bf16( %va) strictfp { ; CHECK-LABEL: vfptrunc_nxv4f64_nxv4bf16: ; CHECK: # %bb.0: @@ -219,7 +202,6 @@ define @vfptrunc_nxv4f64_nxv4bf16( %v ret %evec } -declare @llvm.experimental.constrained.fptrunc.nxv4bf16.nxv4f32(, metadata, metadata) define @vfptrunc_nxv4f32_nxv4bf16( %va) strictfp { ; CHECK-LABEL: vfptrunc_nxv4f32_nxv4bf16: ; CHECK: # %bb.0: @@ -231,7 +213,6 @@ define @vfptrunc_nxv4f32_nxv4bf16( %va ret %evec } -declare @llvm.experimental.constrained.fptrunc.nxv8bf16.nxv8f64(, metadata, metadata) define @vfptrunc_nxv8f64_nxv8bf16( %va) strictfp { ; CHECK-LABEL: vfptrunc_nxv8f64_nxv8bf16: ; CHECK: # %bb.0: @@ -244,7 +225,6 @@ define @vfptrunc_nxv8f64_nxv8bf16( %v ret %evec } -declare @llvm.experimental.constrained.fptrunc.nxv8bf16.nxv8f32(, metadata, metadata) define @vfptrunc_nxv8f32_nxv8bf16( %va) strictfp { ; CHECK-LABEL: vfptrunc_nxv8f32_nxv8bf16: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll index 03de35c212296..4177672b3a306 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+v,+m,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v,+m,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s -declare @llvm.vp.fptrunc.nxv2f16.nxv2f32(, , i32) - define @vfptrunc_nxv2f16_nxv2f32( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vfptrunc_nxv2f16_nxv2f32: ; CHECK: # %bb.0: @@ -28,8 +26,6 @@ define @vfptrunc_nxv2f16_nxv2f32_unmasked( %v } -declare @llvm.vp.fptrunc.nxv2f16.nxv2f64(, , i32) - define @vfptrunc_nxv2f16_nxv2f64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: 
vfptrunc_nxv2f16_nxv2f64: ; CHECK: # %bb.0: @@ -54,8 +50,6 @@ define @vfptrunc_nxv2f16_nxv2f64_unmasked( %v } -declare @llvm.vp.fptrunc.nxv2f64.nxv2f32(, , i32) - define @vfptrunc_nxv2f32_nxv2f64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vfptrunc_nxv2f32_nxv2f64: ; CHECK: # %bb.0: @@ -78,8 +72,6 @@ define @vfptrunc_nxv2f32_nxv2f64_unmasked( %v } -declare @llvm.vp.fptrunc.nxv7f64.nxv7f32(, , i32) - define @vfptrunc_nxv7f32_nxv7f64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vfptrunc_nxv7f32_nxv7f64: ; CHECK: # %bb.0: @@ -91,8 +83,6 @@ define @vfptrunc_nxv7f32_nxv7f64( %a, ret %v } -declare @llvm.vp.fptrunc.nxv16f64.nxv16f32(, , i32) - define @vfptrunc_nxv16f32_nxv16f64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vfptrunc_nxv16f32_nxv16f64: ; CHECK: # %bb.0: @@ -120,8 +110,6 @@ define @vfptrunc_nxv16f32_nxv16f64( ret %v } -declare @llvm.vp.fptrunc.nxv32f64.nxv32f32(, , i32) - define @vfptrunc_nxv32f32_nxv32f64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vfptrunc_nxv32f32_nxv32f64: ; CHECK: # %bb.0: @@ -206,8 +194,6 @@ define @vfptrunc_nxv32f32_nxv32f64( ret %v } -declare @llvm.vp.fptrunc.nxv2bf16.nxv2f32(, , i32) - define @vfptrunc_nxv2bf16_nxv2f32( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vfptrunc_nxv2bf16_nxv2f32: ; CHECK: # %bb.0: @@ -230,8 +216,6 @@ define @vfptrunc_nxv2bf16_nxv2f32_unmasked( %v } -declare @llvm.vp.fptrunc.nxv2bf16.nxv2f64(, , i32) - define @vfptrunc_nxv2bf16_nxv2f64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vfptrunc_nxv2bf16_nxv2f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-vp.ll index e8688abc63a5d..42be051e35aae 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare @llvm.vp.fdiv.nxv1f16(, , , i32) - define @vfrdiv_vf_nxv1f16( %va, half %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vfrdiv_vf_nxv1f16: ; CHECK: # %bb.0: @@ -30,8 +28,6 @@ define @vfrdiv_vf_nxv1f16_unmasked( %va, ret %v } -declare @llvm.vp.fdiv.nxv2f16(, , , i32) - define @vfrdiv_vf_nxv2f16( %va, half %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv2f16: ; CHECK: # %bb.0: @@ -56,8 +52,6 @@ define @vfrdiv_vf_nxv2f16_unmasked( %va, ret %v } -declare @llvm.vp.fdiv.nxv4f16(, , , i32) - define @vfrdiv_vf_nxv4f16( %va, half %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv4f16: ; CHECK: # %bb.0: @@ -82,8 +76,6 @@ define @vfrdiv_vf_nxv4f16_unmasked( %va, ret %v } -declare @llvm.vp.fdiv.nxv8f16(, , , i32) - define @vfrdiv_vf_nxv8f16( %va, half %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv8f16: ; CHECK: # %bb.0: @@ -108,8 +100,6 @@ define @vfrdiv_vf_nxv8f16_unmasked( %va, ret %v } -declare @llvm.vp.fdiv.nxv16f16(, , , i32) - define @vfrdiv_vf_nxv16f16( %va, half %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv16f16: ; CHECK: # %bb.0: @@ -134,8 +124,6 @@ define @vfrdiv_vf_nxv16f16_unmasked( %v ret %v } -declare @llvm.vp.fdiv.nxv32f16(, , , i32) - define @vfrdiv_vf_nxv32f16( %va, half %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv32f16: ; CHECK: # %bb.0: @@ -160,8 +148,6 @@ define @vfrdiv_vf_nxv32f16_unmasked( %v ret %v } -declare @llvm.vp.fdiv.nxv1f32(, , , i32) - define @vfrdiv_vf_nxv1f32( %va, float %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv1f32: ; CHECK: # %bb.0: @@ -186,8 +172,6 @@ define @vfrdiv_vf_nxv1f32_unmasked( %va ret %v } -declare @llvm.vp.fdiv.nxv2f32(, , , i32) - define @vfrdiv_vf_nxv2f32( %va, float %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv2f32: ; CHECK: # %bb.0: @@ -212,8 +196,6 @@ define @vfrdiv_vf_nxv2f32_unmasked( %va ret %v } -declare @llvm.vp.fdiv.nxv4f32(, , , i32) - define @vfrdiv_vf_nxv4f32( %va, float %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv4f32: ; CHECK: # %bb.0: @@ -238,8 +220,6 @@ define @vfrdiv_vf_nxv4f32_unmasked( %va ret %v } -declare @llvm.vp.fdiv.nxv8f32(, , , i32) - 
define @vfrdiv_vf_nxv8f32( %va, float %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv8f32: ; CHECK: # %bb.0: @@ -264,8 +244,6 @@ define @vfrdiv_vf_nxv8f32_unmasked( %va ret %v } -declare @llvm.vp.fdiv.nxv16f32(, , , i32) - define @vfrdiv_vf_nxv16f32( %va, float %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv16f32: ; CHECK: # %bb.0: @@ -290,8 +268,6 @@ define @vfrdiv_vf_nxv16f32_unmasked( ret %v } -declare @llvm.vp.fdiv.nxv1f64(, , , i32) - define @vfrdiv_vf_nxv1f64( %va, double %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv1f64: ; CHECK: # %bb.0: @@ -316,8 +292,6 @@ define @vfrdiv_vf_nxv1f64_unmasked( % ret %v } -declare @llvm.vp.fdiv.nxv2f64(, , , i32) - define @vfrdiv_vf_nxv2f64( %va, double %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv2f64: ; CHECK: # %bb.0: @@ -342,8 +316,6 @@ define @vfrdiv_vf_nxv2f64_unmasked( % ret %v } -declare @llvm.vp.fdiv.nxv4f64(, , , i32) - define @vfrdiv_vf_nxv4f64( %va, double %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv4f64: ; CHECK: # %bb.0: @@ -368,8 +340,6 @@ define @vfrdiv_vf_nxv4f64_unmasked( % ret %v } -declare @llvm.vp.fdiv.nxv8f64(, , , i32) - define @vfrdiv_vf_nxv8f64( %va, double %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrdiv.ll b/llvm/test/CodeGen/RISCV/rvv/vfrdiv.ll index 6a2a6a4c4a0cd..e36786e423193 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfrdiv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrdiv.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfrdiv.nxv1f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.mask.nxv1f16.f16( - , - , - half, - , - iXLen, 
iXLen, iXLen); - define @intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.nxv2f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfrdiv_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.mask.nxv2f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrdiv_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.nxv4f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfrdiv_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.mask.nxv4f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrdiv_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.nxv8f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfrdiv_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.mask.nxv8f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrdiv_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: 
ret %a } -declare @llvm.riscv.vfrdiv.nxv16f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfrdiv_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.mask.nxv16f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrdiv_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.nxv32f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfrdiv_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.mask.nxv32f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrdiv_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.nxv1f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfrdiv_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.mask.nxv1f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrdiv_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.nxv2f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfrdiv_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfrdiv_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.mask.nxv2f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrdiv_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.nxv4f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfrdiv_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.mask.nxv4f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrdiv_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.nxv8f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfrdiv_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.mask.nxv8f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrdiv_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -504,12 +374,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.nxv16f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfrdiv_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -528,13 +392,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.mask.nxv16f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfrdiv_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -554,12 +411,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.nxv1f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfrdiv_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -578,13 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.mask.nxv1f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrdiv_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -604,12 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.nxv2f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfrdiv_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -628,13 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.mask.nxv2f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrdiv_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -654,12 +485,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.nxv4f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfrdiv_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -678,13 +503,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.mask.nxv4f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrdiv_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -704,12 +522,6 
@@ entry: ret %a } -declare @llvm.riscv.vfrdiv.nxv8f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfrdiv_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry @@ -728,13 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vfrdiv.mask.nxv8f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrdiv_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrec7-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfrec7-bf.ll index 1211415ffe432..ed0290cfeac2c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfrec7-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrec7-bf.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfrec7.nxv1bf16( - , - , - iXLen, iXLen); - define @intrinsic_vfrec7_v_nxv1bf16_nxv1bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -26,12 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.mask.nxv1bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrec7_mask_v_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -50,11 +39,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.nxv2bf16( - , - , - iXLen, iXLen); - define @intrinsic_vfrec7_v_nxv2bf16_nxv2bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -72,12 +56,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.mask.nxv2bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrec7_mask_v_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv2bf16_nxv2bf16: ; CHECK: 
# %bb.0: # %entry @@ -96,11 +74,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.nxv4bf16( - , - , - iXLen, iXLen); - define @intrinsic_vfrec7_v_nxv4bf16_nxv4bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -118,12 +91,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.mask.nxv4bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrec7_mask_v_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -142,11 +109,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.nxv8bf16( - , - , - iXLen, iXLen); - define @intrinsic_vfrec7_v_nxv8bf16_nxv8bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -164,12 +126,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.mask.nxv8bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrec7_mask_v_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -188,11 +144,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.nxv16bf16( - , - , - iXLen, iXLen); - define @intrinsic_vfrec7_v_nxv16bf16_nxv16bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -210,12 +161,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.mask.nxv16bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrec7_mask_v_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -234,11 +179,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.nxv32bf16( - , - , - iXLen, iXLen); - define @intrinsic_vfrec7_v_nxv32bf16_nxv32bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -256,12 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.mask.nxv32bf16( - , - , - , - 
iXLen, iXLen, iXLen); - define @intrinsic_vfrec7_mask_v_nxv32bf16_nxv32bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrec7.ll b/llvm/test/CodeGen/RISCV/rvv/vfrec7.ll index e185e8d568701..4d6efc7ca56ad 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfrec7.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrec7.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfrec7.nxv1f16( - , - , - iXLen, iXLen); - define @intrinsic_vfrec7_v_nxv1f16_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -26,12 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.mask.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -50,11 +39,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.nxv2f16( - , - , - iXLen, iXLen); - define @intrinsic_vfrec7_v_nxv2f16_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -72,12 +56,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.mask.nxv2f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrec7_mask_v_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -96,11 +74,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.nxv4f16( - , - , - iXLen, iXLen); - define @intrinsic_vfrec7_v_nxv4f16_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -118,12 +91,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.mask.nxv4f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrec7_mask_v_nxv4f16_nxv4f16( %0, 
%1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -142,11 +109,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.nxv8f16( - , - , - iXLen, iXLen); - define @intrinsic_vfrec7_v_nxv8f16_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -164,12 +126,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.mask.nxv8f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrec7_mask_v_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -188,11 +144,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.nxv16f16( - , - , - iXLen, iXLen); - define @intrinsic_vfrec7_v_nxv16f16_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -210,12 +161,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.mask.nxv16f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrec7_mask_v_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -234,11 +179,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.nxv32f16( - , - , - iXLen, iXLen); - define @intrinsic_vfrec7_v_nxv32f16_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -256,12 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.mask.nxv32f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrec7_mask_v_nxv32f16_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -280,11 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.nxv1f32( - , - , - iXLen, iXLen); - define @intrinsic_vfrec7_v_nxv1f32_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -302,12 +231,6 @@ entry: 
ret %a } -declare @llvm.riscv.vfrec7.mask.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrec7_mask_v_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -326,11 +249,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.nxv2f32( - , - , - iXLen, iXLen); - define @intrinsic_vfrec7_v_nxv2f32_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -348,12 +266,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.mask.nxv2f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrec7_mask_v_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -372,11 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.nxv4f32( - , - , - iXLen, iXLen); - define @intrinsic_vfrec7_v_nxv4f32_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -394,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.mask.nxv4f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrec7_mask_v_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -418,11 +319,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.nxv8f32( - , - , - iXLen, iXLen); - define @intrinsic_vfrec7_v_nxv8f32_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -440,12 +336,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.mask.nxv8f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrec7_mask_v_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -464,11 +354,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.nxv16f32( - , - , - iXLen, iXLen); - define @intrinsic_vfrec7_v_nxv16f32_nxv16f32( %0, 
iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -486,12 +371,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.mask.nxv16f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrec7_mask_v_nxv16f32_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -510,11 +389,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.nxv1f64( - , - , - iXLen, iXLen); - define @intrinsic_vfrec7_v_nxv1f64_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -532,12 +406,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.mask.nxv1f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrec7_mask_v_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -556,11 +424,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.nxv2f64( - , - , - iXLen, iXLen); - define @intrinsic_vfrec7_v_nxv2f64_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -578,12 +441,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.mask.nxv2f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrec7_mask_v_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -602,11 +459,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.nxv4f64( - , - , - iXLen, iXLen); - define @intrinsic_vfrec7_v_nxv4f64_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -624,12 +476,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.mask.nxv4f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrec7_mask_v_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -648,11 +494,6 @@ 
entry: ret %a } -declare @llvm.riscv.vfrec7.nxv8f64( - , - , - iXLen, iXLen); - define @intrinsic_vfrec7_v_nxv8f64_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -670,12 +511,6 @@ entry: ret %a } -declare @llvm.riscv.vfrec7.mask.nxv8f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrec7_mask_v_nxv8f64_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredmax.ll b/llvm/test/CodeGen/RISCV/rvv/vfredmax.ll index f1ed95512741c..2da41ac1e3a91 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfredmax.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfredmax.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfredmax.nxv4f16.nxv1f16( - , - , - , - iXLen); - define @intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16.nxv1i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.nxv4f16.nxv2f16( - , - , - , - iXLen); - define @intrinsic_vfredmax_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv2f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16.nxv2i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.nxv4f16.nxv4f16( - , - , - , - iXLen); - define @intrinsic_vfredmax_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16.nxv4i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.nxv4f16.nxv8f16( - , - , - , - iXLen); - define @intrinsic_vfredmax_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv8f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16.nxv8i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.nxv4f16.nxv16f16( - , - , - , - iXLen); - define @intrinsic_vfredmax_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv16f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16.nxv16i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.nxv4f16.nxv32f16( - , - , - , - iXLen); - define 
@intrinsic_vfredmax_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv32f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16.nxv32i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -280,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.nxv2f32.nxv1f32( - , - , - , - iXLen); - define @intrinsic_vfredmax_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -302,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.nxv1i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -326,12 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.nxv2f32.nxv2f32( - , - , - , - iXLen); - define @intrinsic_vfredmax_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -348,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.nxv2i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -372,12 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.nxv2f32.nxv4f32( - , - , - , - iXLen); - define @intrinsic_vfredmax_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -394,13 +284,6 @@ 
entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.nxv4i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -418,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.nxv2f32.nxv8f32( - , - , - , - iXLen); - define @intrinsic_vfredmax_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -440,13 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.nxv8i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -464,12 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.nxv2f32.nxv16f32( - , - , - , - iXLen); - define @intrinsic_vfredmax_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -486,13 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.nxv16i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -510,12 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.nxv1f64.nxv1f64( - , - , - , - iXLen); - define @intrinsic_vfredmax_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -532,13 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.nxv1i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmax_mask_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -556,12 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.nxv1f64.nxv2f64( - , - , - , - iXLen); - define @intrinsic_vfredmax_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -578,13 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.nxv2i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmax_mask_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -602,12 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.nxv1f64.nxv4f64( - , - , - , - iXLen); - define @intrinsic_vfredmax_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -624,13 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.nxv4i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmax_mask_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -648,12 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.nxv1f64.nxv8f64( - , - , - , - iXLen); - define @intrinsic_vfredmax_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -670,13 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.nxv8i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmax_mask_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredmin.ll b/llvm/test/CodeGen/RISCV/rvv/vfredmin.ll 
index 5dfa5a1f2b20e..d2b5f4a495104 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfredmin.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfredmin.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfredmin.nxv4f16.nxv1f16( - , - , - , - iXLen); - define @intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16.nxv1i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.nxv4f16.nxv2f16( - , - , - , - iXLen); - define @intrinsic_vfredmin_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv2f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16.nxv2i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv2f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.nxv4f16.nxv4f16( - , - , - , - iXLen); - define @intrinsic_vfredmin_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16.nxv4i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfredmin_mask_vs_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.nxv4f16.nxv8f16( - , - , - , - iXLen); - define @intrinsic_vfredmin_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv8f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16.nxv8i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv8f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.nxv4f16.nxv16f16( - , - , - , - iXLen); - define @intrinsic_vfredmin_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv16f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16.nxv16i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv16f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.nxv4f16.nxv32f16( - , - , - , - iXLen); - define @intrinsic_vfredmin_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv32f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16.nxv32i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv32f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -280,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.nxv2f32.nxv1f32( - , - , - , - iXLen); 
- define @intrinsic_vfredmin_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -302,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.nxv1i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmin_mask_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -326,12 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.nxv2f32.nxv2f32( - , - , - , - iXLen); - define @intrinsic_vfredmin_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -348,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.nxv2i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmin_mask_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -372,12 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.nxv2f32.nxv4f32( - , - , - , - iXLen); - define @intrinsic_vfredmin_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -394,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.nxv4i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmin_mask_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -418,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.nxv2f32.nxv8f32( - , - , - , - iXLen); - define @intrinsic_vfredmin_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -440,13 +317,6 @@ 
entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.nxv8i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmin_mask_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -464,12 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.nxv2f32.nxv16f32( - , - , - , - iXLen); - define @intrinsic_vfredmin_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -486,13 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.nxv16i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmin_mask_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -510,12 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.nxv1f64.nxv1f64( - , - , - , - iXLen); - define @intrinsic_vfredmin_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -532,13 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.nxv1i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmin_mask_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -556,12 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.nxv1f64.nxv2f64( - , - , - , - iXLen); - define @intrinsic_vfredmin_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -578,13 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.nxv2i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmin_mask_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -602,12 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.nxv1f64.nxv4f64( - , - , - , - iXLen); - define @intrinsic_vfredmin_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -624,13 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.nxv4i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmin_mask_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -648,12 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.nxv1f64.nxv8f64( - , - , - , - iXLen); - define @intrinsic_vfredmin_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -670,13 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.nxv8i1( - , - , - , - , - iXLen); - define @intrinsic_vfredmin_mask_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredosum.ll b/llvm/test/CodeGen/RISCV/rvv/vfredosum.ll index a85850b0c4504..81e5c96b6299f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfredosum.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfredosum.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfredosum.nxv4f16.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16.nxv1i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_mask_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.nxv4f16.nxv2f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv2f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16.nxv2i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_mask_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv2f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.nxv4f16.nxv4f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16.nxv4i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_mask_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.nxv4f16.nxv8f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv8f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16.nxv8i1( - , - , - , - , - iXLen, iXLen); - define 
@intrinsic_vfredosum_mask_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv8f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.nxv4f16.nxv16f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv16f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16.nxv16i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_mask_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv16f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.nxv4f16.nxv32f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv32f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16.nxv32i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_mask_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv32f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.nxv2f32.nxv1f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.nxv1i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_mask_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfredosum_mask_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.nxv2f32.nxv2f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.nxv2i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_mask_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.nxv2f32.nxv4f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.nxv4i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_mask_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.nxv2f32.nxv8f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.nxv8i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_mask_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -504,12 +374,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfredosum.nxv2f32.nxv16f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -528,13 +392,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.nxv16i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_mask_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -554,12 +411,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.nxv1f64.nxv1f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -578,13 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.nxv1i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_mask_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -604,12 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.nxv1f64.nxv2f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -628,13 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.nxv2i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_mask_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -654,12 +485,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.nxv1f64.nxv4f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_vs_nxv1f64_nxv4f64_nxv1f64( %0, 
%1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -678,13 +503,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.nxv4i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_mask_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -704,12 +522,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.nxv1f64.nxv8f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -728,13 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.nxv8i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredosum_mask_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredusum.ll b/llvm/test/CodeGen/RISCV/rvv/vfredusum.ll index b3101450493e8..22d44f8341743 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfredusum.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfredusum.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfredusum.nxv4f16.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.mask.nxv4f16.nxv1f16.nxv1i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfredusum_mask_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.nxv4f16.nxv2f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv2f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.mask.nxv4f16.nxv2f16.nxv2i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv2f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.nxv4f16.nxv4f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.mask.nxv4f16.nxv4f16.nxv4i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.nxv4f16.nxv8f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv8f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.mask.nxv4f16.nxv8f16.nxv8i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv8f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfredusum.nxv4f16.nxv16f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv16f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.mask.nxv4f16.nxv16f16.nxv16i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv16f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.nxv4f16.nxv32f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv32f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.mask.nxv4f16.nxv32f16.nxv32i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv32f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.nxv2f32.nxv1f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.nxv1i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.nxv2f32.nxv2f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_vs_nxv2f32_nxv2f32_nxv2f32( 
%0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32.nxv2i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.nxv2f32.nxv4f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32.nxv4i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.nxv2f32.nxv8f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32.nxv8i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -504,12 +374,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.nxv2f32.nxv16f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -528,13 
+392,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32.nxv16i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -554,12 +411,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.nxv1f64.nxv1f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -578,13 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64.nxv1i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_mask_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -604,12 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.nxv1f64.nxv2f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -628,13 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64.nxv2i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_mask_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -654,12 +485,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.nxv1f64.nxv4f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -678,13 +503,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64.nxv4i1( - , - , - , - , - iXLen, iXLen); - define 
@intrinsic_vfredusum_mask_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -704,12 +522,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.nxv1f64.nxv8f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -728,13 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64.nxv8i1( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfredusum_mask_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-bf16.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-bf16.ll index 4626b865ab454..4ba0e4e3dcd63 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-bf16.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-bf16.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfrsqrt7.nxv1bf16( - , - , - iXLen); - define @intrinsic_vfrsqrt7_v_nxv1bf16_nxv1bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.mask.nxv1bf16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfrsqrt7_mask_v_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -47,11 +35,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.nxv2bf16( - , - , - iXLen); - define @intrinsic_vfrsqrt7_v_nxv2bf16_nxv2bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -67,13 +50,6 @@ entry: 
ret %a } -declare @llvm.riscv.vfrsqrt7.mask.nxv2bf16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfrsqrt7_mask_v_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -90,11 +66,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.nxv4bf16( - , - , - iXLen); - define @intrinsic_vfrsqrt7_v_nxv4bf16_nxv4bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -110,13 +81,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.mask.nxv4bf16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfrsqrt7_mask_v_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -133,11 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.nxv8bf16( - , - , - iXLen); - define @intrinsic_vfrsqrt7_v_nxv8bf16_nxv8bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -153,13 +112,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.mask.nxv8bf16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfrsqrt7_mask_v_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -176,11 +128,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.nxv16bf16( - , - , - iXLen); - define @intrinsic_vfrsqrt7_v_nxv16bf16_nxv16bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -196,13 +143,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.mask.nxv16bf16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfrsqrt7_mask_v_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -219,11 +159,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.nxv32bf16( - , - , - iXLen); - define 
@intrinsic_vfrsqrt7_v_nxv32bf16_nxv32bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -239,13 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.mask.nxv32bf16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfrsqrt7_mask_v_nxv32bf16_nxv32bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7.ll index db303dddc328e..bfaca190a45e4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfrsqrt7.nxv1f16( - , - , - iXLen); - define @intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.mask.nxv1f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -47,11 +35,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.nxv2f16( - , - , - iXLen); - define @intrinsic_vfrsqrt7_v_nxv2f16_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -67,13 +50,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.mask.nxv2f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfrsqrt7_mask_v_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -90,11 +66,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.nxv4f16( - , - , - iXLen); - define @intrinsic_vfrsqrt7_v_nxv4f16_nxv4f16( %0, iXLen %1) 
nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -110,13 +81,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.mask.nxv4f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfrsqrt7_mask_v_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -133,11 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.nxv8f16( - , - , - iXLen); - define @intrinsic_vfrsqrt7_v_nxv8f16_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -153,13 +112,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.mask.nxv8f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfrsqrt7_mask_v_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -176,11 +128,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.nxv16f16( - , - , - iXLen); - define @intrinsic_vfrsqrt7_v_nxv16f16_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -196,13 +143,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.mask.nxv16f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfrsqrt7_mask_v_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -219,11 +159,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.nxv32f16( - , - , - iXLen); - define @intrinsic_vfrsqrt7_v_nxv32f16_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -239,13 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.mask.nxv32f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfrsqrt7_mask_v_nxv32f16_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -262,11 +190,6 
@@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.nxv1f32( - , - , - iXLen); - define @intrinsic_vfrsqrt7_v_nxv1f32_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -282,13 +205,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.mask.nxv1f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfrsqrt7_mask_v_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -305,11 +221,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.nxv2f32( - , - , - iXLen); - define @intrinsic_vfrsqrt7_v_nxv2f32_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -325,13 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.mask.nxv2f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfrsqrt7_mask_v_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -348,11 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.nxv4f32( - , - , - iXLen); - define @intrinsic_vfrsqrt7_v_nxv4f32_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -368,13 +267,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.mask.nxv4f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfrsqrt7_mask_v_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -391,11 +283,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.nxv8f32( - , - , - iXLen); - define @intrinsic_vfrsqrt7_v_nxv8f32_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -411,13 +298,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.mask.nxv8f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfrsqrt7_mask_v_nxv8f32_nxv8f32( %0, %1, %2, 
iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -434,11 +314,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.nxv16f32( - , - , - iXLen); - define @intrinsic_vfrsqrt7_v_nxv16f32_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -454,13 +329,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.mask.nxv16f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfrsqrt7_mask_v_nxv16f32_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -477,11 +345,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.nxv1f64( - , - , - iXLen); - define @intrinsic_vfrsqrt7_v_nxv1f64_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -497,13 +360,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.mask.nxv1f64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfrsqrt7_mask_v_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -520,11 +376,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.nxv2f64( - , - , - iXLen); - define @intrinsic_vfrsqrt7_v_nxv2f64_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -540,13 +391,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.mask.nxv2f64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfrsqrt7_mask_v_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -563,11 +407,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.nxv4f64( - , - , - iXLen); - define @intrinsic_vfrsqrt7_v_nxv4f64_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -583,13 +422,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfrsqrt7.mask.nxv4f64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfrsqrt7_mask_v_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -606,11 +438,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.nxv8f64( - , - , - iXLen); - define @intrinsic_vfrsqrt7_v_nxv8f64_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -626,13 +453,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsqrt7.mask.nxv8f64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfrsqrt7_mask_v_nxv8f64_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsub-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsub-bf.ll index 54a6d48cfcc5b..ecd54fb090c14 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfrsub-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrsub-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfrsub.nxv1bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfrsub_vf_nxv1bf16_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1bf16_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.mask.nxv1bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrsub_mask_vf_nxv1bf16_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1bf16_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.nxv2bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfrsub_vf_nxv2bf16_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfrsub_vf_nxv2bf16_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.mask.nxv2bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrsub_mask_vf_nxv2bf16_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2bf16_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.nxv4bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfrsub_vf_nxv4bf16_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4bf16_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.mask.nxv4bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrsub_mask_vf_nxv4bf16_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4bf16_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.nxv8bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfrsub_vf_nxv8bf16_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8bf16_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.mask.nxv8bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrsub_mask_vf_nxv8bf16_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8bf16_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.nxv16bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfrsub_vf_nxv16bf16_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv16bf16_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfrsub.mask.nxv16bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrsub_mask_vf_nxv16bf16_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16bf16_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.nxv32bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfrsub_vf_nxv32bf16_nxv32bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv32bf16_nxv32bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.mask.nxv32bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrsub_mask_vf_nxv32bf16_nxv32bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv32bf16_nxv32bf16_bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsub-vp.ll index e2864ea30ec7b..2fc77437019e3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfrsub-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrsub-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare @llvm.vp.fsub.nxv1f16(, , , i32) - define @vfrsub_vf_nxv1f16( %va, half %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_nxv1f16: ; CHECK: # %bb.0: @@ -30,8 +28,6 @@ define @vfrsub_vf_nxv1f16_unmasked( %va, ret %v } -declare @llvm.vp.fsub.nxv2f16(, , , i32) - define @vfrsub_vf_nxv2f16( %va, half %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_nxv2f16: ; CHECK: # %bb.0: @@ -56,8 +52,6 @@ define @vfrsub_vf_nxv2f16_unmasked( %va, ret %v } -declare @llvm.vp.fsub.nxv4f16(, , , i32) - define @vfrsub_vf_nxv4f16( %va, half %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_nxv4f16: ; CHECK: # %bb.0: @@ -82,8 +76,6 @@ define @vfrsub_vf_nxv4f16_unmasked( %va, ret %v } -declare @llvm.vp.fsub.nxv8f16(, , , 
i32) - define @vfrsub_vf_nxv8f16( %va, half %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_nxv8f16: ; CHECK: # %bb.0: @@ -108,8 +100,6 @@ define @vfrsub_vf_nxv8f16_unmasked( %va, ret %v } -declare @llvm.vp.fsub.nxv16f16(, , , i32) - define @vfrsub_vf_nxv16f16( %va, half %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_nxv16f16: ; CHECK: # %bb.0: @@ -134,8 +124,6 @@ define @vfrsub_vf_nxv16f16_unmasked( %v ret %v } -declare @llvm.vp.fsub.nxv32f16(, , , i32) - define @vfrsub_vf_nxv32f16( %va, half %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_nxv32f16: ; CHECK: # %bb.0: @@ -160,8 +148,6 @@ define @vfrsub_vf_nxv32f16_unmasked( %v ret %v } -declare @llvm.vp.fsub.nxv1f32(, , , i32) - define @vfrsub_vf_nxv1f32( %va, float %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_nxv1f32: ; CHECK: # %bb.0: @@ -186,8 +172,6 @@ define @vfrsub_vf_nxv1f32_unmasked( %va ret %v } -declare @llvm.vp.fsub.nxv2f32(, , , i32) - define @vfrsub_vf_nxv2f32( %va, float %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_nxv2f32: ; CHECK: # %bb.0: @@ -212,8 +196,6 @@ define @vfrsub_vf_nxv2f32_unmasked( %va ret %v } -declare @llvm.vp.fsub.nxv4f32(, , , i32) - define @vfrsub_vf_nxv4f32( %va, float %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_nxv4f32: ; CHECK: # %bb.0: @@ -238,8 +220,6 @@ define @vfrsub_vf_nxv4f32_unmasked( %va ret %v } -declare @llvm.vp.fsub.nxv8f32(, , , i32) - define @vfrsub_vf_nxv8f32( %va, float %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_nxv8f32: ; CHECK: # %bb.0: @@ -264,8 +244,6 @@ define @vfrsub_vf_nxv8f32_unmasked( %va ret %v } -declare @llvm.vp.fsub.nxv16f32(, , , i32) - define @vfrsub_vf_nxv16f32( %va, float %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_nxv16f32: ; CHECK: # %bb.0: @@ -290,8 +268,6 @@ define @vfrsub_vf_nxv16f32_unmasked( ret %v } -declare @llvm.vp.fsub.nxv1f64(, , , i32) - define @vfrsub_vf_nxv1f64( %va, double %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_nxv1f64: ; CHECK: # %bb.0: @@ -316,8 +292,6 @@ 
define @vfrsub_vf_nxv1f64_unmasked( % ret %v } -declare @llvm.vp.fsub.nxv2f64(, , , i32) - define @vfrsub_vf_nxv2f64( %va, double %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_nxv2f64: ; CHECK: # %bb.0: @@ -342,8 +316,6 @@ define @vfrsub_vf_nxv2f64_unmasked( % ret %v } -declare @llvm.vp.fsub.nxv4f64(, , , i32) - define @vfrsub_vf_nxv4f64( %va, double %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_nxv4f64: ; CHECK: # %bb.0: @@ -368,8 +340,6 @@ define @vfrsub_vf_nxv4f64_unmasked( % ret %v } -declare @llvm.vp.fsub.nxv8f64(, , , i32) - define @vfrsub_vf_nxv8f64( %va, double %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsub.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsub.ll index c701016b7f772..d09027f34c080 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfrsub.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrsub.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfrsub.nxv1f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.mask.nxv1f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrsub_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.nxv2f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfrsub_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.mask.nxv2f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - 
define @intrinsic_vfrsub_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.nxv4f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfrsub_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.mask.nxv4f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrsub_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.nxv8f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfrsub_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.mask.nxv8f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrsub_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.nxv16f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfrsub_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.mask.nxv16f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrsub_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret 
%a } -declare @llvm.riscv.vfrsub.nxv32f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfrsub_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.mask.nxv32f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrsub_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -280,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.nxv1f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfrsub_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -302,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.mask.nxv1f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrsub_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -326,12 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.nxv2f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfrsub_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -348,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.mask.nxv2f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrsub_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -372,12 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.nxv4f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfrsub_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfrsub_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -394,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.mask.nxv4f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrsub_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -418,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.nxv8f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfrsub_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -440,13 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.mask.nxv8f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrsub_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -464,12 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.nxv16f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfrsub_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -486,13 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.mask.nxv16f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrsub_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -510,12 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.nxv1f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfrsub_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -532,13 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.mask.nxv1f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - 
define @intrinsic_vfrsub_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -556,12 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.nxv2f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfrsub_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -578,13 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.mask.nxv2f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrsub_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -602,12 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.nxv4f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfrsub_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -624,13 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.mask.nxv4f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrsub_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -648,12 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.nxv8f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfrsub_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry @@ -670,13 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vfrsub.mask.nxv8f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfrsub_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-bf.ll index 2cd698d9aaa3c..605cb959134d1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfsgnj.nxv1bf16.nxv1bf16( - , - , - , - iXLen); - define @intrinsic_vfsgnj_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv2bf16.nxv2bf16( - , - , - , - iXLen); - define @intrinsic_vfsgnj_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv4bf16.nxv4bf16( - , - , - , - iXLen); - define @intrinsic_vfsgnj_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16( - , - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vfsgnj_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv8bf16.nxv8bf16( - , - , - , - iXLen); - define @intrinsic_vfsgnj_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv16bf16.nxv16bf16( - , - , - , - iXLen); - define @intrinsic_vfsgnj_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv32bf16.nxv32bf16( - , - , - , - iXLen); - define @intrinsic_vfsgnj_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv32bf16_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16: ; 
CHECK: # %bb.0: # %entry @@ -287,12 +203,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv1bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfsgnj_vf_nxv1bf16_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv1bf16_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -309,14 +219,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vf_nxv1bf16_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1bf16_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv2bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfsgnj_vf_nxv2bf16_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv2bf16_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vf_nxv2bf16_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2bf16_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv4bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfsgnj_vf_nxv4bf16_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv4bf16_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vf_nxv4bf16_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4bf16_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv8bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfsgnj_vf_nxv8bf16_nxv8bf16_bf16( 
%0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv8bf16_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vf_nxv8bf16_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8bf16_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv16bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfsgnj_vf_nxv16bf16_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv16bf16_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vf_nxv16bf16_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv16bf16_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv32bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfsgnj_vf_nxv32bf16_nxv32bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv32bf16_nxv32bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -544,14 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vf_nxv32bf16_nxv32bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv32bf16_nxv32bf16_bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnj.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnj.ll index 242a826055c58..8949289ab68f8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnj.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnj.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: 
-verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfsgnj.nxv1f16.nxv1f16( - , - , - , - iXLen); - define @intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv2f16.nxv2f16( - , - , - , - iXLen); - define @intrinsic_vfsgnj_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv2f16.nxv2f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv4f16.nxv4f16( - , - , - , - iXLen); - define @intrinsic_vfsgnj_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv4f16.nxv4f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv8f16.nxv8f16( - , - , - , - iXLen); - define @intrinsic_vfsgnj_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfsgnj_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv8f16.nxv8f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv16f16.nxv16f16( - , - , - , - iXLen); - define @intrinsic_vfsgnj_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv16f16.nxv16f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv32f16.nxv32f16( - , - , - , - iXLen); - define @intrinsic_vfsgnj_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv32f16.nxv32f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -287,12 +203,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv1f32.nxv1f32( - , - , - , - iXLen); - define @intrinsic_vfsgnj_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -309,14 +219,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv1f32.nxv1f32( - , - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vfsgnj_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv2f32.nxv2f32( - , - , - , - iXLen); - define @intrinsic_vfsgnj_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv2f32.nxv2f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv4f32.nxv4f32( - , - , - , - iXLen); - define @intrinsic_vfsgnj_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv4f32.nxv4f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv8f32.nxv8f32( - , - , - , - iXLen); - define @intrinsic_vfsgnj_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv8f32.nxv8f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfsgnj.nxv16f32.nxv16f32( - , - , - , - iXLen); - define @intrinsic_vfsgnj_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv16f32.nxv16f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -523,12 +369,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv1f64.nxv1f64( - , - , - , - iXLen); - define @intrinsic_vfsgnj_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -545,14 +385,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv1f64.nxv1f64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -570,12 +402,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv2f64.nxv2f64( - , - , - , - iXLen); - define @intrinsic_vfsgnj_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -592,14 +418,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv2f64.nxv2f64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv4f64.nxv4f64( - , - , - , - iXLen); - define @intrinsic_vfsgnj_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # 
%entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv4f64.nxv4f64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv8f64.nxv8f64( - , - , - , - iXLen); - define @intrinsic_vfsgnj_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv8f64.nxv8f64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -712,12 +502,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv1f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfsgnj_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -734,14 +518,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv1f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -759,12 +535,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv2f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfsgnj_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -781,14 +551,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv2f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfsgnj_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -806,12 +568,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv4f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfsgnj_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -828,14 +584,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv4f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv8f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfsgnj_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv8f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv16f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfsgnj_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv16f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv32f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfsgnj_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen 
%2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv32f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv1f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfsgnj_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv1f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -1041,12 +733,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv2f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfsgnj_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -1063,14 +749,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv2f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -1088,12 +766,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv4f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfsgnj_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -1110,14 +782,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv4f32.f32( - , - , - float, - , - iXLen, - iXLen); - define 
@intrinsic_vfsgnj_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -1135,12 +799,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv8f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfsgnj_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1157,14 +815,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv8f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1182,12 +832,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv16f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfsgnj_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -1204,14 +848,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv16f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -1229,12 +865,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv1f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfsgnj_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1251,14 +881,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv1f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1276,12 +898,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfsgnj.nxv2f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfsgnj_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1298,14 +914,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv2f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1323,12 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv4f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfsgnj_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -1345,14 +947,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv4f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -1370,12 +964,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.nxv8f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfsgnj_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry @@ -1392,14 +980,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnj.mask.nxv8f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfsgnj_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-bf.ll index 08340becc9ed4..4d43b93e4bfd7 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 
's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfsgnjn.nxv1bf16.nxv1bf16( - , - , - , - iXLen); - define @intrinsic_vfsgnjn_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv2bf16.nxv2bf16( - , - , - , - iXLen); - define @intrinsic_vfsgnjn_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv4bf16.nxv4bf16( - , - , - , - iXLen); - define @intrinsic_vfsgnjn_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv8bf16.nxv8bf16( 
- , - , - , - iXLen); - define @intrinsic_vfsgnjn_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv16bf16.nxv16bf16( - , - , - , - iXLen); - define @intrinsic_vfsgnjn_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv32bf16.nxv32bf16( - , - , - , - iXLen); - define @intrinsic_vfsgnjn_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv32bf16_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -287,12 +203,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv1bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfsgnjn_vf_nxv1bf16_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfsgnjn_vf_nxv1bf16_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -309,14 +219,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vf_nxv1bf16_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1bf16_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv2bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfsgnjn_vf_nxv2bf16_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv2bf16_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vf_nxv2bf16_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2bf16_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv4bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfsgnjn_vf_nxv4bf16_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv4bf16_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vf_nxv4bf16_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4bf16_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv8bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfsgnjn_vf_nxv8bf16_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv8bf16_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16( - , - , - 
bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vf_nxv8bf16_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8bf16_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv16bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfsgnjn_vf_nxv16bf16_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv16bf16_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vf_nxv16bf16_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv16bf16_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv32bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfsgnjn_vf_nxv32bf16_nxv32bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv32bf16_nxv32bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -544,14 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vf_nxv32bf16_nxv32bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv32bf16_nxv32bf16_bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn.ll index 25b99a1763e49..edd1bb65437cb 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16( - , - , - , - iXLen); - define @intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv2f16.nxv2f16( - , - , - , - iXLen); - define @intrinsic_vfsgnjn_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv2f16.nxv2f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv4f16.nxv4f16( - , - , - , - iXLen); - define @intrinsic_vfsgnjn_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv4f16.nxv4f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv8f16.nxv8f16( - , - , - , - iXLen); - define @intrinsic_vfsgnjn_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv8f16.nxv8f16( - , - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vfsgnjn_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv16f16.nxv16f16( - , - , - , - iXLen); - define @intrinsic_vfsgnjn_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv16f16.nxv16f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv32f16.nxv32f16( - , - , - , - iXLen); - define @intrinsic_vfsgnjn_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv32f16.nxv32f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -287,12 +203,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32( - , - , - , - iXLen); - define @intrinsic_vfsgnjn_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -309,14 +219,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ 
-334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32( - , - , - , - iXLen); - define @intrinsic_vfsgnjn_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32( - , - , - , - iXLen); - define @intrinsic_vfsgnjn_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32( - , - , - , - iXLen); - define @intrinsic_vfsgnjn_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32( - , - , - , - iXLen); - define @intrinsic_vfsgnjn_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfsgnjn_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -523,12 +369,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64( - , - , - , - iXLen); - define @intrinsic_vfsgnjn_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -545,14 +385,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -570,12 +402,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64( - , - , - , - iXLen); - define @intrinsic_vfsgnjn_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -592,14 +418,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64( - , - , - , - iXLen); - define @intrinsic_vfsgnjn_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64( - , - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vfsgnjn_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64( - , - , - , - iXLen); - define @intrinsic_vfsgnjn_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -712,12 +502,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv1f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfsgnjn_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -734,14 +518,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv1f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -759,12 +535,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv2f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfsgnjn_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -781,14 +551,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv2f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -806,12 +568,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfsgnjn.nxv4f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfsgnjn_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -828,14 +584,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv4f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv8f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfsgnjn_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv8f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv16f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfsgnjn_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv16f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv32f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfsgnjn_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # 
%entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv32f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv1f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfsgnjn_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv1f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -1041,12 +733,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv2f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfsgnjn_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -1063,14 +749,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv2f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -1088,12 +766,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv4f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfsgnjn_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -1110,14 +782,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv4f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -1135,12 +799,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv8f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfsgnjn_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1157,14 +815,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv8f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1182,12 +832,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv16f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfsgnjn_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -1204,14 +848,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv16f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -1229,12 +865,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv1f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfsgnjn_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1251,14 +881,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv1f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1276,12 +898,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv2f64.f64( - , - , - double, - iXLen); - 
define @intrinsic_vfsgnjn_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1298,14 +914,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv2f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1323,12 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv4f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfsgnjn_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -1345,14 +947,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv4f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -1370,12 +964,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.nxv8f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfsgnjn_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry @@ -1392,14 +980,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjn.mask.nxv8f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjn_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-bf.ll index e51a42e2b8cea..b31a3290477ae 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 
-mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfsgnjx.nxv1bf16.nxv1bf16( - , - , - , - iXLen); - define @intrinsic_vfsgnjx_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv2bf16.nxv2bf16( - , - , - , - iXLen); - define @intrinsic_vfsgnjx_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv4bf16.nxv4bf16( - , - , - , - iXLen); - define @intrinsic_vfsgnjx_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv8bf16.nxv8bf16( - , - , - , - iXLen); - define 
@intrinsic_vfsgnjx_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv16bf16.nxv16bf16( - , - , - , - iXLen); - define @intrinsic_vfsgnjx_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv32bf16.nxv32bf16( - , - , - , - iXLen); - define @intrinsic_vfsgnjx_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv32bf16_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -287,12 +203,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv1bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfsgnjx_vf_nxv1bf16_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv1bf16_nxv1bf16_bf16: 
; CHECK: # %bb.0: # %entry @@ -309,14 +219,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vf_nxv1bf16_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1bf16_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv2bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfsgnjx_vf_nxv2bf16_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv2bf16_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vf_nxv2bf16_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2bf16_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv4bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfsgnjx_vf_nxv4bf16_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv4bf16_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vf_nxv4bf16_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4bf16_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv8bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfsgnjx_vf_nxv8bf16_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv8bf16_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define 
@intrinsic_vfsgnjx_mask_vf_nxv8bf16_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8bf16_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv16bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfsgnjx_vf_nxv16bf16_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv16bf16_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vf_nxv16bf16_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv16bf16_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv32bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfsgnjx_vf_nxv32bf16_nxv32bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv32bf16_nxv32bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -544,14 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vf_nxv32bf16_nxv32bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv32bf16_nxv32bf16_bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx.ll index cc4c253a8b164..08a5b1f33a910 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16( - , - , - , - iXLen); - define @intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv2f16.nxv2f16( - , - , - , - iXLen); - define @intrinsic_vfsgnjx_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv2f16.nxv2f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv4f16.nxv4f16( - , - , - , - iXLen); - define @intrinsic_vfsgnjx_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv4f16.nxv4f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv8f16.nxv8f16( - , - , - , - iXLen); - define @intrinsic_vfsgnjx_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv8f16.nxv8f16( - , - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vfsgnjx_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv16f16.nxv16f16( - , - , - , - iXLen); - define @intrinsic_vfsgnjx_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv16f16.nxv16f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv32f16.nxv32f16( - , - , - , - iXLen); - define @intrinsic_vfsgnjx_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv32f16.nxv32f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -287,12 +203,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32( - , - , - , - iXLen); - define @intrinsic_vfsgnjx_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -309,14 +219,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ 
-334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32( - , - , - , - iXLen); - define @intrinsic_vfsgnjx_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32( - , - , - , - iXLen); - define @intrinsic_vfsgnjx_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32( - , - , - , - iXLen); - define @intrinsic_vfsgnjx_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32( - , - , - , - iXLen); - define @intrinsic_vfsgnjx_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfsgnjx_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -523,12 +369,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64( - , - , - , - iXLen); - define @intrinsic_vfsgnjx_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -545,14 +385,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -570,12 +402,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64( - , - , - , - iXLen); - define @intrinsic_vfsgnjx_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -592,14 +418,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64( - , - , - , - iXLen); - define @intrinsic_vfsgnjx_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64( - , - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vfsgnjx_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64( - , - , - , - iXLen); - define @intrinsic_vfsgnjx_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -712,12 +502,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv1f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfsgnjx_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -734,14 +518,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv1f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -759,12 +535,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv2f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfsgnjx_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -781,14 +551,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv2f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -806,12 +568,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfsgnjx.nxv4f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfsgnjx_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -828,14 +584,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv4f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv8f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfsgnjx_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv8f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv16f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfsgnjx_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv16f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv32f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfsgnjx_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # 
%entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv32f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv1f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfsgnjx_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv1f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -1041,12 +733,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv2f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfsgnjx_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -1063,14 +749,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv2f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -1088,12 +766,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv4f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfsgnjx_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -1110,14 +782,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv4f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -1135,12 +799,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv8f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfsgnjx_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1157,14 +815,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv8f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1182,12 +832,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv16f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfsgnjx_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -1204,14 +848,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv16f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -1229,12 +865,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv1f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfsgnjx_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1251,14 +881,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv1f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1276,12 +898,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv2f64.f64( - , - , - double, - iXLen); - 
define @intrinsic_vfsgnjx_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1298,14 +914,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv2f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1323,12 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv4f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfsgnjx_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -1345,14 +947,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv4f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -1370,12 +964,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.nxv8f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfsgnjx_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry @@ -1392,14 +980,6 @@ entry: ret %a } -declare @llvm.riscv.vfsgnjx.mask.nxv8f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfsgnjx_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1down-bf.ll index c65719c3a4c1a..43cf64cdcf6f2 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1down-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc 
-mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfslide1down.nxv1bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfslide1down_vf_nxv1bf16_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1bf16_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfslide1down_mask_vf_nxv1bf16_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1bf16_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.nxv2bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfslide1down_vf_nxv2bf16_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2bf16_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfslide1down_mask_vf_nxv2bf16_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2bf16_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.nxv4bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfslide1down_vf_nxv4bf16_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4bf16_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfslide1down_mask_vf_nxv4bf16_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4bf16_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -145,12 
+103,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.nxv8bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfslide1down_vf_nxv8bf16_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8bf16_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfslide1down_mask_vf_nxv8bf16_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8bf16_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.nxv16bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfslide1down_vf_nxv16bf16_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv16bf16_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfslide1down_mask_vf_nxv16bf16_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv16bf16_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.nxv32bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfslide1down_vf_nxv32bf16_nxv32bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv32bf16_nxv32bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfslide1down_mask_vf_nxv32bf16_nxv32bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv32bf16_nxv32bf16_bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1down.ll 
b/llvm/test/CodeGen/RISCV/rvv/vfslide1down.ll index e05e2160a377a..897004a0a806c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfslide1down.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1down.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfslide1down.nxv1f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.mask.nxv1f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.nxv2f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfslide1down_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.mask.nxv2f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.nxv4f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfslide1down_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.mask.nxv4f16.f16( - , - , - half, - , - iXLen, - iXLen); - define 
@intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.nxv8f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfslide1down_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.mask.nxv8f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.nxv16f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfslide1down_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.mask.nxv16f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.nxv32f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfslide1down_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.mask.nxv32f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfslide1down_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfslide1down_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.nxv1f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfslide1down_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.mask.nxv1f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -333,12 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.nxv2f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfslide1down_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -355,14 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.mask.nxv2f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -380,12 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.nxv4f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfslide1down_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -402,14 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.mask.nxv4f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -427,12 +301,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfslide1down.nxv8f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfslide1down_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -449,14 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.mask.nxv8f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -474,12 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.nxv16f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfslide1down_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -496,14 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.mask.nxv16f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfslide1down_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -521,12 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.nxv1f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfslide1down_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -543,14 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.mask.nxv1f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfslide1down_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -568,12 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.nxv2f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfslide1down_vf_nxv2f64_nxv2f64_f64( 
%0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -590,14 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.mask.nxv2f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfslide1down_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -615,12 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.nxv4f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfslide1down_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -637,14 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.mask.nxv4f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfslide1down_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -662,12 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.nxv8f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfslide1down_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry @@ -684,14 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1down.mask.nxv8f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfslide1down_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-bf.ll index 57a48986fdfcd..860082bac85c5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s 
| llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfslide1up.nxv1bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfslide1up_vf_nxv1bf16_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1bf16_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -27,14 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfslide1up_mask_vf_nxv1bf16_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1bf16_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -52,12 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.nxv2bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfslide1up_vf_nxv2bf16_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2bf16_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -75,14 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfslide1up_mask_vf_nxv2bf16_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2bf16_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -100,12 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.nxv4bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfslide1up_vf_nxv4bf16_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4bf16_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -123,14 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfslide1up_mask_vf_nxv4bf16_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4bf16_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -148,12 +106,6 @@ entry: ret %a } 
-declare @llvm.riscv.vfslide1up.nxv8bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfslide1up_vf_nxv8bf16_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8bf16_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -171,14 +123,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfslide1up_mask_vf_nxv8bf16_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8bf16_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -196,12 +140,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.nxv16bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfslide1up_vf_nxv16bf16_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv16bf16_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -219,14 +157,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfslide1up_mask_vf_nxv16bf16_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv16bf16_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -244,12 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.nxv32bf16.bf16( - , - , - bfloat, - iXLen); - define @intrinsic_vfslide1up_vf_nxv32bf16_nxv32bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv32bf16_nxv32bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -267,14 +191,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16( - , - , - bfloat, - , - iXLen, - iXLen); - define @intrinsic_vfslide1up_mask_vf_nxv32bf16_nxv32bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv32bf16_nxv32bf16_bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1up.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1up.ll index 
642b9dec459e3..02caf5014a180 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfslide1up.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1up.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfslide1up.nxv1f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -27,14 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv1f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -52,12 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.nxv2f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfslide1up_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -75,14 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv2f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -100,12 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.nxv4f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfslide1up_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -123,14 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv4f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -148,12 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.nxv8f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfslide1up_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -171,14 +123,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv8f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -196,12 +140,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.nxv16f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfslide1up_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -219,14 +157,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv16f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -244,12 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.nxv32f16.f16( - , - , - half, - iXLen); - define @intrinsic_vfslide1up_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -267,14 +191,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv32f16.f16( - , - , - half, - , - iXLen, - iXLen); - define @intrinsic_vfslide1up_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -292,12 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.nxv1f32.f32( - , - , 
- float, - iXLen); - define @intrinsic_vfslide1up_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -315,14 +225,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv1f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -340,12 +242,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.nxv2f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfslide1up_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -363,14 +259,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv2f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -388,12 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.nxv4f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfslide1up_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -411,14 +293,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv4f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -436,12 +310,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.nxv8f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfslide1up_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f32_nxv8f32_f32: ; 
CHECK: # %bb.0: # %entry @@ -459,14 +327,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv8f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -484,12 +344,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.nxv16f32.f32( - , - , - float, - iXLen); - define @intrinsic_vfslide1up_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -507,14 +361,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv16f32.f32( - , - , - float, - , - iXLen, - iXLen); - define @intrinsic_vfslide1up_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -532,12 +378,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.nxv1f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfslide1up_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -555,14 +395,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv1f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfslide1up_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -580,12 +412,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.nxv2f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfslide1up_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -603,14 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv2f64.f64( - , - , - double, - , - iXLen, - iXLen); - 
define @intrinsic_vfslide1up_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -628,12 +446,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.nxv4f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfslide1up_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -651,14 +463,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv4f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfslide1up_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -676,12 +480,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.nxv8f64.f64( - , - , - double, - iXLen); - define @intrinsic_vfslide1up_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry @@ -699,14 +497,6 @@ entry: ret %a } -declare @llvm.riscv.vfslide1up.mask.nxv8f64.f64( - , - , - double, - , - iXLen, - iXLen); - define @intrinsic_vfslide1up_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-constrained-sdnode.ll index eeb5f3bc984d3..eeec0a75a2eb1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-constrained-sdnode.ll @@ -26,7 +26,6 @@ define @vfsqrt_nxv1bf16( %v) strictfp ret %r } - define @vfsqrt_nxv2bf16( %v) strictfp { ; CHECK-LABEL: vfsqrt_nxv2bf16: ; CHECK: # %bb.0: @@ -41,7 +40,6 @@ define @vfsqrt_nxv2bf16( %v) strictfp ret %r } - define @vfsqrt_nxv4bf16( %v) strictfp { ; CHECK-LABEL: 
vfsqrt_nxv4bf16: ; CHECK: # %bb.0: @@ -56,7 +54,6 @@ define @vfsqrt_nxv4bf16( %v) strictfp ret %r } - define @vfsqrt_nxv8bf16( %v) strictfp { ; CHECK-LABEL: vfsqrt_nxv8bf16: ; CHECK: # %bb.0: @@ -71,7 +68,6 @@ define @vfsqrt_nxv8bf16( %v) strictfp ret %r } - define @vfsqrt_nxv16bf16( %v) strictfp { ; CHECK-LABEL: vfsqrt_nxv16bf16: ; CHECK: # %bb.0: @@ -86,7 +82,6 @@ define @vfsqrt_nxv16bf16( %v) stric ret %r } - define @vfsqrt_nxv32bf16( %v) strictfp { ; CHECK-LABEL: vfsqrt_nxv32bf16: ; CHECK: # %bb.0: @@ -106,8 +101,6 @@ define @vfsqrt_nxv32bf16( %v) stric ret %r } -declare @llvm.experimental.constrained.sqrt.nxv1f16(, metadata, metadata) - define @vfsqrt_nxv1f16( %v) strictfp { ; ZVFH-LABEL: vfsqrt_nxv1f16: ; ZVFH: # %bb.0: @@ -128,8 +121,6 @@ define @vfsqrt_nxv1f16( %v) strictfp { ret %r } -declare @llvm.experimental.constrained.sqrt.nxv2f16(, metadata, metadata) - define @vfsqrt_nxv2f16( %v) strictfp { ; ZVFH-LABEL: vfsqrt_nxv2f16: ; ZVFH: # %bb.0: @@ -150,8 +141,6 @@ define @vfsqrt_nxv2f16( %v) strictfp { ret %r } -declare @llvm.experimental.constrained.sqrt.nxv4f16(, metadata, metadata) - define @vfsqrt_nxv4f16( %v) strictfp { ; ZVFH-LABEL: vfsqrt_nxv4f16: ; ZVFH: # %bb.0: @@ -172,8 +161,6 @@ define @vfsqrt_nxv4f16( %v) strictfp { ret %r } -declare @llvm.experimental.constrained.sqrt.nxv8f16(, metadata, metadata) - define @vfsqrt_nxv8f16( %v) strictfp { ; ZVFH-LABEL: vfsqrt_nxv8f16: ; ZVFH: # %bb.0: @@ -194,8 +181,6 @@ define @vfsqrt_nxv8f16( %v) strictfp { ret %r } -declare @llvm.experimental.constrained.sqrt.nxv16f16(, metadata, metadata) - define @vfsqrt_nxv16f16( %v) strictfp { ; ZVFH-LABEL: vfsqrt_nxv16f16: ; ZVFH: # %bb.0: @@ -216,8 +201,6 @@ define @vfsqrt_nxv16f16( %v) strictfp { ret %r } -declare @llvm.experimental.constrained.sqrt.nxv32f16(, metadata, metadata) - define @vfsqrt_nxv32f16( %v) strictfp { ; ZVFH-LABEL: vfsqrt_nxv32f16: ; ZVFH: # %bb.0: @@ -243,8 +226,6 @@ define @vfsqrt_nxv32f16( %v) strictfp { ret %r } -declare 
@llvm.experimental.constrained.sqrt.nxv1f32(, metadata, metadata) - define @vfsqrt_nxv1f32( %v) strictfp { ; CHECK-LABEL: vfsqrt_nxv1f32: ; CHECK: # %bb.0: @@ -255,8 +236,6 @@ define @vfsqrt_nxv1f32( %v) strictfp { ret %r } -declare @llvm.experimental.constrained.sqrt.nxv2f32(, metadata, metadata) - define @vfsqrt_nxv2f32( %v) strictfp { ; CHECK-LABEL: vfsqrt_nxv2f32: ; CHECK: # %bb.0: @@ -267,8 +246,6 @@ define @vfsqrt_nxv2f32( %v) strictfp { ret %r } -declare @llvm.experimental.constrained.sqrt.nxv4f32(, metadata, metadata) - define @vfsqrt_nxv4f32( %v) strictfp { ; CHECK-LABEL: vfsqrt_nxv4f32: ; CHECK: # %bb.0: @@ -279,8 +256,6 @@ define @vfsqrt_nxv4f32( %v) strictfp { ret %r } -declare @llvm.experimental.constrained.sqrt.nxv8f32(, metadata, metadata) - define @vfsqrt_nxv8f32( %v) strictfp { ; CHECK-LABEL: vfsqrt_nxv8f32: ; CHECK: # %bb.0: @@ -291,8 +266,6 @@ define @vfsqrt_nxv8f32( %v) strictfp { ret %r } -declare @llvm.experimental.constrained.sqrt.nxv16f32(, metadata, metadata) - define @vfsqrt_nxv16f32( %v) strictfp { ; CHECK-LABEL: vfsqrt_nxv16f32: ; CHECK: # %bb.0: @@ -303,8 +276,6 @@ define @vfsqrt_nxv16f32( %v) strictfp ret %r } -declare @llvm.experimental.constrained.sqrt.nxv1f64(, metadata, metadata) - define @vfsqrt_nxv1f64( %v) strictfp { ; CHECK-LABEL: vfsqrt_nxv1f64: ; CHECK: # %bb.0: @@ -315,8 +286,6 @@ define @vfsqrt_nxv1f64( %v) strictfp ret %r } -declare @llvm.experimental.constrained.sqrt.nxv2f64(, metadata, metadata) - define @vfsqrt_nxv2f64( %v) strictfp { ; CHECK-LABEL: vfsqrt_nxv2f64: ; CHECK: # %bb.0: @@ -327,8 +296,6 @@ define @vfsqrt_nxv2f64( %v) strictfp ret %r } -declare @llvm.experimental.constrained.sqrt.nxv4f64(, metadata, metadata) - define @vfsqrt_nxv4f64( %v) strictfp { ; CHECK-LABEL: vfsqrt_nxv4f64: ; CHECK: # %bb.0: @@ -339,8 +306,6 @@ define @vfsqrt_nxv4f64( %v) strictfp ret %r } -declare @llvm.experimental.constrained.sqrt.nxv8f64(, metadata, metadata) - define @vfsqrt_nxv8f64( %v) strictfp { ; CHECK-LABEL: vfsqrt_nxv8f64: ; 
CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-sdnode.ll index 6d7662db2b157..114842b4ef87e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-sdnode.ll @@ -101,8 +101,6 @@ define @vfsqrt_nxv32bf16( %v) { ret %r } -declare @llvm.sqrt.nxv1f16() - define @vfsqrt_nxv1f16( %v) { ; ZVFH-LABEL: vfsqrt_nxv1f16: ; ZVFH: # %bb.0: @@ -123,8 +121,6 @@ define @vfsqrt_nxv1f16( %v) { ret %r } -declare @llvm.sqrt.nxv2f16() - define @vfsqrt_nxv2f16( %v) { ; ZVFH-LABEL: vfsqrt_nxv2f16: ; ZVFH: # %bb.0: @@ -145,8 +141,6 @@ define @vfsqrt_nxv2f16( %v) { ret %r } -declare @llvm.sqrt.nxv4f16() - define @vfsqrt_nxv4f16( %v) { ; ZVFH-LABEL: vfsqrt_nxv4f16: ; ZVFH: # %bb.0: @@ -167,8 +161,6 @@ define @vfsqrt_nxv4f16( %v) { ret %r } -declare @llvm.sqrt.nxv8f16() - define @vfsqrt_nxv8f16( %v) { ; ZVFH-LABEL: vfsqrt_nxv8f16: ; ZVFH: # %bb.0: @@ -189,8 +181,6 @@ define @vfsqrt_nxv8f16( %v) { ret %r } -declare @llvm.sqrt.nxv16f16() - define @vfsqrt_nxv16f16( %v) { ; ZVFH-LABEL: vfsqrt_nxv16f16: ; ZVFH: # %bb.0: @@ -211,8 +201,6 @@ define @vfsqrt_nxv16f16( %v) { ret %r } -declare @llvm.sqrt.nxv32f16() - define @vfsqrt_nxv32f16( %v) { ; ZVFH-LABEL: vfsqrt_nxv32f16: ; ZVFH: # %bb.0: @@ -238,8 +226,6 @@ define @vfsqrt_nxv32f16( %v) { ret %r } -declare @llvm.sqrt.nxv1f32() - define @vfsqrt_nxv1f32( %v) { ; CHECK-LABEL: vfsqrt_nxv1f32: ; CHECK: # %bb.0: @@ -250,8 +236,6 @@ define @vfsqrt_nxv1f32( %v) { ret %r } -declare @llvm.sqrt.nxv2f32() - define @vfsqrt_nxv2f32( %v) { ; CHECK-LABEL: vfsqrt_nxv2f32: ; CHECK: # %bb.0: @@ -262,8 +246,6 @@ define @vfsqrt_nxv2f32( %v) { ret %r } -declare @llvm.sqrt.nxv4f32() - define @vfsqrt_nxv4f32( %v) { ; CHECK-LABEL: vfsqrt_nxv4f32: ; CHECK: # %bb.0: @@ -274,8 +256,6 @@ define @vfsqrt_nxv4f32( %v) { ret %r } -declare @llvm.sqrt.nxv8f32() - define @vfsqrt_nxv8f32( %v) { ; CHECK-LABEL: vfsqrt_nxv8f32: ; CHECK: # %bb.0: @@ -286,8 +266,6 @@ define 
@vfsqrt_nxv8f32( %v) { ret %r } -declare @llvm.sqrt.nxv16f32() - define @vfsqrt_nxv16f32( %v) { ; CHECK-LABEL: vfsqrt_nxv16f32: ; CHECK: # %bb.0: @@ -298,8 +276,6 @@ define @vfsqrt_nxv16f32( %v) { ret %r } -declare @llvm.sqrt.nxv1f64() - define @vfsqrt_nxv1f64( %v) { ; CHECK-LABEL: vfsqrt_nxv1f64: ; CHECK: # %bb.0: @@ -310,8 +286,6 @@ define @vfsqrt_nxv1f64( %v) { ret %r } -declare @llvm.sqrt.nxv2f64() - define @vfsqrt_nxv2f64( %v) { ; CHECK-LABEL: vfsqrt_nxv2f64: ; CHECK: # %bb.0: @@ -322,8 +296,6 @@ define @vfsqrt_nxv2f64( %v) { ret %r } -declare @llvm.sqrt.nxv4f64() - define @vfsqrt_nxv4f64( %v) { ; CHECK-LABEL: vfsqrt_nxv4f64: ; CHECK: # %bb.0: @@ -334,8 +306,6 @@ define @vfsqrt_nxv4f64( %v) { ret %r } -declare @llvm.sqrt.nxv8f64() - define @vfsqrt_nxv8f64( %v) { ; CHECK-LABEL: vfsqrt_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll index 4336b27eb134a..451b13edb794e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll @@ -12,8 +12,6 @@ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ ; RUN: --check-prefixes=CHECK,ZVFHMIN -declare @llvm.vp.sqrt.nxv1bf16(, , i32) - define @vfsqrt_vv_nxv1bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv1bf16: ; CHECK: # %bb.0: @@ -42,8 +40,6 @@ define @vfsqrt_vv_nxv1bf16_unmasked( ret %v } -declare @llvm.vp.sqrt.nxv2bf16(, , i32) - define @vfsqrt_vv_nxv2bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv2bf16: ; CHECK: # %bb.0: @@ -72,8 +68,6 @@ define @vfsqrt_vv_nxv2bf16_unmasked( ret %v } -declare @llvm.vp.sqrt.nxv4bf16(, , i32) - define @vfsqrt_vv_nxv4bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv4bf16: ; CHECK: # %bb.0: @@ -102,8 +96,6 @@ define @vfsqrt_vv_nxv4bf16_unmasked( ret %v } -declare @llvm.vp.sqrt.nxv8bf16(, , i32) - define @vfsqrt_vv_nxv8bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv8bf16: ; CHECK: # %bb.0: @@ -132,8 
+124,6 @@ define @vfsqrt_vv_nxv8bf16_unmasked( ret %v } -declare @llvm.vp.sqrt.nxv16bf16(, , i32) - define @vfsqrt_vv_nxv16bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv16bf16: ; CHECK: # %bb.0: @@ -162,8 +152,6 @@ define @vfsqrt_vv_nxv16bf16_unmasked( %v } -declare @llvm.vp.sqrt.nxv32bf16(, , i32) - define @vfsqrt_vv_nxv32bf16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv32bf16: ; CHECK: # %bb.0: @@ -233,7 +221,6 @@ define @vfsqrt_vv_nxv32bf16_unmasked( @llvm.vp.sqrt.nxv32bf16( %va, splat (i1 true), i32 %evl) ret %v } -declare @llvm.vp.sqrt.nxv1f16(, , i32) define @vfsqrt_vv_nxv1f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsqrt_vv_nxv1f16: @@ -275,8 +262,6 @@ define @vfsqrt_vv_nxv1f16_unmasked( %va, ret %v } -declare @llvm.vp.sqrt.nxv2f16(, , i32) - define @vfsqrt_vv_nxv2f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsqrt_vv_nxv2f16: ; ZVFH: # %bb.0: @@ -317,8 +302,6 @@ define @vfsqrt_vv_nxv2f16_unmasked( %va, ret %v } -declare @llvm.vp.sqrt.nxv4f16(, , i32) - define @vfsqrt_vv_nxv4f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsqrt_vv_nxv4f16: ; ZVFH: # %bb.0: @@ -359,8 +342,6 @@ define @vfsqrt_vv_nxv4f16_unmasked( %va, ret %v } -declare @llvm.vp.sqrt.nxv8f16(, , i32) - define @vfsqrt_vv_nxv8f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsqrt_vv_nxv8f16: ; ZVFH: # %bb.0: @@ -401,8 +382,6 @@ define @vfsqrt_vv_nxv8f16_unmasked( %va, ret %v } -declare @llvm.vp.sqrt.nxv16f16(, , i32) - define @vfsqrt_vv_nxv16f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsqrt_vv_nxv16f16: ; ZVFH: # %bb.0: @@ -443,8 +422,6 @@ define @vfsqrt_vv_nxv16f16_unmasked( %v ret %v } -declare @llvm.vp.sqrt.nxv32f16(, , i32) - define @vfsqrt_vv_nxv32f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsqrt_vv_nxv32f16: ; ZVFH: # %bb.0: @@ -527,8 +504,6 @@ define @vfsqrt_vv_nxv32f16_unmasked( %v ret %v } -declare @llvm.vp.sqrt.nxv1f32(, , i32) - define @vfsqrt_vv_nxv1f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv1f32: ; CHECK: # %bb.0: 
@@ -549,8 +524,6 @@ define @vfsqrt_vv_nxv1f32_unmasked( %va ret %v } -declare @llvm.vp.sqrt.nxv2f32(, , i32) - define @vfsqrt_vv_nxv2f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv2f32: ; CHECK: # %bb.0: @@ -571,8 +544,6 @@ define @vfsqrt_vv_nxv2f32_unmasked( %va ret %v } -declare @llvm.vp.sqrt.nxv4f32(, , i32) - define @vfsqrt_vv_nxv4f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv4f32: ; CHECK: # %bb.0: @@ -593,8 +564,6 @@ define @vfsqrt_vv_nxv4f32_unmasked( %va ret %v } -declare @llvm.vp.sqrt.nxv8f32(, , i32) - define @vfsqrt_vv_nxv8f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv8f32: ; CHECK: # %bb.0: @@ -615,8 +584,6 @@ define @vfsqrt_vv_nxv8f32_unmasked( %va ret %v } -declare @llvm.vp.sqrt.nxv16f32(, , i32) - define @vfsqrt_vv_nxv16f32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv16f32: ; CHECK: # %bb.0: @@ -637,8 +604,6 @@ define @vfsqrt_vv_nxv16f32_unmasked( ret %v } -declare @llvm.vp.sqrt.nxv1f64(, , i32) - define @vfsqrt_vv_nxv1f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv1f64: ; CHECK: # %bb.0: @@ -659,8 +624,6 @@ define @vfsqrt_vv_nxv1f64_unmasked( % ret %v } -declare @llvm.vp.sqrt.nxv2f64(, , i32) - define @vfsqrt_vv_nxv2f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv2f64: ; CHECK: # %bb.0: @@ -681,8 +644,6 @@ define @vfsqrt_vv_nxv2f64_unmasked( % ret %v } -declare @llvm.vp.sqrt.nxv4f64(, , i32) - define @vfsqrt_vv_nxv4f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv4f64: ; CHECK: # %bb.0: @@ -703,8 +664,6 @@ define @vfsqrt_vv_nxv4f64_unmasked( % ret %v } -declare @llvm.vp.sqrt.nxv7f64(, , i32) - define @vfsqrt_vv_nxv7f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv7f64: ; CHECK: # %bb.0: @@ -725,8 +684,6 @@ define @vfsqrt_vv_nxv7f64_unmasked( % ret %v } -declare @llvm.vp.sqrt.nxv8f64(, , i32) - define @vfsqrt_vv_nxv8f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv8f64: ; CHECK: # %bb.0: @@ -748,7 +705,6 @@ define 
@vfsqrt_vv_nxv8f64_unmasked( % } ; Test splitting. -declare @llvm.vp.sqrt.nxv16f64(, , i32) define @vfsqrt_vv_nxv16f64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv16f64: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsqrt.ll b/llvm/test/CodeGen/RISCV/rvv/vfsqrt.ll index 6e495afe25639..c8d5060a38065 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfsqrt.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsqrt.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfsqrt.nxv1f16( - , - , - iXLen, iXLen); - define @intrinsic_vfsqrt_v_nxv1f16_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -26,12 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.mask.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -50,11 +39,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.nxv2f16( - , - , - iXLen, iXLen); - define @intrinsic_vfsqrt_v_nxv2f16_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -72,12 +56,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.mask.nxv2f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsqrt_mask_v_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -96,11 +74,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.nxv4f16( - , - , - iXLen, iXLen); - define @intrinsic_vfsqrt_v_nxv4f16_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -118,12 +91,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.mask.nxv4f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsqrt_mask_v_nxv4f16_nxv4f16( %0, %1, %2, 
iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -142,11 +109,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.nxv8f16( - , - , - iXLen, iXLen); - define @intrinsic_vfsqrt_v_nxv8f16_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -164,12 +126,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.mask.nxv8f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsqrt_mask_v_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -188,11 +144,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.nxv16f16( - , - , - iXLen, iXLen); - define @intrinsic_vfsqrt_v_nxv16f16_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -210,12 +161,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.mask.nxv16f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsqrt_mask_v_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -234,11 +179,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.nxv32f16( - , - , - iXLen, iXLen); - define @intrinsic_vfsqrt_v_nxv32f16_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -256,12 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.mask.nxv32f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsqrt_mask_v_nxv32f16_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -280,11 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.nxv1f32( - , - , - iXLen, iXLen); - define @intrinsic_vfsqrt_v_nxv1f32_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -302,12 +231,6 @@ entry: ret %a } 
-declare @llvm.riscv.vfsqrt.mask.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsqrt_mask_v_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -326,11 +249,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.nxv2f32( - , - , - iXLen, iXLen); - define @intrinsic_vfsqrt_v_nxv2f32_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -348,12 +266,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.mask.nxv2f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsqrt_mask_v_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -372,11 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.nxv4f32( - , - , - iXLen, iXLen); - define @intrinsic_vfsqrt_v_nxv4f32_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -394,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.mask.nxv4f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsqrt_mask_v_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -418,11 +319,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.nxv8f32( - , - , - iXLen, iXLen); - define @intrinsic_vfsqrt_v_nxv8f32_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -440,12 +336,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.mask.nxv8f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsqrt_mask_v_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -464,11 +354,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.nxv16f32( - , - , - iXLen, iXLen); - define @intrinsic_vfsqrt_v_nxv16f32_nxv16f32( %0, iXLen %1) 
nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -486,12 +371,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.mask.nxv16f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsqrt_mask_v_nxv16f32_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -510,11 +389,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.nxv1f64( - , - , - iXLen, iXLen); - define @intrinsic_vfsqrt_v_nxv1f64_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -532,12 +406,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.mask.nxv1f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsqrt_mask_v_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -556,11 +424,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.nxv2f64( - , - , - iXLen, iXLen); - define @intrinsic_vfsqrt_v_nxv2f64_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -578,12 +441,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.mask.nxv2f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsqrt_mask_v_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -602,11 +459,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.nxv4f64( - , - , - iXLen, iXLen); - define @intrinsic_vfsqrt_v_nxv4f64_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -624,12 +476,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.mask.nxv4f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsqrt_mask_v_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -648,11 +494,6 @@ entry: ret 
%a } -declare @llvm.riscv.vfsqrt.nxv8f64( - , - , - iXLen, iXLen); - define @intrinsic_vfsqrt_v_nxv8f64_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -670,12 +511,6 @@ entry: ret %a } -declare @llvm.riscv.vfsqrt.mask.nxv8f64( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsqrt_mask_v_nxv8f64_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-bf.ll index aea75211b70b5..b568c19de0edd 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfsub-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfsub.nxv1bf16.nxv1bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfsub_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv2bf16.nxv2bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfsub_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfsub_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv4bf16.nxv4bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfsub_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv8bf16.nxv8bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfsub_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv16bf16.nxv16bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfsub_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfsub.nxv32bf16.nxv32bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfsub_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv32bf16_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -281,12 +203,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv1bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfsub_vf_nxv1bf16_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv1bf16_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -303,13 +219,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv1bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vf_nxv1bf16_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1bf16_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -327,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv2bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfsub_vf_nxv2bf16_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv2bf16_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -349,13 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv2bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vf_nxv2bf16_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2bf16_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -373,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv4bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfsub_vf_nxv4bf16_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind 
{ ; CHECK-LABEL: intrinsic_vfsub_vf_nxv4bf16_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -395,13 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv4bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vf_nxv4bf16_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4bf16_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -419,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv8bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfsub_vf_nxv8bf16_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv8bf16_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -441,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv8bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vf_nxv8bf16_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8bf16_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -465,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv16bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfsub_vf_nxv16bf16_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv16bf16_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -487,13 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv16bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vf_nxv16bf16_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16bf16_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -511,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv32bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfsub_vf_nxv32bf16_nxv32bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv32bf16_nxv32bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -533,13 +384,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfsub.mask.nxv32bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vf_nxv32bf16_nxv32bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv32bf16_nxv32bf16_bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-constrained-sdnode.ll index cd8f890251c77..d50b8c3c0e81a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfsub-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-constrained-sdnode.ll @@ -286,7 +286,6 @@ define @vfsub_vf_nxv32bf16( %va, bf ret %vc } -declare @llvm.experimental.constrained.fsub.nxv1f16(, , metadata, metadata) define @vfsub_vv_nxv1f16( %va, %vb) strictfp { ; ZVFH-LABEL: vfsub_vv_nxv1f16: ; ZVFH: # %bb.0: # %entry @@ -334,7 +333,6 @@ define @vfsub_vf_nxv1f16( %va, half %b) s ret %vc } -declare @llvm.experimental.constrained.fsub.nxv2f16(, , metadata, metadata) define @vfsub_vv_nxv2f16( %va, %vb) strictfp { ; ZVFH-LABEL: vfsub_vv_nxv2f16: ; ZVFH: # %bb.0: # %entry @@ -382,7 +380,6 @@ define @vfsub_vf_nxv2f16( %va, half %b) s ret %vc } -declare @llvm.experimental.constrained.fsub.nxv4f16(, , metadata, metadata) define @vfsub_vv_nxv4f16( %va, %vb) strictfp { ; ZVFH-LABEL: vfsub_vv_nxv4f16: ; ZVFH: # %bb.0: # %entry @@ -430,7 +427,6 @@ define @vfsub_vf_nxv4f16( %va, half %b) s ret %vc } -declare @llvm.experimental.constrained.fsub.nxv8f16(, , metadata, metadata) define @vfsub_vv_nxv8f16( %va, %vb) strictfp { ; ZVFH-LABEL: vfsub_vv_nxv8f16: ; ZVFH: # %bb.0: # %entry @@ -503,7 +499,6 @@ define @vfsub_fv_nxv8f16( %va, half %b) s ret %vc } -declare @llvm.experimental.constrained.fsub.nxv16f16(, , metadata, metadata) define @vfsub_vv_nxv16f16( %va, %vb) strictfp { ; ZVFH-LABEL: vfsub_vv_nxv16f16: ; ZVFH: # %bb.0: # %entry @@ -551,7 +546,6 @@ define @vfsub_vf_nxv16f16( %va, half %b ret %vc } -declare @llvm.experimental.constrained.fsub.nxv32f16(, , metadata, metadata) define 
@vfsub_vv_nxv32f16( %va, %vb) strictfp { ; ZVFH-LABEL: vfsub_vv_nxv32f16: ; ZVFH: # %bb.0: # %entry @@ -644,7 +638,6 @@ define @vfsub_vf_nxv32f16( %va, half %b ret %vc } -declare @llvm.experimental.constrained.fsub.nxv1f32(, , metadata, metadata) define @vfsub_vv_nxv1f32( %va, %vb) strictfp { ; CHECK-LABEL: vfsub_vv_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -668,7 +661,6 @@ define @vfsub_vf_nxv1f32( %va, float %b ret %vc } -declare @llvm.experimental.constrained.fsub.nxv2f32(, , metadata, metadata) define @vfsub_vv_nxv2f32( %va, %vb) strictfp { ; CHECK-LABEL: vfsub_vv_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -692,7 +684,6 @@ define @vfsub_vf_nxv2f32( %va, float %b ret %vc } -declare @llvm.experimental.constrained.fsub.nxv4f32(, , metadata, metadata) define @vfsub_vv_nxv4f32( %va, %vb) strictfp { ; CHECK-LABEL: vfsub_vv_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -716,7 +707,6 @@ define @vfsub_vf_nxv4f32( %va, float %b ret %vc } -declare @llvm.experimental.constrained.fsub.nxv8f32(, , metadata, metadata) define @vfsub_vv_nxv8f32( %va, %vb) strictfp { ; CHECK-LABEL: vfsub_vv_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -752,7 +742,6 @@ define @vfsub_fv_nxv8f32( %va, float %b ret %vc } -declare @llvm.experimental.constrained.fsub.nxv16f32(, , metadata, metadata) define @vfsub_vv_nxv16f32( %va, %vb) strictfp { ; CHECK-LABEL: vfsub_vv_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -776,7 +765,6 @@ define @vfsub_vf_nxv16f32( %va, float ret %vc } -declare @llvm.experimental.constrained.fsub.nxv1f64(, , metadata, metadata) define @vfsub_vv_nxv1f64( %va, %vb) strictfp { ; CHECK-LABEL: vfsub_vv_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -800,7 +788,6 @@ define @vfsub_vf_nxv1f64( %va, double ret %vc } -declare @llvm.experimental.constrained.fsub.nxv2f64(, , metadata, metadata) define @vfsub_vv_nxv2f64( %va, %vb) strictfp { ; CHECK-LABEL: vfsub_vv_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -824,7 +811,6 @@ define @vfsub_vf_nxv2f64( %va, double ret %vc } -declare @llvm.experimental.constrained.fsub.nxv4f64(, 
, metadata, metadata) define @vfsub_vv_nxv4f64( %va, %vb) strictfp { ; CHECK-LABEL: vfsub_vv_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -848,7 +834,6 @@ define @vfsub_vf_nxv4f64( %va, double ret %vc } -declare @llvm.experimental.constrained.fsub.nxv8f64(, , metadata, metadata) define @vfsub_vv_nxv8f64( %va, %vb) strictfp { ; CHECK-LABEL: vfsub_vv_nxv8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll index dc0bfbd0f76dd..6637aced3cdac 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll @@ -12,8 +12,6 @@ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ ; RUN: --check-prefixes=CHECK,ZVFHMIN -declare @llvm.vp.fsub.nxv1bf16(, , , i32) - define @vfsub_vv_nxv1bf16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_nxv1bf16: ; CHECK: # %bb.0: @@ -82,8 +80,6 @@ define @vfsub_vf_nxv1bf16_unmasked( % ret %v } -declare @llvm.vp.fsub.nxv2bf16(, , , i32) - define @vfsub_vv_nxv2bf16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_nxv2bf16: ; CHECK: # %bb.0: @@ -152,8 +148,6 @@ define @vfsub_vf_nxv2bf16_unmasked( % ret %v } -declare @llvm.vp.fsub.nxv4bf16(, , , i32) - define @vfsub_vv_nxv4bf16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_nxv4bf16: ; CHECK: # %bb.0: @@ -222,8 +216,6 @@ define @vfsub_vf_nxv4bf16_unmasked( % ret %v } -declare @llvm.vp.fsub.nxv8bf16(, , , i32) - define @vfsub_vv_nxv8bf16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_nxv8bf16: ; CHECK: # %bb.0: @@ -292,8 +284,6 @@ define @vfsub_vf_nxv8bf16_unmasked( % ret %v } -declare @llvm.vp.fsub.nxv16bf16(, , , i32) - define @vfsub_vv_nxv16bf16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_nxv16bf16: ; CHECK: # %bb.0: @@ -362,8 +352,6 @@ define @vfsub_vf_nxv16bf16_unmasked( %v } -declare @llvm.vp.fsub.nxv32bf16(, , , i32) - define @vfsub_vv_nxv32bf16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_nxv32bf16: ; 
CHECK: # %bb.0: @@ -594,7 +582,6 @@ define @vfsub_vf_nxv32bf16_unmasked( @llvm.vp.fsub.nxv32bf16( %va, %vb, splat (i1 true), i32 %evl) ret %v } -declare @llvm.vp.fsub.nxv1f16(, , , i32) define @vfsub_vv_nxv1f16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsub_vv_nxv1f16: @@ -688,8 +675,6 @@ define @vfsub_vf_nxv1f16_unmasked( %va, h ret %v } -declare @llvm.vp.fsub.nxv2f16(, , , i32) - define @vfsub_vv_nxv2f16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsub_vv_nxv2f16: ; ZVFH: # %bb.0: @@ -782,8 +767,6 @@ define @vfsub_vf_nxv2f16_unmasked( %va, h ret %v } -declare @llvm.vp.fsub.nxv4f16(, , , i32) - define @vfsub_vv_nxv4f16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsub_vv_nxv4f16: ; ZVFH: # %bb.0: @@ -876,8 +859,6 @@ define @vfsub_vf_nxv4f16_unmasked( %va, h ret %v } -declare @llvm.vp.fsub.nxv8f16(, , , i32) - define @vfsub_vv_nxv8f16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsub_vv_nxv8f16: ; ZVFH: # %bb.0: @@ -970,8 +951,6 @@ define @vfsub_vf_nxv8f16_unmasked( %va, h ret %v } -declare @llvm.vp.fsub.nxv16f16(, , , i32) - define @vfsub_vv_nxv16f16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsub_vv_nxv16f16: ; ZVFH: # %bb.0: @@ -1064,8 +1043,6 @@ define @vfsub_vf_nxv16f16_unmasked( %va ret %v } -declare @llvm.vp.fsub.nxv32f16(, , , i32) - define @vfsub_vv_nxv32f16( %va, %b, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfsub_vv_nxv32f16: ; ZVFH: # %bb.0: @@ -1321,8 +1298,6 @@ define @vfsub_vf_nxv32f16_unmasked( %va ret %v } -declare @llvm.vp.fsub.nxv1f32(, , , i32) - define @vfsub_vv_nxv1f32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_nxv1f32: ; CHECK: # %bb.0: @@ -1367,8 +1342,6 @@ define @vfsub_vf_nxv1f32_unmasked( %va, ret %v } -declare @llvm.vp.fsub.nxv2f32(, , , i32) - define @vfsub_vv_nxv2f32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_nxv2f32: ; CHECK: # %bb.0: @@ -1413,8 +1386,6 @@ define @vfsub_vf_nxv2f32_unmasked( %va, ret %v } -declare @llvm.vp.fsub.nxv4f32(, , , i32) - define @vfsub_vv_nxv4f32( %va, %b, 
%m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_nxv4f32: ; CHECK: # %bb.0: @@ -1459,8 +1430,6 @@ define @vfsub_vf_nxv4f32_unmasked( %va, ret %v } -declare @llvm.vp.fsub.nxv8f32(, , , i32) - define @vfsub_vv_nxv8f32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_nxv8f32: ; CHECK: # %bb.0: @@ -1505,8 +1474,6 @@ define @vfsub_vf_nxv8f32_unmasked( %va, ret %v } -declare @llvm.vp.fsub.nxv16f32(, , , i32) - define @vfsub_vv_nxv16f32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_nxv16f32: ; CHECK: # %bb.0: @@ -1551,8 +1518,6 @@ define @vfsub_vf_nxv16f32_unmasked( % ret %v } -declare @llvm.vp.fsub.nxv1f64(, , , i32) - define @vfsub_vv_nxv1f64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_nxv1f64: ; CHECK: # %bb.0: @@ -1597,8 +1562,6 @@ define @vfsub_vf_nxv1f64_unmasked( %v ret %v } -declare @llvm.vp.fsub.nxv2f64(, , , i32) - define @vfsub_vv_nxv2f64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_nxv2f64: ; CHECK: # %bb.0: @@ -1643,8 +1606,6 @@ define @vfsub_vf_nxv2f64_unmasked( %v ret %v } -declare @llvm.vp.fsub.nxv4f64(, , , i32) - define @vfsub_vv_nxv4f64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_nxv4f64: ; CHECK: # %bb.0: @@ -1689,8 +1650,6 @@ define @vfsub_vf_nxv4f64_unmasked( %v ret %v } -declare @llvm.vp.fsub.nxv7f64(, , , i32) - define @vfsub_vv_nxv7f64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_nxv7f64: ; CHECK: # %bb.0: @@ -1701,8 +1660,6 @@ define @vfsub_vv_nxv7f64( %va, %v } -declare @llvm.vp.fsub.nxv8f64(, , , i32) - define @vfsub_vv_nxv8f64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub.ll index 41ebfc50ed475..70b6b58e28844 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfsub.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsub.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s 
-declare @llvm.riscv.vfsub.nxv1f16.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv2f16.nxv2f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfsub_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv2f16.nxv2f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv4f16.nxv4f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfsub_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv4f16.nxv4f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv8f16.nxv8f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfsub_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: 
# %entry @@ -164,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv8f16.nxv8f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv16f16.nxv16f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfsub_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv16f16.nxv16f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv32f16.nxv32f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfsub_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv32f16.nxv32f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -281,12 +203,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv1f32.nxv1f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfsub_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -303,13 +219,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32( - , - , - , - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfsub_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -327,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv2f32.nxv2f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfsub_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -349,13 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -373,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv4f32.nxv4f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfsub_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -395,13 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -419,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv8f32.nxv8f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfsub_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -441,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -465,12 +335,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfsub.nxv16f32.nxv16f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfsub_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -487,13 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -512,12 +369,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv1f64.nxv1f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfsub_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -534,13 +385,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -558,12 +402,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv2f64.nxv2f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfsub_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -580,13 +418,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -604,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv4f64.nxv4f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfsub_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv4f64_nxv4f64_nxv4f64: ; 
CHECK: # %bb.0: # %entry @@ -626,13 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -650,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv8f64.nxv8f64( - , - , - , - iXLen, iXLen); - define @intrinsic_vfsub_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -672,13 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -697,12 +502,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv1f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfsub_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -719,13 +518,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv1f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -743,12 +535,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv2f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfsub_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -765,13 +551,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv2f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen 
%4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -789,12 +568,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv4f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfsub_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -811,13 +584,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv4f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -835,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv8f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfsub_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -857,13 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv8f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -881,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv16f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfsub_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -903,13 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv16f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -927,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv32f16.f16( - , - , - half, - iXLen, iXLen); - define 
@intrinsic_vfsub_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -949,13 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv32f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry @@ -973,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv1f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfsub_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -995,13 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv1f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -1019,12 +733,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv2f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfsub_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -1041,13 +749,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv2f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -1065,12 +766,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv4f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfsub_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -1087,13 +782,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfsub.mask.nxv4f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -1111,12 +799,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv8f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfsub_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1133,13 +815,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv8f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1157,12 +832,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv16f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfsub_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -1179,13 +848,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv16f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry @@ -1203,12 +865,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv1f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfsub_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1225,13 +881,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv1f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfsub_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1249,12 +898,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv2f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfsub_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1271,13 +914,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv2f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1295,12 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv4f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfsub_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -1317,13 +947,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv4f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -1341,12 +964,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.nxv8f64.f64( - , - , - double, - iXLen, iXLen); - define @intrinsic_vfsub_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry @@ -1363,13 +980,6 @@ entry: ret %a } -declare @llvm.riscv.vfsub.mask.nxv8f64.f64( - , - , - double, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfsub_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd-bf.ll index 62feac824efad..14edc5a57effc 100644 
--- a/llvm/test/CodeGen/RISCV/rvv/vfwadd-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f32_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -29,13 +23,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f32_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -55,12 +42,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv2f32_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -80,13 +61,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f32_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -106,12 +80,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv4f32_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -132,13 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { 
; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f32_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -158,12 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv8f32_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -184,13 +139,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f32_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -210,12 +158,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv16f32_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -236,13 +178,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv16f32_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -262,12 +197,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfwadd_vf_nxv1f32_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv1f32_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -287,13 +216,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vf_nxv1f32_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f32_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ 
-313,12 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfwadd_vf_nxv2f32_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv2f32_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -338,13 +254,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vf_nxv2f32_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f32_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -364,12 +273,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfwadd_vf_nxv4f32_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv4f32_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -389,13 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vf_nxv4f32_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f32_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -415,12 +311,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfwadd_vf_nxv8f32_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv8f32_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -440,13 +330,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vf_nxv8f32_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f32_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -466,12 +349,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16( - , - , - bfloat, - iXLen, 
iXLen); - define @intrinsic_vfwadd_vf_nxv16f32_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv16f32_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -491,13 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vf_nxv16f32_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv16f32_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd-vp.ll index f7297927db717..2437b127d594a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwadd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd-vp.ll @@ -92,6 +92,3 @@ bb: ret %tmp4 } -declare @llvm.vp.fpext.nxv2f32.nxv2f16(, , i32) -declare @llvm.vp.fadd.nxv2f32(, , , i32) -declare @llvm.vp.merge.nxv2f32(, , , i32) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd-w-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd-w-bf.ll index c5417e826bf41..2d130f9e6c2ca 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwadd-w-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd-w-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f32_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f32_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16( - , - , - 
, - iXLen, iXLen); - define @intrinsic_vfwadd.w_wv_nxv2f32_nxv2f32_nxv2bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv2f32_nxv2f32_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f32_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f32_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd.w_wv_nxv4f32_nxv4f32_nxv4bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv4f32_nxv4f32_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f32_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f32_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd.w_wv_nxv8f32_nxv8f32_nxv8bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv8f32_nxv8f32_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f32_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f32_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd.w_wv_nxv16f32_nxv16f32_nxv16bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfwadd.w_wv_nxv16f32_nxv16f32_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -255,12 +190,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv1f32.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_bf16: ; CHECK: # %bb.0: # %entry @@ -279,13 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_bf16: ; CHECK: # %bb.0: # %entry @@ -305,12 +227,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv2f32.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_bf16: ; CHECK: # %bb.0: # %entry @@ -329,13 +245,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_bf16: ; CHECK: # %bb.0: # %entry @@ -355,12 +264,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv4f32.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_bf16: ; CHECK: # %bb.0: # %entry @@ -379,13 +282,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfwadd.w.mask.nxv4f32.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_bf16: ; CHECK: # %bb.0: # %entry @@ -405,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv8f32.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_bf16: ; CHECK: # %bb.0: # %entry @@ -429,13 +319,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_bf16: ; CHECK: # %bb.0: # %entry @@ -455,12 +338,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv16f32.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_bf16: ; CHECK: # %bb.0: # %entry @@ -479,13 +356,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.ll index 5b541562978b8..4ca4a4d451bd1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwadd.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16( - , - , - , - iXLen, iXLen); - define 
@intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -29,13 +23,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -55,12 +42,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv2f32.nxv2f16.nxv2f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -80,13 +61,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.nxv2f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -106,12 +80,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv4f32.nxv4f16.nxv4f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -132,13 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.nxv4f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -158,12 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv8f32.nxv8f16.nxv8f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ 
-184,13 +139,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.nxv8f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -210,12 +158,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv16f32.nxv16f16.nxv16f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -236,13 +178,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.nxv16f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -262,12 +197,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -287,13 +216,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -313,12 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -339,13 +255,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32( - , - , - , - , - iXLen, iXLen, iXLen); - 
define @intrinsic_vfwadd_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -365,12 +274,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -391,13 +294,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -417,12 +313,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -443,13 +333,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -469,12 +352,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv1f32.nxv1f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfwadd_vf_nxv1f32_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv1f32_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -494,13 +371,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vf_nxv1f32_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f32_nxv1f16_f16: ; 
CHECK: # %bb.0: # %entry @@ -520,12 +390,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv2f32.nxv2f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfwadd_vf_nxv2f32_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv2f32_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -545,13 +409,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vf_nxv2f32_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f32_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -571,12 +428,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv4f32.nxv4f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfwadd_vf_nxv4f32_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv4f32_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -596,13 +447,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vf_nxv4f32_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f32_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -622,12 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv8f32.nxv8f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfwadd_vf_nxv8f32_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv8f32_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -647,13 +485,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vf_nxv8f32_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f32_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -673,12 +504,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv16f32.nxv16f16.f16( - , - , - half, - iXLen, iXLen); - define 
@intrinsic_vfwadd_vf_nxv16f32_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv16f32_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -698,13 +523,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vf_nxv16f32_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv16f32_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -724,12 +542,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfwadd_vf_nxv1f64_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv1f64_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -749,13 +561,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vf_nxv1f64_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f64_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -775,12 +580,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfwadd_vf_nxv2f64_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv2f64_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -800,13 +599,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vf_nxv2f64_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f64_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -826,12 +618,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfwadd_vf_nxv4f64_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv4f64_nxv4f32_f32: ; CHECK: # %bb.0: # 
%entry @@ -851,13 +637,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vf_nxv4f64_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f64_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -877,12 +656,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfwadd_vf_nxv8f64_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv8f64_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -902,13 +675,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd_mask_vf_nxv8f64_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f64_nxv8f32_f32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w.ll index 5e92ab1e290e2..7319301b82f91 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f32_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16( - , - , - , - iXLen, 
iXLen); - define @intrinsic_vfwadd.w_wv_nxv2f32_nxv2f32_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv2f32_nxv2f32_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f32_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f32_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd.w_wv_nxv4f32_nxv4f32_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv4f32_nxv4f32_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f32_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f32_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd.w_wv_nxv8f32_nxv8f32_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv8f32_nxv8f32_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f32_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f32_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv16f32.nxv16f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd.w_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv16f32_nxv16f32_nxv16f16: ; CHECK: 
# %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -255,12 +190,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd.w_wv_nxv1f64_nxv1f64_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f64_nxv1f64_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -279,13 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wv_nxv1f64_nxv1f64_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f64_nxv1f64_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -305,12 +227,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd.w_wv_nxv2f64_nxv2f64_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv2f64_nxv2f64_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -329,13 +245,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wv_nxv2f64_nxv2f64_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f64_nxv2f64_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -355,12 +264,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd.w_wv_nxv4f64_nxv4f64_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv4f64_nxv4f64_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -379,13 +282,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32( - , - , - , - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfwadd.w_mask_wv_nxv4f64_nxv4f64_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f64_nxv4f64_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -405,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwadd.w_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv8f64_nxv8f64_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -429,13 +319,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f64_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -456,12 +339,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv1f32.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_f16: ; CHECK: # %bb.0: # %entry @@ -480,13 +357,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv1f32.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_f16: ; CHECK: # %bb.0: # %entry @@ -506,12 +376,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv2f32.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_f16: ; CHECK: # %bb.0: # %entry @@ -530,13 +394,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv2f32.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_f16: ; CHECK: # %bb.0: # 
%entry @@ -556,12 +413,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv4f32.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_f16: ; CHECK: # %bb.0: # %entry @@ -580,13 +431,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv4f32.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_f16: ; CHECK: # %bb.0: # %entry @@ -606,12 +450,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv8f32.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_f16: ; CHECK: # %bb.0: # %entry @@ -630,13 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv8f32.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_f16: ; CHECK: # %bb.0: # %entry @@ -656,12 +487,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv16f32.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_f16: ; CHECK: # %bb.0: # %entry @@ -680,13 +505,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv16f32.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_f16: ; CHECK: # %bb.0: # %entry @@ -706,12 +524,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv1f64.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfwadd.w_wf_nxv1f64_nxv1f64_f32( %0, 
float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv1f64_nxv1f64_f32: ; CHECK: # %bb.0: # %entry @@ -730,13 +542,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv1f64.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wf_nxv1f64_nxv1f64_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f64_nxv1f64_f32: ; CHECK: # %bb.0: # %entry @@ -756,12 +561,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv2f64.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfwadd.w_wf_nxv2f64_nxv2f64_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f64_nxv2f64_f32: ; CHECK: # %bb.0: # %entry @@ -780,13 +579,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv2f64.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wf_nxv2f64_nxv2f64_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f64_nxv2f64_f32: ; CHECK: # %bb.0: # %entry @@ -806,12 +598,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv4f64.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfwadd.w_wf_nxv4f64_nxv4f64_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv4f64_nxv4f64_f32: ; CHECK: # %bb.0: # %entry @@ -830,13 +616,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.mask.nxv4f64.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wf_nxv4f64_nxv4f64_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f64_nxv4f64_f32: ; CHECK: # %bb.0: # %entry @@ -856,12 +635,6 @@ entry: ret %a } -declare @llvm.riscv.vfwadd.w.nxv8f64.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfwadd.w_wf_nxv8f64_nxv8f64_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f64_nxv8f64_f32: ; CHECK: # %bb.0: # %entry @@ -880,13 +653,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfwadd.w.mask.nxv8f64.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwadd.w_mask_wf_nxv8f64_nxv8f64_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f64_nxv8f64_f32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-x.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-x.ll index b7df45bad36e6..01344dcd3bb77 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-x.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-x.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwcvt.f.x.v.nxv1bf16.nxv1i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.x.v_nxv1bf16_nxv1i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv1bf16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -25,12 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.x.v_nxv1bf16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1bf16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -47,11 +36,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.nxv2bf16.nxv2i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.x.v_nxv2bf16_nxv2i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv2bf16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -68,12 +52,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.x.v_nxv2bf16_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv2bf16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -90,11 +68,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.nxv4bf16.nxv4i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.x.v_nxv4bf16_nxv4i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv4bf16_nxv4i8: ; 
CHECK: # %bb.0: # %entry @@ -111,12 +84,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.x.v_nxv4bf16_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv4bf16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -133,11 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.nxv8bf16.nxv8i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.x.v_nxv8bf16_nxv8i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv8bf16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -154,12 +116,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.x.v_nxv8bf16_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv8bf16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -176,11 +132,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.nxv16bf16.nxv16i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.x.v_nxv16bf16_nxv16i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv16bf16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -197,12 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.x.v_nxv16bf16_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv16bf16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -219,11 +164,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.nxv32bf16.nxv32i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.x.v_nxv32bf16_nxv32i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv32bf16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -240,12 +180,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.x.v_nxv32bf16_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv32bf16_nxv32i8: ; 
CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-xu.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-xu.ll index c370261a77bc0..9f513969feae6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-xu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-xu.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwcvt.f.xu.v.nxv1bf16.nxv1i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.xu.v_nxv1bf16_nxv1i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1bf16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -25,12 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.xu.v_nxv1bf16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv1bf16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -47,11 +36,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.nxv2bf16.nxv2i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.xu.v_nxv2bf16_nxv2i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv2bf16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -68,12 +52,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.xu.v_nxv2bf16_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv2bf16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -90,11 +68,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.nxv4bf16.nxv4i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.xu.v_nxv4bf16_nxv4i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv4bf16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -111,12 +84,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.xu.v_nxv4bf16_nxv4i8( %0, %1, %2, iXLen %3) nounwind { 
; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv4bf16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -133,11 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.nxv8bf16.nxv8i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.xu.v_nxv8bf16_nxv8i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv8bf16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -154,12 +116,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.xu.v_nxv8bf16_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv8bf16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -176,11 +132,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.nxv16bf16.nxv16i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.xu.v_nxv16bf16_nxv16i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv16bf16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -197,12 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.xu.v_nxv16bf16_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv16bf16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -219,11 +164,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.nxv32bf16.nxv32i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.xu.v_nxv32bf16_nxv32i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv32bf16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -240,12 +180,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.xu.v_nxv32bf16_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv32bf16_nxv32i8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll index 354cf37591a19..b51c8efca9f7c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll 
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll @@ -7,10 +7,6 @@ ; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16( - , - , - iXLen); define @intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16: @@ -28,12 +24,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -50,11 +40,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2f16( - , - , - iXLen); - define @intrinsic_vfwcvt_f.f.v_nxv2f32_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv2f32_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -71,12 +56,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.f.v_nxv2f32_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv2f32_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -93,11 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4f16( - , - , - iXLen); - define @intrinsic_vfwcvt_f.f.v_nxv4f32_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv4f32_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -114,12 +88,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.f.v_nxv4f32_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv4f32_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -136,11 +104,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8f16( - , - , - iXLen); - define 
@intrinsic_vfwcvt_f.f.v_nxv8f32_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv8f32_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -157,12 +120,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.f.v_nxv8f32_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv8f32_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -179,11 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16f16( - , - , - iXLen); - define @intrinsic_vfwcvt_f.f.v_nxv16f32_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv16f32_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -200,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.f.v_nxv16f32_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv16f32_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -222,11 +168,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32( - , - , - iXLen); - define @intrinsic_vfwcvt_f.f.v_nxv1f64_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv1f64_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -243,12 +184,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.f.v_nxv1f64_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1f64_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -265,11 +200,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32( - , - , - iXLen); - define @intrinsic_vfwcvt_f.f.v_nxv2f64_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv2f64_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -286,12 +216,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32( - , - , - , - iXLen, iXLen); - define 
@intrinsic_vfwcvt_mask_f.f.v_nxv2f64_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv2f64_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -308,11 +232,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32( - , - , - iXLen); - define @intrinsic_vfwcvt_f.f.v_nxv4f64_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv4f64_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -329,12 +248,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.f.v_nxv4f64_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv4f64_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -351,11 +264,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32( - , - , - iXLen); - define @intrinsic_vfwcvt_f.f.v_nxv8f64_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv8f64_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -372,12 +280,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.f.v_nxv8f64_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv8f64_nxv8f32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x.ll index 317ebe3e558db..ebb1a74a806a2 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -25,12 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8( - , - , - , - 
iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -47,11 +36,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.nxv2f16.nxv2i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.x.v_nxv2f16_nxv2i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -68,12 +52,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f16.nxv2i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.x.v_nxv2f16_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -90,11 +68,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.nxv4f16.nxv4i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.x.v_nxv4f16_nxv4i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -111,12 +84,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f16.nxv4i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.x.v_nxv4f16_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -133,11 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.nxv8f16.nxv8i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.x.v_nxv8f16_nxv8i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -154,12 +116,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f16.nxv8i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.x.v_nxv8f16_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -176,11 +132,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.nxv16f16.nxv16i8( - , - , - iXLen); - define 
@intrinsic_vfwcvt_f.x.v_nxv16f16_nxv16i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -197,12 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f16.nxv16i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.x.v_nxv16f16_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -219,11 +164,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.nxv32f16.nxv32i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.x.v_nxv32f16_nxv32i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -240,12 +180,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv32f16.nxv32i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.x.v_nxv32f16_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -262,11 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16( - , - , - iXLen); - define @intrinsic_vfwcvt_f.x.v_nxv1f32_nxv1i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -283,12 +212,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.x.v_nxv1f32_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -305,11 +228,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16( - , - , - iXLen); - define @intrinsic_vfwcvt_f.x.v_nxv2f32_nxv2i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -326,12 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16( - , - , - , - iXLen, iXLen); - define 
@intrinsic_vfwcvt_mask_f.x.v_nxv2f32_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -348,11 +260,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16( - , - , - iXLen); - define @intrinsic_vfwcvt_f.x.v_nxv4f32_nxv4i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -369,12 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.x.v_nxv4f32_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -391,11 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16( - , - , - iXLen); - define @intrinsic_vfwcvt_f.x.v_nxv8f32_nxv8i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -412,12 +308,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.x.v_nxv8f32_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -434,11 +324,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16( - , - , - iXLen); - define @intrinsic_vfwcvt_f.x.v_nxv16f32_nxv16i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -455,12 +340,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.x.v_nxv16f32_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -477,11 +356,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32( - , - , - iXLen); - define 
@intrinsic_vfwcvt_f.x.v_nxv1f64_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -498,12 +372,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.x.v_nxv1f64_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -520,11 +388,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32( - , - , - iXLen); - define @intrinsic_vfwcvt_f.x.v_nxv2f64_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -541,12 +404,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.x.v_nxv2f64_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -563,11 +420,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32( - , - , - iXLen); - define @intrinsic_vfwcvt_f.x.v_nxv4f64_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -584,12 +436,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.x.v_nxv4f64_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -606,11 +452,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32( - , - , - iXLen); - define @intrinsic_vfwcvt_f.x.v_nxv8f64_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -627,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32( - , - , - , - iXLen, iXLen); - define 
@intrinsic_vfwcvt_mask_f.x.v_nxv8f64_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu.ll index 59800f9672289..41dfcadcef017 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -25,12 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f16.nxv1i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.xu.v_nxv1f16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -47,11 +36,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.nxv2f16.nxv2i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.xu.v_nxv2f16_nxv2i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -68,12 +52,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f16.nxv2i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.xu.v_nxv2f16_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -90,11 +68,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.nxv4f16.nxv4i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.xu.v_nxv4f16_nxv4i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -111,12 +84,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f16.nxv4i8( - , - , - , 
- iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.xu.v_nxv4f16_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -133,11 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.nxv8f16.nxv8i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.xu.v_nxv8f16_nxv8i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -154,12 +116,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f16.nxv8i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.xu.v_nxv8f16_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -176,11 +132,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.nxv16f16.nxv16i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.xu.v_nxv16f16_nxv16i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -197,12 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f16.nxv16i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.xu.v_nxv16f16_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -219,11 +164,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.nxv32f16.nxv32i8( - , - , - iXLen); - define @intrinsic_vfwcvt_f.xu.v_nxv32f16_nxv32i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -240,12 +180,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32f16.nxv32i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.xu.v_nxv32f16_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -262,11 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16( - , - , - 
iXLen); - define @intrinsic_vfwcvt_f.xu.v_nxv1f32_nxv1i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -283,12 +212,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.xu.v_nxv1f32_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -305,11 +228,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16( - , - , - iXLen); - define @intrinsic_vfwcvt_f.xu.v_nxv2f32_nxv2i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -326,12 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.xu.v_nxv2f32_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -348,11 +260,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16( - , - , - iXLen); - define @intrinsic_vfwcvt_f.xu.v_nxv4f32_nxv4i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -369,12 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.xu.v_nxv4f32_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -391,11 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16( - , - , - iXLen); - define @intrinsic_vfwcvt_f.xu.v_nxv8f32_nxv8i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -412,12 +308,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16( - , - , - , - iXLen, iXLen); - define 
@intrinsic_vfwcvt_mask_f.xu.v_nxv8f32_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -434,11 +324,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16( - , - , - iXLen); - define @intrinsic_vfwcvt_f.xu.v_nxv16f32_nxv16i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -455,12 +340,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.xu.v_nxv16f32_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -477,11 +356,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32( - , - , - iXLen); - define @intrinsic_vfwcvt_f.xu.v_nxv1f64_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -498,12 +372,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.xu.v_nxv1f64_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -520,11 +388,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32( - , - , - iXLen); - define @intrinsic_vfwcvt_f.xu.v_nxv2f64_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -541,12 +404,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.xu.v_nxv2f64_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -563,11 +420,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32( - , - , - iXLen); - define 
@intrinsic_vfwcvt_f.xu.v_nxv4f64_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -584,12 +436,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.xu.v_nxv4f64_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -606,11 +452,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32( - , - , - iXLen); - define @intrinsic_vfwcvt_f.xu.v_nxv8f64_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -627,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_mask_f.xu.v_nxv8f64_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f.ll index c33af8df6f1c6..d12e839856beb 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16( - , - , - iXLen); - define @intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -25,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -48,11 +36,6 @@ entry: ret %a 
} -declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16( - , - , - iXLen); - define @intrinsic_vfwcvt_rtz.x.f.v_nxv2i32_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv2i32_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -69,13 +52,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i32_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i32_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -92,11 +68,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16( - , - , - iXLen); - define @intrinsic_vfwcvt_rtz.x.f.v_nxv4i32_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv4i32_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -113,13 +84,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i32_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i32_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -136,11 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16( - , - , - iXLen); - define @intrinsic_vfwcvt_rtz.x.f.v_nxv8i32_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv8i32_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -157,13 +116,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i32_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i32_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -180,11 +132,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16( - , - , - iXLen); - define @intrinsic_vfwcvt_rtz.x.f.v_nxv16i32_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv16i32_nxv16f16: ; CHECK: # %bb.0: # %entry 
@@ -201,13 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv16i32_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv16i32_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -224,11 +164,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32( - , - , - iXLen); - define @intrinsic_vfwcvt_rtz.x.f.v_nxv1i64_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv1i64_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -245,13 +180,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i64_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i64_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -268,11 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32( - , - , - iXLen); - define @intrinsic_vfwcvt_rtz.x.f.v_nxv2i64_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv2i64_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -289,13 +212,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i64_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i64_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -312,11 +228,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32( - , - , - iXLen); - define @intrinsic_vfwcvt_rtz.x.f.v_nxv4i64_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv4i64_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -333,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i64_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i64_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -356,11 +260,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32( - , - , - iXLen); - define @intrinsic_vfwcvt_rtz.x.f.v_nxv8i64_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv8i64_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -377,13 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i64_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i64_nxv8f32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f.ll index a6a8b99f24217..e216cb9f601fa 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16( - , - , - iXLen); - define @intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -25,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -48,11 +36,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16( - , - , - iXLen); - define @intrinsic_vfwcvt_rtz.xu.f.v_nxv2i32_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv2i32_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -69,13 +52,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f16( - , - 
, - , - iXLen, - iXLen); - define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -92,11 +68,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16( - , - , - iXLen); - define @intrinsic_vfwcvt_rtz.xu.f.v_nxv4i32_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv4i32_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -113,13 +84,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -136,11 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16( - , - , - iXLen); - define @intrinsic_vfwcvt_rtz.xu.f.v_nxv8i32_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv8i32_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -157,13 +116,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -180,11 +132,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16( - , - , - iXLen); - define @intrinsic_vfwcvt_rtz.xu.f.v_nxv16i32_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv16i32_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -201,13 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f16: ; CHECK: # %bb.0: # %entry @@ 
-224,11 +164,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32( - , - , - iXLen); - define @intrinsic_vfwcvt_rtz.xu.f.v_nxv1i64_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv1i64_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -245,13 +180,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -268,11 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32( - , - , - iXLen); - define @intrinsic_vfwcvt_rtz.xu.f.v_nxv2i64_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv2i64_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -289,13 +212,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -312,11 +228,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32( - , - , - iXLen); - define @intrinsic_vfwcvt_rtz.xu.f.v_nxv4i64_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv4i64_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -333,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -356,11 +260,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32( - , - , - iXLen); - define @intrinsic_vfwcvt_rtz.xu.f.v_nxv8i64_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: 
intrinsic_vfwcvt_rtz.xu.f.v_nxv8i64_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -377,13 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f.ll index 90a93116bd634..409bc28659b6c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16( - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -27,12 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i32.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwcvt_mask_x.f.v_nxv1i32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -51,11 +40,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.x.f.v.nxv2i32.nxv2f16( - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_x.f.v_nxv2i32_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv2i32_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -74,12 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwcvt_mask_x.f.v_nxv2i32_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv2i32_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -98,11 +76,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.x.f.v.nxv4i32.nxv4f16( - , - , - iXLen, iXLen); - define 
@intrinsic_vfwcvt_x.f.v_nxv4i32_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv4i32_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -121,12 +94,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwcvt_mask_x.f.v_nxv4i32_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv4i32_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -145,11 +112,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.x.f.v.nxv8i32.nxv8f16( - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_x.f.v_nxv8i32_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv8i32_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -168,12 +130,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwcvt_mask_x.f.v_nxv8i32_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv8i32_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -192,11 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.x.f.v.nxv16i32.nxv16f16( - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_x.f.v_nxv16i32_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv16i32_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -215,12 +166,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwcvt_mask_x.f.v_nxv16i32_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv16i32_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -239,11 +184,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32( - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_x.f.v_nxv1i64_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv1i64_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -262,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32( - , - , - , - iXLen, 
iXLen, iXLen); - define @intrinsic_vfwcvt_mask_x.f.v_nxv1i64_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv1i64_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -286,11 +220,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32( - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_x.f.v_nxv2i64_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv2i64_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -309,12 +238,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwcvt_mask_x.f.v_nxv2i64_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv2i64_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -333,11 +256,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32( - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_x.f.v_nxv4i64_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv4i64_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -356,12 +274,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwcvt_mask_x.f.v_nxv4i64_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv4i64_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -380,11 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32( - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_x.f.v_nxv8i64_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv8i64_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -403,12 +310,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwcvt_mask_x.f.v_nxv8i64_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv8i64_nxv8f32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f.ll 
b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f.ll index d7b1d97d059c1..c513ae4a95102 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16( - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -27,12 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -51,11 +40,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16( - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_xu.f.v_nxv2i32_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv2i32_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -74,12 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwcvt_mask_xu.f.v_nxv2i32_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv2i32_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -98,11 +76,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16( - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_xu.f.v_nxv4i32_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv4i32_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -121,12 +94,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwcvt_mask_xu.f.v_nxv4i32_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfwcvt_mask_xu.f.v_nxv4i32_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -145,11 +112,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16( - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_xu.f.v_nxv8i32_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv8i32_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -168,12 +130,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwcvt_mask_xu.f.v_nxv8i32_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv8i32_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -192,11 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16( - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_xu.f.v_nxv16i32_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv16i32_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -215,12 +166,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwcvt_mask_xu.f.v_nxv16i32_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv16i32_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -239,11 +184,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32( - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_xu.f.v_nxv1i64_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv1i64_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -262,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwcvt_mask_xu.f.v_nxv1i64_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv1i64_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -286,11 +220,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32( - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_xu.f.v_nxv2i64_nxv2f32( %0, iXLen 
%1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv2i64_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -309,12 +238,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwcvt_mask_xu.f.v_nxv2i64_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv2i64_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -333,11 +256,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32( - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_xu.f.v_nxv4i64_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv4i64_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -356,12 +274,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwcvt_mask_xu.f.v_nxv4i64_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv4i64_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -380,11 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32( - , - , - iXLen, iXLen); - define @intrinsic_vfwcvt_xu.f.v_nxv8i64_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv8i64_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -403,12 +310,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwcvt_mask_xu.f.v_nxv8i64_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv8i64_nxv8f32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvtbf16-f-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvtbf16-f-f.ll index 6e11a222c713c..349d289339df2 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvtbf16-f-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvtbf16-f-f.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfbfmin \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare 
@llvm.riscv.vfwcvtbf16.f.f.v.nxv1f32.nxv1bf16( - , - , - iXLen); - define @intrinsic_vfwcvtbf16_f.f.v_nxv1f32_nxv1bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvtbf16_f.f.v_nxv1f32_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -25,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvtbf16.f.f.v.mask.nxv1f32.nxv1bf16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfwcvtbf16_mask_f.f.v_nxv1f32_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvtbf16_mask_f.f.v_nxv1f32_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -48,11 +36,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvtbf16.f.f.v.nxv2f32.nxv2bf16( - , - , - iXLen); - define @intrinsic_vfwcvtbf16_f.f.v_nxv2f32_nxv2bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvtbf16_f.f.v_nxv2f32_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -69,13 +52,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvtbf16.f.f.v.mask.nxv2f32.nxv2bf16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfwcvtbf16_mask_f.f.v_nxv2f32_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvtbf16_mask_f.f.v_nxv2f32_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -92,11 +68,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvtbf16.f.f.v.nxv4f32.nxv4bf16( - , - , - iXLen); - define @intrinsic_vfwcvtbf16_f.f.v_nxv4f32_nxv4bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvtbf16_f.f.v_nxv4f32_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -113,13 +84,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvtbf16.f.f.v.mask.nxv4f32.nxv4bf16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfwcvtbf16_mask_f.f.v_nxv4f32_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvtbf16_mask_f.f.v_nxv4f32_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -136,11 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvtbf16.f.f.v.nxv8f32.nxv8bf16( - , - , - iXLen); - define @intrinsic_vfwcvtbf16_f.f.v_nxv8f32_nxv8bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvtbf16_f.f.v_nxv8f32_nxv8bf16: ; CHECK: # %bb.0: # %entry 
@@ -157,13 +116,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvtbf16.f.f.v.mask.nxv8f32.nxv8bf16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfwcvtbf16_mask_f.f.v_nxv8f32_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvtbf16_mask_f.f.v_nxv8f32_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -180,11 +132,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvtbf16.f.f.v.nxv16f32.nxv16bf16( - , - , - iXLen); - define @intrinsic_vfwcvtbf16_f.f.v_nxv16f32_nxv16bf16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvtbf16_f.f.v_nxv16f32_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -201,13 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vfwcvtbf16.f.f.v.mask.nxv16f32.nxv16bf16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vfwcvtbf16_mask_f.f.v_nxv16f32_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwcvtbf16_mask_f.f.v_nxv16f32_nxv16bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-sdnode.ll index 63113b8780989..eb5078c6f0862 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-sdnode.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare @llvm.fma.v1f32(, , ) - define @vfwmacc_vv_nxv1f32( %va, %vb, %vc) { ; ZVFH-LABEL: vfwmacc_vv_nxv1f32: ; ZVFH: # %bb.0: @@ -243,8 +241,6 @@ define @vfwnmsac_fv_nxv1f32( %va, %vg } -declare @llvm.fma.v2f32(, , ) - define @vfwmacc_vv_nxv2f32( %va, %vb, %vc) { ; ZVFH-LABEL: vfwmacc_vv_nxv2f32: ; ZVFH: # %bb.0: @@ -478,9 +474,6 @@ define @vfwnmsac_fv_nxv2f32( %va, %vg } - -declare @llvm.fma.v4f32(, , ) - define @vfwmacc_vv_nxv4f32( %va, %vb, %vc) { ; ZVFH-LABEL: vfwmacc_vv_nxv4f32: ; ZVFH: # %bb.0: @@ -714,8 +707,6 @@ define @vfwnmsac_fv_nxv4f32( %va, %vg } -declare @llvm.fma.v8f32(, , ) - define @vfwmacc_vv_nxv8f32( %va, %vb, %vc) { ; 
ZVFH-LABEL: vfwmacc_vv_nxv8f32: ; ZVFH: # %bb.0: @@ -949,8 +940,6 @@ define @vfwnmsac_fv_nxv8f32( %va, %vg } -declare @llvm.fma.v16f32(, , ) - define @vfwmacc_vv_nxv16f32( %va, %vb, %vc) { ; ZVFH-LABEL: vfwmacc_vv_nxv16f32: ; ZVFH: # %bb.0: @@ -1184,8 +1173,6 @@ define @vfwnmsac_fv_nxv16f32( %va, %vg } -declare @llvm.fma.v1f64(, , ) - define @vfwmacc_vv_nxv1f64( %va, %vb, %vc) { ; CHECK-LABEL: vfwmacc_vv_nxv1f64: ; CHECK: # %bb.0: @@ -1329,8 +1316,6 @@ define @vfwnmsac_fv_nxv1f64( %va, %vg } -declare @llvm.fma.v2f64(, , ) - define @vfwmacc_vv_nxv2f64( %va, %vb, %vc) { ; CHECK-LABEL: vfwmacc_vv_nxv2f64: ; CHECK: # %bb.0: @@ -1474,9 +1459,6 @@ define @vfwnmsac_fv_nxv2f64( %va, %vg } - -declare @llvm.fma.v4f64(, , ) - define @vfwmacc_vv_nxv4f64( %va, %vb, %vc) { ; CHECK-LABEL: vfwmacc_vv_nxv4f64: ; CHECK: # %bb.0: @@ -1620,8 +1602,6 @@ define @vfwnmsac_fv_nxv4f64( %va, %vg } -declare @llvm.fma.v8f64(, , ) - define @vfwmacc_vv_nxv8f64( %va, %vb, %vc) { ; CHECK-LABEL: vfwmacc_vv_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-vp.ll index 4ef7ea5b52a75..2e8e05fd8ce4c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-vp.ll @@ -8,11 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare @llvm.vp.fma.nxv1f32(, , , , i32) -declare @llvm.vp.fneg.nxv1f32(, , i32) -declare @llvm.vp.fpext.nxv1f32.nxv1f16(, , i32) -declare @llvm.vp.merge.nxv1f32(, , , i32) - define @vfmacc_vv_nxv1f32( %a, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmacc_vv_nxv1f32: ; ZVFH: # %bb.0: @@ -295,12 +290,6 @@ define @vfmacc_vf_nxv1f32_unmasked_tu( % ret %u } -declare @llvm.vp.fma.nxv2f32(, , , , i32) -declare @llvm.vp.fneg.nxv2f32(, , i32) -declare @llvm.vp.merge.nxv2f32(, , , i32) -declare @llvm.vp.select.nxv2f32(, , , i32) -declare 
@llvm.vp.fpext.nxv2f32.nxv2f16(, , i32) - define @vfmacc_vv_nxv2f32( %a, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmacc_vv_nxv2f32: ; ZVFH: # %bb.0: @@ -397,12 +386,6 @@ define @vfmacc_vf_nxv2f32_unmasked( %va, ret %v } -declare @llvm.vp.fma.nxv4f32(, , , , i32) -declare @llvm.vp.fneg.nxv4f32(, , i32) -declare @llvm.vp.merge.nxv4f32(, , , i32) -declare @llvm.vp.select.nxv4f32(, , , i32) -declare @llvm.vp.fpext.nxv4f32.nxv4f16(, , i32) - define @vfmacc_vv_nxv4f32( %a, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmacc_vv_nxv4f32: ; ZVFH: # %bb.0: @@ -501,12 +484,6 @@ define @vfmacc_vf_nxv4f32_unmasked( %va, ret %v } -declare @llvm.vp.fma.nxv8f32(, , , , i32) -declare @llvm.vp.fneg.nxv8f32(, , i32) -declare @llvm.vp.merge.nxv8f32(, , , i32) -declare @llvm.vp.select.nxv8f32(, , , i32) -declare @llvm.vp.fpext.nxv8f32.nxv8f16(, , i32) - define @vfmacc_vv_nxv8f32( %a, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmacc_vv_nxv8f32: ; ZVFH: # %bb.0: @@ -605,12 +582,6 @@ define @vfmacc_vf_nxv8f32_unmasked( %va, ret %v } -declare @llvm.vp.fma.nxv16f32(, , , , i32) -declare @llvm.vp.fneg.nxv16f32(, , i32) -declare @llvm.vp.merge.nxv16f32(, , , i32) -declare @llvm.vp.select.nxv16f32(, , , i32) -declare @llvm.vp.fpext.nxv16f32.nxv16f16(, , i32) - define @vfmacc_vv_nxv16f32( %a, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfmacc_vv_nxv16f32: ; ZVFH: # %bb.0: @@ -709,12 +680,6 @@ define @vfmacc_vf_nxv16f32_unmasked( % ret %v } -declare @llvm.vp.fma.nxv1f64(, , , , i32) -declare @llvm.vp.fneg.nxv1f64(, , i32) -declare @llvm.vp.merge.nxv1f64(, , , i32) -declare @llvm.vp.select.nxv1f64(, , , i32) -declare @llvm.vp.fpext.nxv1f64.nxv1f32(, , i32) - define @vfmacc_vv_nxv1f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv1f64: ; CHECK: # %bb.0: @@ -771,12 +736,6 @@ define @vfmacc_vf_nxv1f64_unmasked( %v ret %v } -declare @llvm.vp.fma.nxv2f64(, , , , i32) -declare @llvm.vp.fneg.nxv2f64(, , i32) -declare @llvm.vp.merge.nxv2f64(, , , i32) -declare 
@llvm.vp.select.nxv2f64(, , , i32) -declare @llvm.vp.fpext.nxv2f64.nxv2f32(, , i32) - define @vfmacc_vv_nxv2f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv2f64: ; CHECK: # %bb.0: @@ -833,12 +792,6 @@ define @vfmacc_vf_nxv2f64_unmasked( %v ret %v } -declare @llvm.vp.fma.nxv4f64(, , , , i32) -declare @llvm.vp.fneg.nxv4f64(, , i32) -declare @llvm.vp.merge.nxv4f64(, , , i32) -declare @llvm.vp.select.nxv4f64(, , , i32) -declare @llvm.vp.fpext.nxv4f64.nxv4f32(, , i32) - define @vfmacc_vv_nxv4f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv4f64: ; CHECK: # %bb.0: @@ -895,12 +848,6 @@ define @vfmacc_vf_nxv4f64_unmasked( %v ret %v } -declare @llvm.vp.fma.nxv8f64(, , , , i32) -declare @llvm.vp.fneg.nxv8f64(, , i32) -declare @llvm.vp.merge.nxv8f64(, , , i32) -declare @llvm.vp.select.nxv8f64(, , , i32) -declare @llvm.vp.fpext.nxv8f64.nxv8f32(, , i32) - define @vfmacc_vv_nxv8f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv8f64: ; CHECK: # %bb.0: @@ -957,8 +904,6 @@ define @vfmacc_vf_nxv8f64_unmasked( %v ret %v } -declare @llvm.vp.fpext.nxv1f64.nxv1f16(, , i32) - define @vfmacc_vv_nxv1f64_nxv1f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv1f64_nxv1f16: ; CHECK: # %bb.0: @@ -991,8 +936,6 @@ define @vfmacc_vv_nxv1f64_nxv1f16_unmasked( %v } -declare @llvm.vp.fpext.nxv2f64.nxv2f16(, , i32) - define @vfmacc_vv_nxv2f64_nxv2f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv2f64_nxv2f16: ; CHECK: # %bb.0: @@ -1025,8 +968,6 @@ define @vfmacc_vv_nxv2f64_nxv2f16_unmasked( %v } -declare @llvm.vp.fpext.nxv4f64.nxv4f16(, , i32) - define @vfmacc_vv_nxv4f64_nxv4f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv4f64_nxv4f16: ; CHECK: # %bb.0: @@ -1059,8 +1000,6 @@ define @vfmacc_vv_nxv4f64_nxv4f16_unmasked( %v } -declare @llvm.vp.fpext.nxv8f64.nxv8f16(, , i32) - define @vfmacc_vv_nxv8f64_nxv8f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv8f64_nxv8f16: 
; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmacc.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmacc.ll index 354f169561735..aad4b8b19884a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwmacc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmacc.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwmacc.nxv1f32.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.nxv2f32.nxv2f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.nxv4f32.nxv4f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4f16( - , - , - , - , - iXLen, iXLen, 
iXLen); - define @intrinsic_vfwmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.nxv8f32.nxv8f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.nxv16f32.nxv16f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.nxv1f64.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfwmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.nxv2f64.nxv2f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.nxv4f64.nxv4f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.nxv8f64.nxv8f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.mask.nxv8f64.nxv8f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfwmacc.nxv1f32.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.mask.nxv1f32.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -504,12 +374,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.nxv2f32.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -528,13 +392,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.mask.nxv2f32.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -554,12 +411,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.nxv4f32.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -578,13 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.mask.nxv4f32.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -604,12 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.nxv8f32.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; 
CHECK-LABEL: intrinsic_vfwmacc_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -628,13 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.mask.nxv8f32.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -654,12 +485,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.nxv16f32.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -678,13 +503,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.mask.nxv16f32.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -704,12 +522,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.nxv1f64.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -728,13 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.mask.nxv1f64.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -754,12 +559,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.nxv2f64.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -778,13 +577,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfwmacc.mask.nxv2f64.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -804,12 +596,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.nxv4f64.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -828,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.mask.nxv4f64.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -854,12 +633,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.nxv8f64.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -878,13 +651,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmacc.mask.nxv8f64.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmacc_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmaccbf16.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmaccbf16.ll index 965c3d2c5d715..5dfbc8aa04de2 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwmaccbf16.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmaccbf16.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfbfmin,+zvfbfwma \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwmaccbf16.nxv1f32.nxv1bf16( - , - , - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfwmaccbf16_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmaccbf16_vv_nxv1f32_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmaccbf16.mask.nxv1f32.nxv1bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmaccbf16_mask_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmaccbf16_mask_vv_nxv1f32_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmaccbf16.nxv2f32.nxv2bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmaccbf16_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmaccbf16_vv_nxv2f32_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmaccbf16.mask.nxv2f32.nxv2bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmaccbf16_mask_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmaccbf16_mask_vv_nxv2f32_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmaccbf16.nxv4f32.nxv4bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmaccbf16_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmaccbf16_vv_nxv4f32_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmaccbf16.mask.nxv4f32.nxv4bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmaccbf16_mask_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmaccbf16_mask_vv_nxv4f32_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmaccbf16.nxv8f32.nxv8bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmaccbf16_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, 
%2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmaccbf16_vv_nxv8f32_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmaccbf16.mask.nxv8f32.nxv8bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmaccbf16_mask_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmaccbf16_mask_vv_nxv8f32_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmaccbf16.nxv16f32.nxv16bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmaccbf16_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmaccbf16_vv_nxv16f32_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmaccbf16.mask.nxv16f32.nxv16bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmaccbf16_mask_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmaccbf16_mask_vv_nxv16f32_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmaccbf16.nxv1f32.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmaccbf16_vf_nxv1f32_bf16_nxv1bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmaccbf16_vf_nxv1f32_bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmaccbf16.mask.nxv1f32.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmaccbf16_mask_vf_nxv1f32_bf16_nxv1bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmaccbf16_mask_vf_nxv1f32_bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -280,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmaccbf16.nxv2f32.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmaccbf16_vf_nxv2f32_bf16_nxv2bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; 
CHECK-LABEL: intrinsic_vfwmaccbf16_vf_nxv2f32_bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -302,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmaccbf16.mask.nxv2f32.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmaccbf16_mask_vf_nxv2f32_bf16_nxv2bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmaccbf16_mask_vf_nxv2f32_bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -326,12 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmaccbf16.nxv4f32.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmaccbf16_vf_nxv4f32_bf16_nxv4bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmaccbf16_vf_nxv4f32_bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -348,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmaccbf16.mask.nxv4f32.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmaccbf16_mask_vf_nxv4f32_bf16_nxv4bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmaccbf16_mask_vf_nxv4f32_bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -372,12 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmaccbf16.nxv8f32.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmaccbf16_vf_nxv8f32_bf16_nxv8bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmaccbf16_vf_nxv8f32_bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -394,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmaccbf16.mask.nxv8f32.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmaccbf16_mask_vf_nxv8f32_bf16_nxv8bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmaccbf16_mask_vf_nxv8f32_bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -418,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmaccbf16.nxv16f32.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmaccbf16_vf_nxv16f32_bf16_nxv16bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfwmaccbf16_vf_nxv16f32_bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -440,13 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmaccbf16.mask.nxv16f32.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmaccbf16_mask_vf_nxv16f32_bf16_nxv16bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmaccbf16_mask_vf_nxv16f32_bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-bf.ll index a3f667818ab0a..0425399c43ac8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv1f32_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv2f32_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv4f32_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv8f32_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv16f32_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv16f32_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: 
ret %a } -declare @llvm.riscv.vfwmsac.nxv1f32.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vf_nxv1f32_bf16_nxv1bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv1f32_bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv1f32.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vf_nxv1f32_bf16_nxv1bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv1f32_bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv2f32.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vf_nxv2f32_bf16_nxv2bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv2f32_bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv2f32.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vf_nxv2f32_bf16_nxv2bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv2f32_bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv4f32.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vf_nxv4f32_bf16_nxv4bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv4f32_bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv4f32.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vf_nxv4f32_bf16_nxv4bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv4f32_bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv8f32.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfwmsac_vf_nxv8f32_bf16_nxv8bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv8f32_bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv8f32.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vf_nxv8f32_bf16_nxv8bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv8f32_bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv16f32.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vf_nxv16f32_bf16_nxv16bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv16f32_bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv16f32.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vf_nxv16f32_bf16_nxv16bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv16f32_bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-vp.ll index 36f8e99b27383..68b803fd53721 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-vp.ll @@ -8,11 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFHMIN -declare @llvm.vp.fma.nxv1f32(, , , , i32) -declare @llvm.vp.fneg.nxv1f32(, , i32) -declare @llvm.vp.fpext.nxv1f32.nxv1f16(, , i32) -declare @llvm.vp.merge.nxv1f32(, , , i32) - define @vmfsac_vv_nxv1f32( %a, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vmfsac_vv_nxv1f32: ; ZVFH: # %bb.0: @@ -191,10 +186,6 @@ define @vmfsac_vf_nxv1f32_unmasked( %a, ret %v } -declare @llvm.vp.fma.nxv2f32(, , , , i32) -declare @llvm.vp.fneg.nxv2f32(, , i32) -declare 
@llvm.vp.fpext.nxv2f32.nxv2f16(, , i32) - define @vmfsac_vv_nxv2f32( %a, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vmfsac_vv_nxv2f32: ; ZVFH: # %bb.0: @@ -323,10 +314,6 @@ define @vmfsac_vf_nxv2f32_unmasked( %a, ret %v } -declare @llvm.vp.fma.nxv4f32(, , , , i32) -declare @llvm.vp.fneg.nxv4f32(, , i32) -declare @llvm.vp.fpext.nxv4f32.nxv4f16(, , i32) - define @vmfsac_vv_nxv4f32( %a, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vmfsac_vv_nxv4f32: ; ZVFH: # %bb.0: @@ -457,10 +444,6 @@ define @vmfsac_vf_nxv4f32_unmasked( %a, ret %v } -declare @llvm.vp.fma.nxv8f32(, , , , i32) -declare @llvm.vp.fneg.nxv8f32(, , i32) -declare @llvm.vp.fpext.nxv8f32.nxv8f16(, , i32) - define @vmfsac_vv_nxv8f32( %a, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vmfsac_vv_nxv8f32: ; ZVFH: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmsac.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmsac.ll index bd0d616fa6176..491e57475cf21 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwmsac.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmsac.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwmsac.nxv1f32.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv2f32.nxv2f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfwmsac_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv4f32.nxv4f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv8f32.nxv8f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv16f32.nxv16f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfwmsac.mask.nxv16f32.nxv16f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv1f64.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv2f64.nxv2f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv4f64.nxv4f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, 
%1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv8f64.nxv8f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv1f32.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv1f32.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -504,12 +374,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv2f32.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -528,13 +392,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv2f32.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -554,12 +411,6 @@ entry: ret %a } 
-declare @llvm.riscv.vfwmsac.nxv4f32.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -578,13 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv4f32.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -604,12 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv8f32.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -628,13 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv8f32.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -654,12 +485,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv16f32.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -678,13 +503,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv16f32.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -704,12 +522,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv1f64.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, iXLen %3) 
nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -728,13 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv1f64.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -754,12 +559,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv2f64.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -778,13 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv2f64.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -804,12 +596,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv4f64.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -828,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.mask.nxv4f64.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -854,12 +633,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmsac.nxv8f64.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -878,13 +651,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfwmsac.mask.nxv8f64.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmsac_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmul-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmul-bf.ll index 577b93af7a918..858ba09f47b14 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwmul-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmul-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwmul_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv1f32_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -29,13 +23,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f32_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -55,12 +42,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwmul_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv2f32_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -80,13 +61,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f32_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -106,12 +80,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16( - , - , - , - iXLen, iXLen); 
- define @intrinsic_vfwmul_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv4f32_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -132,13 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f32_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -158,12 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwmul_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv8f32_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -184,13 +139,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f32_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -210,12 +158,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwmul_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv16f32_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -236,13 +178,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv16f32_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -262,12 +197,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfwmul_vf_nxv1f32_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vfwmul_vf_nxv1f32_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -287,13 +216,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vf_nxv1f32_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f32_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -313,12 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfwmul_vf_nxv2f32_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv2f32_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -338,13 +254,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vf_nxv2f32_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f32_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -364,12 +273,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfwmul_vf_nxv4f32_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv4f32_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -389,13 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vf_nxv4f32_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f32_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -415,12 +311,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfwmul_vf_nxv8f32_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv8f32_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -440,13 +330,6 @@ entry: ret 
%a } -declare @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vf_nxv8f32_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f32_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -466,12 +349,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfwmul_vf_nxv16f32_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv16f32_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -491,13 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vf_nxv16f32_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv16f32_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmul.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmul.ll index f3a2bbd2ea140..975032b88568a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwmul.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmul.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -29,13 +23,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -55,12 +42,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv2f32.nxv2f16.nxv2f16( - , - , - , - iXLen, 
iXLen); - define @intrinsic_vfwmul_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -80,13 +61,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.nxv2f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -106,12 +80,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv4f32.nxv4f16.nxv4f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwmul_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -132,13 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.nxv4f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -158,12 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv8f32.nxv8f16.nxv8f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwmul_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -184,13 +139,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.nxv8f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -210,12 +158,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv16f32.nxv16f16.nxv16f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwmul_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv16f32_nxv16f16_nxv16f16: 
; CHECK: # %bb.0: # %entry @@ -236,13 +178,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.nxv16f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -262,12 +197,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwmul_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -287,13 +216,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -313,12 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwmul_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -339,13 +255,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -365,12 +274,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwmul_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -391,13 +294,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32( - , - , - , - , - iXLen, 
iXLen, iXLen); - define @intrinsic_vfwmul_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -417,12 +313,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwmul_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -443,13 +333,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -469,12 +352,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv1f32.nxv1f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfwmul_vf_nxv1f32_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv1f32_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -494,13 +371,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vf_nxv1f32_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f32_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -520,12 +390,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv2f32.nxv2f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfwmul_vf_nxv2f32_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv2f32_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -545,13 +409,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vf_nxv2f32_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfwmul_mask_vf_nxv2f32_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -571,12 +428,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv4f32.nxv4f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfwmul_vf_nxv4f32_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv4f32_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -596,13 +447,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vf_nxv4f32_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f32_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -622,12 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv8f32.nxv8f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfwmul_vf_nxv8f32_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv8f32_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -647,13 +485,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vf_nxv8f32_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f32_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -673,12 +504,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv16f32.nxv16f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfwmul_vf_nxv16f32_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv16f32_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -698,13 +523,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vf_nxv16f32_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv16f32_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -724,12 +542,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32( - , 
- , - float, - iXLen, iXLen); - define @intrinsic_vfwmul_vf_nxv1f64_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv1f64_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -749,13 +561,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vf_nxv1f64_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f64_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -775,12 +580,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfwmul_vf_nxv2f64_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv2f64_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -800,13 +599,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vf_nxv2f64_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f64_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -826,12 +618,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfwmul_vf_nxv4f64_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv4f64_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -851,13 +637,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vf_nxv4f64_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f64_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -877,12 +656,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfwmul_vf_nxv8f64_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfwmul_vf_nxv8f64_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -902,13 +675,6 @@ entry: ret %a } -declare @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwmul_mask_vf_nxv8f64_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f64_nxv8f32_f32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-bf.ll index 1e05e4c7acf25..152e7041a466f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv1f32_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv2f32_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2bf16_nxv2bf16: ; CHECK: # 
%bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv4f32_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv8f32_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv16f32_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv16f32_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfwnmacc.nxv1f32.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vf_nxv1f32_bf16_nxv1bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv1f32_bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vf_nxv1f32_bf16_nxv1bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv1f32_bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv2f32.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vf_nxv2f32_bf16_nxv2bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv2f32_bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vf_nxv2f32_bf16_nxv2bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv2f32_bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv4f32.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vf_nxv4f32_bf16_nxv4bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv4f32_bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vf_nxv4f32_bf16_nxv4bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv4f32_bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv8f32.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfwnmacc_vf_nxv8f32_bf16_nxv8bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv8f32_bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vf_nxv8f32_bf16_nxv8bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv8f32_bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv16f32.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vf_nxv16f32_bf16_nxv16bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv16f32_bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vf_nxv16f32_bf16_nxv16bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv16f32_bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-vp.ll index fa328356ab585..2b540d775e500 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-vp.ll @@ -8,10 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare @llvm.vp.fma.nxv1f32(, , , , i32) -declare @llvm.vp.fneg.nxv1f32(, , i32) -declare @llvm.vp.fpext.nxv1f32.nxv1f16(, , i32) - define @vfnmacc_vv_nxv1f32( %a, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfnmacc_vv_nxv1f32: ; ZVFH: # %bb.0: @@ -146,10 +142,6 @@ define @vfnmacc_vf_nxv1f32_unmasked( %a, ret %v } -declare @llvm.vp.fma.nxv2f32(, , , , i32) -declare @llvm.vp.fneg.nxv2f32(, , i32) -declare 
@llvm.vp.fpext.nxv2f32.nxv2f16(, , i32) - define @vfnmacc_vv_nxv2f32( %a, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfnmacc_vv_nxv2f32: ; ZVFH: # %bb.0: @@ -284,10 +276,6 @@ define @vfnmacc_vf_nxv2f32_unmasked( %a, ret %v } -declare @llvm.vp.fma.nxv4f32(, , , , i32) -declare @llvm.vp.fneg.nxv4f32(, , i32) -declare @llvm.vp.fpext.nxv4f32.nxv4f16(, , i32) - define @vfnmacc_vv_nxv4f32( %a, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfnmacc_vv_nxv4f32: ; ZVFH: # %bb.0: @@ -424,10 +412,6 @@ define @vfnmacc_vf_nxv4f32_unmasked( %a, ret %v } -declare @llvm.vp.fma.nxv8f32(, , , , i32) -declare @llvm.vp.fneg.nxv8f32(, , i32) -declare @llvm.vp.fpext.nxv8f32.nxv8f16(, , i32) - define @vfnmacc_vv_nxv8f32( %a, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfnmacc_vv_nxv8f32: ; ZVFH: # %bb.0: @@ -564,10 +548,6 @@ define @vfnmacc_vf_nxv8f32_unmasked( %a, ret %v } -declare @llvm.vp.fma.nxv16f32(, , , , i32) -declare @llvm.vp.fneg.nxv16f32(, , i32) -declare @llvm.vp.fpext.nxv16f32.nxv16f16(, , i32) - define @vfnmacc_vv_nxv16f32( %a, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfnmacc_vv_nxv16f32: ; ZVFH: # %bb.0: @@ -704,10 +684,6 @@ define @vfnmacc_vf_nxv16f32_unmasked( ret %v } -declare @llvm.vp.fma.nxv1f64(, , , , i32) -declare @llvm.vp.fneg.nxv1f64(, , i32) -declare @llvm.vp.fpext.nxv1f64.nxv1f32(, , i32) - define @vfnmacc_vv_nxv1f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv1f64: ; CHECK: # %bb.0: @@ -790,10 +766,6 @@ define @vfnmacc_vf_nxv1f64_unmasked( % ret %v } -declare @llvm.vp.fma.nxv2f64(, , , , i32) -declare @llvm.vp.fneg.nxv2f64(, , i32) -declare @llvm.vp.fpext.nxv2f64.nxv2f32(, , i32) - define @vfnmacc_vv_nxv2f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv2f64: ; CHECK: # %bb.0: @@ -876,10 +848,6 @@ define @vfnmacc_vf_nxv2f64_unmasked( % ret %v } -declare @llvm.vp.fma.nxv4f64(, , , , i32) -declare @llvm.vp.fneg.nxv4f64(, , i32) -declare @llvm.vp.fpext.nxv4f64.nxv4f32(, , i32) - define @vfnmacc_vv_nxv4f64( %a, 
%b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv4f64: ; CHECK: # %bb.0: @@ -962,10 +930,6 @@ define @vfnmacc_vf_nxv4f64_unmasked( % ret %v } -declare @llvm.vp.fma.nxv8f64(, , , , i32) -declare @llvm.vp.fneg.nxv8f64(, , i32) -declare @llvm.vp.fpext.nxv8f64.nxv8f32(, , i32) - define @vfnmacc_vv_nxv8f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv8f64: ; CHECK: # %bb.0: @@ -1048,8 +1012,6 @@ define @vfnmacc_vf_nxv8f64_unmasked( % ret %v } -declare @llvm.vp.fpext.nxv1f64.nxv1f16(, , i32) - define @vfnmacc_vv_nxv1f64_nxv1f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv1f64_nxv1f16: ; CHECK: # %bb.0: @@ -1086,8 +1048,6 @@ define @vfnmacc_vv_nxv1f64_nxv1f16_unmasked( %v } -declare @llvm.vp.fpext.nxv2f64.nxv2f16(, , i32) - define @vfnmacc_vv_nxv2f64_nxv2f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv2f64_nxv2f16: ; CHECK: # %bb.0: @@ -1124,8 +1084,6 @@ define @vfnmacc_vv_nxv2f64_nxv2f16_unmasked( %v } -declare @llvm.vp.fpext.nxv4f64.nxv4f16(, , i32) - define @vfnmacc_vv_nxv4f64_nxv4f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv4f64_nxv4f16: ; CHECK: # %bb.0: @@ -1162,8 +1120,6 @@ define @vfnmacc_vv_nxv4f64_nxv4f16_unmasked( %v } -declare @llvm.vp.fpext.nxv8f64.nxv8f16(, , i32) - define @vfnmacc_vv_nxv8f64_nxv4f16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv8f64_nxv4f16: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc.ll index e1db8cb722760..2b7362cbc2f4a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv2f32.nxv2f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv4f32.nxv4f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv8f32.nxv8f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv16f32.nxv16f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32( - , - , - , - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfwnmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv8f64.nxv8f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv1f32.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv1f32.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfwnmacc_mask_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -504,12 +374,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv2f32.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -528,13 +392,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv2f32.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -554,12 +411,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv4f32.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -578,13 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv4f32.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -604,12 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv8f32.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -628,13 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv8f32.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -654,12 +485,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfwnmacc.nxv16f32.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -678,13 +503,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv16f32.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -704,12 +522,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv1f64.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -728,13 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv1f64.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -754,12 +559,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv2f64.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -778,13 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv2f64.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -804,12 +596,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv4f64.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vf_nxv4f64_f32_nxv4f32( %0, 
float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -828,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv4f64.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -854,12 +633,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.nxv8f64.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -878,13 +651,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmacc.mask.nxv8f64.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmacc_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-bf.ll index 223ad4f7483f6..c7f7b7102a500 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv1f32_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv2f32_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv4f32_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv8f32_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret 
%a } -declare @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv16f32_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv16f32_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv1f32.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vf_nxv1f32_bf16_nxv1bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv1f32_bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vf_nxv1f32_bf16_nxv1bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv1f32_bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv2f32.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vf_nxv2f32_bf16_nxv2bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv2f32_bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vf_nxv2f32_bf16_nxv2bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv2f32_bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv4f32.bf16( - , - bfloat, - , - iXLen, iXLen, 
iXLen); - define @intrinsic_vfwnmsac_vf_nxv4f32_bf16_nxv4bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv4f32_bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vf_nxv4f32_bf16_nxv4bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv4f32_bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv8f32.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vf_nxv8f32_bf16_nxv8bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv8f32_bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vf_nxv8f32_bf16_nxv8bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv8f32_bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv16f32.bf16( - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vf_nxv16f32_bf16_nxv16bf16( %0, bfloat %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv16f32_bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16( - , - bfloat, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vf_nxv16f32_bf16_nxv16bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv16f32_bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-vp.ll index cc0ae35780a60..ef00edb5a5a53 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-vp.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-vp.ll @@ -8,10 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -declare @llvm.vp.fma.nxv1f32(, , , , i32) -declare @llvm.vp.fneg.nxv1f32(, , i32) -declare @llvm.vp.fpext.nxv1f32.nxv1f16(, , i32) - define @vfnmsac_vv_nxv1f32( %a, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfnmsac_vv_nxv1f32: ; ZVFH: # %bb.0: @@ -140,10 +136,6 @@ define @vfnmsac_vf_nxv1f32_unmasked( %a, ret %v } -declare @llvm.vp.fma.nxv2f32(, , , , i32) -declare @llvm.vp.fneg.nxv2f32(, , i32) -declare @llvm.vp.fpext.nxv2f32.nxv2f16(, , i32) - define @vfnmsac_vv_nxv2f32( %a, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfnmsac_vv_nxv2f32: ; ZVFH: # %bb.0: @@ -272,10 +264,6 @@ define @vfnmsac_vf_nxv2f32_unmasked( %a, ret %v } -declare @llvm.vp.fma.nxv4f32(, , , , i32) -declare @llvm.vp.fneg.nxv4f32(, , i32) -declare @llvm.vp.fpext.nxv4f32.nxv4f16(, , i32) - define @vfnmsac_vv_nxv4f32( %a, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfnmsac_vv_nxv4f32: ; ZVFH: # %bb.0: @@ -406,10 +394,6 @@ define @vfnmsac_vf_nxv4f32_unmasked( %a, ret %v } -declare @llvm.vp.fma.nxv8f32(, , , , i32) -declare @llvm.vp.fneg.nxv8f32(, , i32) -declare @llvm.vp.fpext.nxv8f32.nxv8f16(, , i32) - define @vfnmsac_vv_nxv8f32( %a, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfnmsac_vv_nxv8f32: ; ZVFH: # %bb.0: @@ -540,10 +524,6 @@ define @vfnmsac_vf_nxv8f32_unmasked( %a, ret %v } -declare @llvm.vp.fma.nxv16f32(, , , , i32) -declare @llvm.vp.fneg.nxv16f32(, , i32) -declare @llvm.vp.fpext.nxv16f32.nxv16f16(, , i32) - define @vfnmsac_vv_nxv16f32( %a, %b, %c, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vfnmsac_vv_nxv16f32: ; ZVFH: # %bb.0: @@ -674,10 +654,6 @@ define @vfnmsac_vf_nxv16f32_unmasked( ret %v } -declare @llvm.vp.fma.nxv1f64(, , , , i32) -declare @llvm.vp.fneg.nxv1f64(, , i32) -declare @llvm.vp.fpext.nxv1f64.nxv1f32(, , i32) - define @vfnmsac_vv_nxv1f64( %a, %b, %c, 
%m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv1f64: ; CHECK: # %bb.0: @@ -754,10 +730,6 @@ define @vfnmsac_vf_nxv1f64_unmasked( % ret %v } -declare @llvm.vp.fma.nxv2f64(, , , , i32) -declare @llvm.vp.fneg.nxv2f64(, , i32) -declare @llvm.vp.fpext.nxv2f64.nxv2f32(, , i32) - define @vfnmsac_vv_nxv2f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv2f64: ; CHECK: # %bb.0: @@ -834,10 +806,6 @@ define @vfnmsac_vf_nxv2f64_unmasked( % ret %v } -declare @llvm.vp.fma.nxv4f64(, , , , i32) -declare @llvm.vp.fneg.nxv4f64(, , i32) -declare @llvm.vp.fpext.nxv4f64.nxv4f32(, , i32) - define @vfnmsac_vv_nxv4f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv4f64: ; CHECK: # %bb.0: @@ -914,10 +882,6 @@ define @vfnmsac_vf_nxv4f64_unmasked( % ret %v } -declare @llvm.vp.fma.nxv8f64(, , , , i32) -declare @llvm.vp.fneg.nxv8f64(, , i32) -declare @llvm.vp.fpext.nxv8f64.nxv8f32(, , i32) - define @vfnmsac_vv_nxv8f64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac.ll index 5c62112aa9e3d..7bebb0084fcbf 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry 
@@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv2f32.nxv2f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv4f32.nxv4f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv8f32.nxv8f16( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv16f32.nxv16f16( - , - , - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vfwnmsac_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfwnmsac_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32( - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv1f32.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv1f32.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -504,12 +374,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv2f32.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -528,13 +392,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfwnmsac.mask.nxv2f32.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -554,12 +411,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv4f32.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -578,13 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv4f32.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -604,12 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv8f32.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -628,13 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv8f32.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -654,12 +485,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv16f32.f16( - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -678,13 +503,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv16f32.f16( - , - half, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vf_nxv16f32_f16_nxv16f16( %0, 
half %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -704,12 +522,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv1f64.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -728,13 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv1f64.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -754,12 +559,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv2f64.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -778,13 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv2f64.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -804,12 +596,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv4f64.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -828,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv4f64.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -854,12 
+633,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.nxv8f64.f32( - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -878,13 +651,6 @@ entry: ret %a } -declare @llvm.riscv.vfwnmsac.mask.nxv8f64.f32( - , - float, - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwnmsac_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwredosum.ll b/llvm/test/CodeGen/RISCV/rvv/vfwredosum.ll index bbb019f2f5892..014f07c125391 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwredosum.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwredosum.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwredosum.nxv2f32.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredosum_vs_nxv2f32_nxv1f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv1f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredosum.mask.nxv2f32.nxv1f16.nxv2f32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv1f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv1f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredosum.nxv2f32.nxv2f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredosum_vs_nxv2f32_nxv2f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv2f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredosum.mask.nxv2f32.nxv2f16.nxv2f32( - , - , - , - , - 
iXLen, iXLen); - define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv2f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv2f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredosum.nxv2f32.nxv4f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredosum_vs_nxv2f32_nxv4f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv4f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredosum.mask.nxv2f32.nxv4f16.nxv2f32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv4f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv4f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredosum.nxv2f32.nxv8f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredosum_vs_nxv2f32_nxv8f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv8f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredosum.mask.nxv2f32.nxv8f16.nxv2f32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv8f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv8f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredosum.nxv2f32.nxv16f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredosum_vs_nxv2f32_nxv16f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv16f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredosum.mask.nxv2f32.nxv16f16.nxv2f32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv16f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { 
; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv16f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredosum.nxv2f32.nxv32f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredosum_vs_nxv2f32_nxv32f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv32f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv32f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv32f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredosum.nxv1f64.nxv1f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredosum_vs_nxv1f64_nxv1f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv1f32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.nxv1f64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv1f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv1f32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredosum.nxv1f64.nxv2f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredosum_vs_nxv1f64_nxv2f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv2f32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.nxv1f64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv2f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv2f32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ 
entry: ret %a } -declare @llvm.riscv.vfwredosum.nxv1f64.nxv4f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredosum_vs_nxv1f64_nxv4f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv4f32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.nxv1f64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv4f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv4f32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredosum.nxv1f64.nxv8f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredosum_vs_nxv1f64_nxv8f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv8f32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.nxv1f64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv8f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv8f32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -504,12 +374,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredosum.nxv1f64.nxv16f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredosum_vs_nxv1f64_nxv16f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv16f32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -528,13 +392,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.nxv1f64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv16f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv16f32_nxv1f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwredusum.ll b/llvm/test/CodeGen/RISCV/rvv/vfwredusum.ll index 05044ef689a92..fa3303de780b7 
100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwredusum.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwredusum.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwredusum.nxv2f32.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredusum_vs_nxv2f32_nxv1f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv1f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredusum.mask.nxv2f32.nxv1f16.nxv2f32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv1f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv1f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredusum.nxv2f32.nxv2f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredusum_vs_nxv2f32_nxv2f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv2f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredusum.mask.nxv2f32.nxv2f16.nxv2f32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv2f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv2f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredusum.nxv2f32.nxv4f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredusum_vs_nxv2f32_nxv4f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv4f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredusum.mask.nxv2f32.nxv4f16.nxv2f32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv4f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv4f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredusum.nxv2f32.nxv8f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredusum_vs_nxv2f32_nxv8f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv8f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredusum.mask.nxv2f32.nxv8f16.nxv2f32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv8f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv8f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredusum.nxv2f32.nxv16f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredusum_vs_nxv2f32_nxv16f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv16f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredusum.mask.nxv2f32.nxv16f16.nxv2f32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv16f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv16f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -254,12 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredusum.nxv2f32.nxv32f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredusum_vs_nxv2f32_nxv32f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv32f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -278,13 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredusum.mask.nxv2f32.nxv32f16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv32f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv32f16_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -304,12 +226,6 @@ 
entry: ret %a } -declare @llvm.riscv.vfwredusum.nxv1f64.nxv1f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredusum_vs_nxv1f64_nxv1f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv1f32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -328,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.nxv1f64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv1f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv1f32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -354,12 +263,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredusum.nxv1f64.nxv2f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredusum_vs_nxv1f64_nxv2f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv2f32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -378,13 +281,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredusum.mask.nxv1f64.nxv2f32.nxv1f64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv2f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv2f32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -404,12 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredusum.nxv1f64.nxv4f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredusum_vs_nxv1f64_nxv4f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv4f32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -428,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredusum.mask.nxv1f64.nxv4f32.nxv1f64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv4f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv4f32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -454,12 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredusum.nxv1f64.nxv8f32( - , - , - , - iXLen, iXLen); - define 
@intrinsic_vfwredusum_vs_nxv1f64_nxv8f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv8f32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -478,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredusum.mask.nxv1f64.nxv8f32.nxv1f64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv8f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv8f32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -504,12 +374,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredusum.nxv1f64.nxv16f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredusum_vs_nxv1f64_nxv16f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv16f32_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -528,13 +392,6 @@ entry: ret %a } -declare @llvm.riscv.vfwredusum.mask.nxv1f64.nxv16f32.nxv1f64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv16f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv16f32_nxv1f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub-bf.ll index d993e4e610d2c..3341669551fae 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwsub-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv1f32_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -29,13 +23,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, 
%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f32_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -55,12 +42,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv2f32_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -80,13 +61,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f32_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -106,12 +80,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv4f32_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -132,13 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f32_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -158,12 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv8f32_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -184,13 +139,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f32_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ 
-210,12 +158,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv16f32_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -236,13 +178,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv16f32_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -262,12 +197,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfwsub_vf_nxv1f32_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv1f32_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -287,13 +216,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vf_nxv1f32_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f32_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -313,12 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfwsub_vf_nxv2f32_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv2f32_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -338,13 +254,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vf_nxv2f32_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f32_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -364,12 +273,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16( - , - , - 
bfloat, - iXLen, iXLen); - define @intrinsic_vfwsub_vf_nxv4f32_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv4f32_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -389,13 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vf_nxv4f32_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f32_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -415,12 +311,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfwsub_vf_nxv8f32_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv8f32_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -440,13 +330,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vf_nxv8f32_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f32_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -466,12 +349,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfwsub_vf_nxv16f32_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv16f32_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -491,13 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vf_nxv16f32_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv16f32_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub-w-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub-w-bf.ll index b22899a100e4a..c1a295a36bcc2 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwsub-w-bf.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/vfwsub-w-bf.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f32_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f32_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub.w_wv_nxv2f32_nxv2f32_nxv2bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv2f32_nxv2f32_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f32_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f32_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub.w_wv_nxv4f32_nxv4f32_nxv4bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv4f32_nxv4f32_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f32_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f32_nxv4bf16: ; CHECK: # 
%bb.0: # %entry @@ -154,12 +115,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub.w_wv_nxv8f32_nxv8f32_nxv8bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv8f32_nxv8f32_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f32_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f32_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -255,12 +190,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv1f32.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_bf16: ; CHECK: # %bb.0: # %entry @@ -279,13 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wf_nxv1f32_nxv1f32_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f32_nxv1f32_bf16: ; CHECK: # %bb.0: # %entry @@ -305,12 +227,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv2f32.bf16( - , - , - bfloat, - iXLen, iXLen); - 
define @intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_bf16: ; CHECK: # %bb.0: # %entry @@ -329,13 +245,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wf_nxv2f32_nxv2f32_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f32_nxv2f32_bf16: ; CHECK: # %bb.0: # %entry @@ -355,12 +264,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv4f32.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_bf16: ; CHECK: # %bb.0: # %entry @@ -379,13 +282,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wf_nxv4f32_nxv4f32_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f32_nxv4f32_bf16: ; CHECK: # %bb.0: # %entry @@ -405,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv8f32.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_bf16: ; CHECK: # %bb.0: # %entry @@ -429,13 +319,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wf_nxv8f32_nxv8f32_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f32_nxv8f32_bf16: ; CHECK: # %bb.0: # %entry @@ -455,12 +338,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv16f32.bf16( - , - , - bfloat, - iXLen, iXLen); - define @intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_bf16: ; CHECK: # %bb.0: # %entry @@ -479,13 +356,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16( - , - , - bfloat, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wf_nxv16f32_nxv16f32_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv16f32_nxv16f32_bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub.ll index 9a96364bf07d5..38878bee6999a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwsub.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -29,13 +23,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -55,12 +42,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv2f32.nxv2f16.nxv2f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -80,13 +61,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.nxv2f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -106,12 +80,6 @@ entry: ret %a } 
-declare @llvm.riscv.vfwsub.nxv4f32.nxv4f16.nxv4f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -132,13 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.nxv4f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -158,12 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv8f32.nxv8f16.nxv8f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -184,13 +139,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.nxv8f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -210,12 +158,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv16f32.nxv16f16.nxv16f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -236,13 +178,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.nxv16f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -262,12 +197,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub_vv_nxv1f64_nxv1f32_nxv1f32( 
%0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -287,13 +216,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -313,12 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -339,13 +255,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -365,12 +274,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -391,13 +294,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -417,12 +313,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -443,13 +333,6 @@ entry: ret %a } 
-declare @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -469,12 +352,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv1f32.nxv1f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfwsub_vf_nxv1f32_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv1f32_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -494,13 +371,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vf_nxv1f32_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f32_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -520,12 +390,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv2f32.nxv2f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfwsub_vf_nxv2f32_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv2f32_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -545,13 +409,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vf_nxv2f32_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f32_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -571,12 +428,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv4f32.nxv4f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfwsub_vf_nxv4f32_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv4f32_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -596,13 +447,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vf_nxv4f32_nxv4f16_f16( %0, %1, half %2, 
%3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f32_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -622,12 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv8f32.nxv8f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfwsub_vf_nxv8f32_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv8f32_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -647,13 +485,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vf_nxv8f32_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f32_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -673,12 +504,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv16f32.nxv16f16.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfwsub_vf_nxv16f32_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv16f32_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -698,13 +523,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vf_nxv16f32_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv16f32_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -724,12 +542,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfwsub_vf_nxv1f64_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv1f64_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -749,13 +561,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vf_nxv1f64_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f64_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -775,12 +580,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfwsub_vf_nxv2f64_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv2f64_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -800,13 +599,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vf_nxv2f64_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f64_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -826,12 +618,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfwsub_vf_nxv4f64_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv4f64_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -851,13 +637,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vf_nxv4f64_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f64_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -877,12 +656,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfwsub_vf_nxv8f64_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv8f64_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -902,13 +675,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub_mask_vf_nxv8f64_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f64_nxv8f32_f32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w.ll index 7facc0ad483d7..d7af8f2a2746d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -28,13 +22,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f32_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub.w_wv_nxv2f32_nxv2f32_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv2f32_nxv2f32_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f32_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f32_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub.w_wv_nxv4f32_nxv4f32_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv4f32_nxv4f32_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -128,13 +96,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f32_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f32_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -154,12 +115,6 @@ 
entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub.w_wv_nxv8f32_nxv8f32_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv8f32_nxv8f32_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -178,13 +133,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f32_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f32_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -204,12 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -228,13 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16f16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -255,12 +190,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub.w_wv_nxv1f64_nxv1f64_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f64_nxv1f64_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -279,13 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wv_nxv1f64_nxv1f64_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f64_nxv1f64_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -305,12 +227,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub.w_wv_nxv2f64_nxv2f64_nxv2f32( %0, 
%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv2f64_nxv2f64_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -329,13 +245,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wv_nxv2f64_nxv2f64_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f64_nxv2f64_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -355,12 +264,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub.w_wv_nxv4f64_nxv4f64_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv4f64_nxv4f64_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -379,13 +282,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wv_nxv4f64_nxv4f64_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f64_nxv4f64_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -405,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32( - , - , - , - iXLen, iXLen); - define @intrinsic_vfwsub.w_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv8f64_nxv8f64_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -429,13 +319,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f64_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -456,12 +339,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv1f32.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_f16: ; CHECK: # %bb.0: # %entry @@ -480,13 +357,6 @@ entry: ret %a } -declare 
@llvm.riscv.vfwsub.w.mask.nxv1f32.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wf_nxv1f32_nxv1f32_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f32_nxv1f32_f16: ; CHECK: # %bb.0: # %entry @@ -506,12 +376,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv2f32.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_f16: ; CHECK: # %bb.0: # %entry @@ -530,13 +394,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv2f32.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wf_nxv2f32_nxv2f32_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f32_nxv2f32_f16: ; CHECK: # %bb.0: # %entry @@ -556,12 +413,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv4f32.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_f16: ; CHECK: # %bb.0: # %entry @@ -580,13 +431,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv4f32.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wf_nxv4f32_nxv4f32_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f32_nxv4f32_f16: ; CHECK: # %bb.0: # %entry @@ -606,12 +450,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv8f32.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_f16: ; CHECK: # %bb.0: # %entry @@ -630,13 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv8f32.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wf_nxv8f32_nxv8f32_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f32_nxv8f32_f16: ; CHECK: # %bb.0: # %entry @@ -656,12 +487,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv16f32.f16( - , - , - half, - iXLen, iXLen); - define @intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_f16: ; CHECK: # %bb.0: # %entry @@ -680,13 +505,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv16f32.f16( - , - , - half, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wf_nxv16f32_nxv16f32_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv16f32_nxv16f32_f16: ; CHECK: # %bb.0: # %entry @@ -706,12 +524,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv1f64.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfwsub.w_wf_nxv1f64_nxv1f64_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv1f64_nxv1f64_f32: ; CHECK: # %bb.0: # %entry @@ -730,13 +542,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv1f64.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wf_nxv1f64_nxv1f64_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f64_nxv1f64_f32: ; CHECK: # %bb.0: # %entry @@ -756,12 +561,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv2f64.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfwsub.w_wf_nxv2f64_nxv2f64_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv2f64_nxv2f64_f32: ; CHECK: # %bb.0: # %entry @@ -780,13 +579,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv2f64.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wf_nxv2f64_nxv2f64_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f64_nxv2f64_f32: ; CHECK: # %bb.0: # %entry @@ -806,12 +598,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv4f64.f32( - 
, - , - float, - iXLen, iXLen); - define @intrinsic_vfwsub.w_wf_nxv4f64_nxv4f64_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv4f64_nxv4f64_f32: ; CHECK: # %bb.0: # %entry @@ -830,13 +616,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv4f64.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wf_nxv4f64_nxv4f64_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f64_nxv4f64_f32: ; CHECK: # %bb.0: # %entry @@ -856,12 +635,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.nxv8f64.f32( - , - , - float, - iXLen, iXLen); - define @intrinsic_vfwsub.w_wf_nxv8f64_nxv8f64_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv8f64_nxv8f64_f32: ; CHECK: # %bb.0: # %entry @@ -880,13 +653,6 @@ entry: ret %a } -declare @llvm.riscv.vfwsub.w.mask.nxv8f64.f32( - , - , - float, - , - iXLen, iXLen, iXLen); - define @intrinsic_vfwsub.w_mask_wf_nxv8f64_nxv8f64_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f64_nxv8f64_f32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vghsh.ll b/llvm/test/CodeGen/RISCV/rvv/vghsh.ll index 291d505d8faaf..3a4b1bfab6a31 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vghsh.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vghsh.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvkg \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vghsh.nxv1i32.nxv1i32( - , - , - , - iXLen, - iXLen) - define @intrinsic_vghsh_vv_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vghsh_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -28,13 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vghsh.nxv2i32.nxv2i32( - , - , - , - iXLen, - iXLen) - define @intrinsic_vghsh_vv_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vghsh_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # 
%entry @@ -52,13 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vghsh.nxv4i32.nxv4i32( - , - , - , - iXLen, - iXLen) - define @intrinsic_vghsh_vv_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vghsh_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -76,13 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vghsh.nxv8i32.nxv8i32( - , - , - , - iXLen, - iXLen) - define @intrinsic_vghsh_vv_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vghsh_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -100,13 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vghsh.nxv16i32.nxv16i32( - , - , - , - iXLen, - iXLen) - define @intrinsic_vghsh_vv_nxv16i32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vghsh_vv_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vgmul.ll b/llvm/test/CodeGen/RISCV/rvv/vgmul.ll index 4498f71008704..240d6db9beef1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vgmul.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vgmul.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvkg \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vgmul.vv.nxv1i32( - , - , - iXLen, iXLen); - define @intrinsic_vgmul_vs_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vgmul_vs_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -24,11 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vgmul.vv.nxv2i32( - , - , - iXLen, iXLen); - define @intrinsic_vgmul_vs_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vgmul_vs_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -44,11 +34,6 @@ entry: ret %a } -declare @llvm.riscv.vgmul.vv.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vgmul_vs_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vgmul_vs_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -64,11 +49,6 @@ entry: ret %a } -declare @llvm.riscv.vgmul.vv.nxv8i32( - , - , - iXLen, iXLen); - define @intrinsic_vgmul_vs_nxv8i32( %0, 
%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vgmul_vs_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -84,11 +64,6 @@ entry: ret %a } -declare @llvm.riscv.vgmul.vv.nxv16i32( - , - , - iXLen, iXLen); - define @intrinsic_vgmul_vs_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vgmul_vs_nxv16i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vid.ll b/llvm/test/CodeGen/RISCV/rvv/vid.ll index 1a2ed18730ab7..f743f1eb2ff82 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vid.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vid.ll @@ -4,10 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vid.nxv1i8( - , - iXLen); - define @intrinsic_vid_v_nxv1i8(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -22,11 +18,6 @@ entry: ret %a } -declare @llvm.riscv.vid.mask.nxv1i8( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -42,10 +33,6 @@ entry: ret %a } -declare @llvm.riscv.vid.nxv2i8( - , - iXLen); - define @intrinsic_vid_v_nxv2i8(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -60,11 +47,6 @@ entry: ret %a } -declare @llvm.riscv.vid.mask.nxv2i8( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -80,10 +62,6 @@ entry: ret %a } -declare @llvm.riscv.vid.nxv4i8( - , - iXLen); - define @intrinsic_vid_v_nxv4i8(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -98,11 +76,6 @@ entry: ret %a } -declare @llvm.riscv.vid.mask.nxv4i8( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -118,10 +91,6 @@ entry: ret %a } -declare 
@llvm.riscv.vid.nxv8i8( - , - iXLen); - define @intrinsic_vid_v_nxv8i8(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -136,11 +105,6 @@ entry: ret %a } -declare @llvm.riscv.vid.mask.nxv8i8( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -156,10 +120,6 @@ entry: ret %a } -declare @llvm.riscv.vid.nxv16i8( - , - iXLen); - define @intrinsic_vid_v_nxv16i8(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -174,11 +134,6 @@ entry: ret %a } -declare @llvm.riscv.vid.mask.nxv16i8( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -194,10 +149,6 @@ entry: ret %a } -declare @llvm.riscv.vid.nxv32i8( - , - iXLen); - define @intrinsic_vid_v_nxv32i8(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -212,11 +163,6 @@ entry: ret %a } -declare @llvm.riscv.vid.mask.nxv32i8( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -232,10 +178,6 @@ entry: ret %a } -declare @llvm.riscv.vid.nxv1i16( - , - iXLen); - define @intrinsic_vid_v_nxv1i16(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -250,11 +192,6 @@ entry: ret %a } -declare @llvm.riscv.vid.mask.nxv1i16( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -270,10 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vid.nxv2i16( - , - iXLen); - define @intrinsic_vid_v_nxv2i16(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -288,11 +221,6 @@ entry: ret %a } -declare 
@llvm.riscv.vid.mask.nxv2i16( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -308,10 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vid.nxv4i16( - , - iXLen); - define @intrinsic_vid_v_nxv4i16(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -326,11 +250,6 @@ entry: ret %a } -declare @llvm.riscv.vid.mask.nxv4i16( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -346,10 +265,6 @@ entry: ret %a } -declare @llvm.riscv.vid.nxv8i16( - , - iXLen); - define @intrinsic_vid_v_nxv8i16(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -364,11 +279,6 @@ entry: ret %a } -declare @llvm.riscv.vid.mask.nxv8i16( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -384,10 +294,6 @@ entry: ret %a } -declare @llvm.riscv.vid.nxv16i16( - , - iXLen); - define @intrinsic_vid_v_nxv16i16(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -402,11 +308,6 @@ entry: ret %a } -declare @llvm.riscv.vid.mask.nxv16i16( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -422,10 +323,6 @@ entry: ret %a } -declare @llvm.riscv.vid.nxv32i16( - , - iXLen); - define @intrinsic_vid_v_nxv32i16(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -440,11 +337,6 @@ entry: ret %a } -declare @llvm.riscv.vid.mask.nxv32i16( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv32i16: ; CHECK: # %bb.0: # 
%entry @@ -460,10 +352,6 @@ entry: ret %a } -declare @llvm.riscv.vid.nxv1i32( - , - iXLen); - define @intrinsic_vid_v_nxv1i32(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -478,11 +366,6 @@ entry: ret %a } -declare @llvm.riscv.vid.mask.nxv1i32( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -498,10 +381,6 @@ entry: ret %a } -declare @llvm.riscv.vid.nxv2i32( - , - iXLen); - define @intrinsic_vid_v_nxv2i32(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -516,11 +395,6 @@ entry: ret %a } -declare @llvm.riscv.vid.mask.nxv2i32( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -536,10 +410,6 @@ entry: ret %a } -declare @llvm.riscv.vid.nxv4i32( - , - iXLen); - define @intrinsic_vid_v_nxv4i32(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -554,11 +424,6 @@ entry: ret %a } -declare @llvm.riscv.vid.mask.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -574,10 +439,6 @@ entry: ret %a } -declare @llvm.riscv.vid.nxv8i32( - , - iXLen); - define @intrinsic_vid_v_nxv8i32(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -592,11 +453,6 @@ entry: ret %a } -declare @llvm.riscv.vid.mask.nxv8i32( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -612,10 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vid.nxv16i32( - , - iXLen); - define @intrinsic_vid_v_nxv16i32(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv16i32: ; CHECK: # 
%bb.0: # %entry @@ -630,11 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vid.mask.nxv16i32( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -650,10 +497,6 @@ entry: ret %a } -declare @llvm.riscv.vid.nxv1i64( - , - iXLen); - define @intrinsic_vid_v_nxv1i64(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -668,11 +511,6 @@ entry: ret %a } -declare @llvm.riscv.vid.mask.nxv1i64( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -688,10 +526,6 @@ entry: ret %a } -declare @llvm.riscv.vid.nxv2i64( - , - iXLen); - define @intrinsic_vid_v_nxv2i64(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -706,11 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vid.mask.nxv2i64( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -726,10 +555,6 @@ entry: ret %a } -declare @llvm.riscv.vid.nxv4i64( - , - iXLen); - define @intrinsic_vid_v_nxv4i64(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -744,11 +569,6 @@ entry: ret %a } -declare @llvm.riscv.vid.mask.nxv4i64( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -764,10 +584,6 @@ entry: ret %a } -declare @llvm.riscv.vid.nxv8i64( - , - iXLen); - define @intrinsic_vid_v_nxv8i64(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -782,11 +598,6 @@ entry: ret %a } -declare @llvm.riscv.vid.mask.nxv8i64( - , - , - iXLen, iXLen); - define @intrinsic_vid_mask_v_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vid_mask_v_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/viota.ll b/llvm/test/CodeGen/RISCV/rvv/viota.ll index a60aca3c4f065..16eda451735e9 100644 --- a/llvm/test/CodeGen/RISCV/rvv/viota.ll +++ b/llvm/test/CodeGen/RISCV/rvv/viota.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.viota.nxv1i8( - , - , - iXLen); - define @intrinsic_viota_m_nxv1i8_nxv1i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv1i8_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -24,12 +19,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv1i8( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv1i8_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i8_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -46,11 +35,6 @@ entry: ret %a } -declare @llvm.riscv.viota.nxv2i8( - , - , - iXLen); - define @intrinsic_viota_m_nxv2i8_nxv2i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv2i8_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -66,12 +50,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv2i8( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv2i8_nxv2i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i8_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -88,11 +66,6 @@ entry: ret %a } -declare @llvm.riscv.viota.nxv4i8( - , - , - iXLen); - define @intrinsic_viota_m_nxv4i8_nxv4i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv4i8_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -108,12 +81,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv4i8( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv4i8_nxv4i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i8_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -130,11 +97,6 @@ entry: ret %a } -declare @llvm.riscv.viota.nxv8i8( - , - , - iXLen); - define @intrinsic_viota_m_nxv8i8_nxv8i1( %0, iXLen %1) nounwind { 
; CHECK-LABEL: intrinsic_viota_m_nxv8i8_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -150,12 +112,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv8i8( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv8i8_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i8_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -172,11 +128,6 @@ entry: ret %a } -declare @llvm.riscv.viota.nxv16i8( - , - , - iXLen); - define @intrinsic_viota_m_nxv16i8_nxv16i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv16i8_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -192,12 +143,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv16i8( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv16i8_nxv16i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i8_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -214,11 +159,6 @@ entry: ret %a } -declare @llvm.riscv.viota.nxv32i8( - , - , - iXLen); - define @intrinsic_viota_m_nxv32i8_nxv32i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv32i8_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -234,12 +174,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv32i8( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv32i8_nxv32i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv32i8_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -256,11 +190,6 @@ entry: ret %a } -declare @llvm.riscv.viota.nxv64i8( - , - , - iXLen); - define @intrinsic_viota_m_nxv64i8_nxv64i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv64i8_nxv64i1: ; CHECK: # %bb.0: # %entry @@ -276,12 +205,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv64i8( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv64i8_nxv64i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv64i8_nxv64i1: ; CHECK: # %bb.0: # %entry @@ -298,11 +221,6 @@ entry: ret %a } -declare @llvm.riscv.viota.nxv1i16( - , - , - iXLen); - define @intrinsic_viota_m_nxv1i16_nxv1i1( %0, 
iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv1i16_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -318,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv1i16( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv1i16_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i16_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -340,11 +252,6 @@ entry: ret %a } -declare @llvm.riscv.viota.nxv2i16( - , - , - iXLen); - define @intrinsic_viota_m_nxv2i16_nxv2i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv2i16_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -360,12 +267,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv2i16( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv2i16_nxv2i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i16_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -382,11 +283,6 @@ entry: ret %a } -declare @llvm.riscv.viota.nxv4i16( - , - , - iXLen); - define @intrinsic_viota_m_nxv4i16_nxv4i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv4i16_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -402,12 +298,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv4i16( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv4i16_nxv4i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i16_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -424,11 +314,6 @@ entry: ret %a } -declare @llvm.riscv.viota.nxv8i16( - , - , - iXLen); - define @intrinsic_viota_m_nxv8i16_nxv8i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv8i16_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -444,12 +329,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv8i16( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv8i16_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i16_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -466,11 +345,6 @@ entry: ret %a } -declare @llvm.riscv.viota.nxv16i16( - , - , - iXLen); - define 
@intrinsic_viota_m_nxv16i16_nxv16i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv16i16_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -486,12 +360,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv16i16( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv16i16_nxv16i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i16_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -508,11 +376,6 @@ entry: ret %a } -declare @llvm.riscv.viota.nxv32i16( - , - , - iXLen); - define @intrinsic_viota_m_nxv32i16_nxv32i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv32i16_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -528,12 +391,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv32i16( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv32i16_nxv32i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv32i16_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -550,11 +407,6 @@ entry: ret %a } -declare @llvm.riscv.viota.nxv1i32( - , - , - iXLen); - define @intrinsic_viota_m_nxv1i32_nxv1i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv1i32_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -570,12 +422,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv1i32( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv1i32_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i32_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -592,11 +438,6 @@ entry: ret %a } -declare @llvm.riscv.viota.nxv2i32( - , - , - iXLen); - define @intrinsic_viota_m_nxv2i32_nxv2i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv2i32_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -612,12 +453,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv2i32( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv2i32_nxv2i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i32_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -634,11 +469,6 @@ entry: ret %a } -declare @llvm.riscv.viota.nxv4i32( 
- , - , - iXLen); - define @intrinsic_viota_m_nxv4i32_nxv4i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv4i32_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -654,12 +484,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv4i32( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv4i32_nxv4i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i32_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -676,11 +500,6 @@ entry: ret %a } -declare @llvm.riscv.viota.nxv8i32( - , - , - iXLen); - define @intrinsic_viota_m_nxv8i32_nxv8i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv8i32_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -696,12 +515,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv8i32( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv8i32_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i32_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -718,11 +531,6 @@ entry: ret %a } -declare @llvm.riscv.viota.nxv16i32( - , - , - iXLen); - define @intrinsic_viota_m_nxv16i32_nxv16i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv16i32_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -738,12 +546,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv16i32( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv16i32_nxv16i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i32_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -760,11 +562,6 @@ entry: ret %a } -declare @llvm.riscv.viota.nxv1i64( - , - , - iXLen); - define @intrinsic_viota_m_nxv1i64_nxv1i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv1i64_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -780,12 +577,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv1i64( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv1i64_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i64_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -802,11 +593,6 @@ entry: ret %a } -declare 
@llvm.riscv.viota.nxv2i64( - , - , - iXLen); - define @intrinsic_viota_m_nxv2i64_nxv2i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv2i64_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -822,12 +608,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv2i64( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv2i64_nxv2i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i64_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -844,11 +624,6 @@ entry: ret %a } -declare @llvm.riscv.viota.nxv4i64( - , - , - iXLen); - define @intrinsic_viota_m_nxv4i64_nxv4i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv4i64_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -864,12 +639,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv4i64( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv4i64_nxv4i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i64_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -886,11 +655,6 @@ entry: ret %a } -declare @llvm.riscv.viota.nxv8i64( - , - , - iXLen); - define @intrinsic_viota_m_nxv8i64_nxv8i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv8i64_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -906,12 +670,6 @@ entry: ret %a } -declare @llvm.riscv.viota.mask.nxv8i64( - , - , - , - iXLen, iXLen); - define @intrinsic_viota_mask_m_nxv8i64_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i64_nxv8i1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vitofp-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vitofp-constrained-sdnode.ll index 599a66d191fd2..b93a4c166538a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vitofp-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vitofp-constrained-sdnode.ll @@ -4,7 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i1(, metadata, metadata) define 
@vsitofp_nxv1i1_nxv1f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv1i1_nxv1f16: ; CHECK: # %bb.0: @@ -17,7 +16,6 @@ define @vsitofp_nxv1i1_nxv1f16( %va) strict ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i1(, metadata, metadata) define @vuitofp_nxv1i1_nxv1f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv1i1_nxv1f16: ; CHECK: # %bb.0: @@ -30,7 +28,6 @@ define @vuitofp_nxv1i1_nxv1f16( %va) strict ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv1f32.nxv1i1(, metadata, metadata) define @vsitofp_nxv1i1_nxv1f32( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv1i1_nxv1f32: ; CHECK: # %bb.0: @@ -43,7 +40,6 @@ define @vsitofp_nxv1i1_nxv1f32( %va) stric ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv1f32.nxv1i1(, metadata, metadata) define @vuitofp_nxv1i1_nxv1f32( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv1i1_nxv1f32: ; CHECK: # %bb.0: @@ -56,7 +52,6 @@ define @vuitofp_nxv1i1_nxv1f32( %va) stric ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv1f64.nxv1i1(, metadata, metadata) define @vsitofp_nxv1i1_nxv1f64( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv1i1_nxv1f64: ; CHECK: # %bb.0: @@ -69,7 +64,6 @@ define @vsitofp_nxv1i1_nxv1f64( %va) stri ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv1f64.nxv1i1(, metadata, metadata) define @vuitofp_nxv1i1_nxv1f64( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv1i1_nxv1f64: ; CHECK: # %bb.0: @@ -82,7 +76,6 @@ define @vuitofp_nxv1i1_nxv1f64( %va) stri ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv2f16.nxv2i1(, metadata, metadata) define @vsitofp_nxv2i1_nxv2f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv2i1_nxv2f16: ; CHECK: # %bb.0: @@ -95,7 +88,6 @@ define @vsitofp_nxv2i1_nxv2f16( %va) strict ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv2f16.nxv2i1(, metadata, metadata) define @vuitofp_nxv2i1_nxv2f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv2i1_nxv2f16: ; CHECK: # %bb.0: @@ -108,7 +100,6 @@ define @vuitofp_nxv2i1_nxv2f16( %va) strict 
ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv2f32.nxv2i1(, metadata, metadata) define @vsitofp_nxv2i1_nxv2f32( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv2i1_nxv2f32: ; CHECK: # %bb.0: @@ -121,7 +112,6 @@ define @vsitofp_nxv2i1_nxv2f32( %va) stric ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv2f32.nxv2i1(, metadata, metadata) define @vuitofp_nxv2i1_nxv2f32( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv2i1_nxv2f32: ; CHECK: # %bb.0: @@ -134,7 +124,6 @@ define @vuitofp_nxv2i1_nxv2f32( %va) stric ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv2f64.nxv2i1(, metadata, metadata) define @vsitofp_nxv2i1_nxv2f64( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv2i1_nxv2f64: ; CHECK: # %bb.0: @@ -147,7 +136,6 @@ define @vsitofp_nxv2i1_nxv2f64( %va) stri ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv2f64.nxv2i1(, metadata, metadata) define @vuitofp_nxv2i1_nxv2f64( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv2i1_nxv2f64: ; CHECK: # %bb.0: @@ -160,7 +148,6 @@ define @vuitofp_nxv2i1_nxv2f64( %va) stri ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv4f16.nxv4i1(, metadata, metadata) define @vsitofp_nxv4i1_nxv4f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv4i1_nxv4f16: ; CHECK: # %bb.0: @@ -173,7 +160,6 @@ define @vsitofp_nxv4i1_nxv4f16( %va) strict ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv4f16.nxv4i1(, metadata, metadata) define @vuitofp_nxv4i1_nxv4f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv4i1_nxv4f16: ; CHECK: # %bb.0: @@ -186,7 +172,6 @@ define @vuitofp_nxv4i1_nxv4f16( %va) strict ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv4f32.nxv4i1(, metadata, metadata) define @vsitofp_nxv4i1_nxv4f32( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv4i1_nxv4f32: ; CHECK: # %bb.0: @@ -199,7 +184,6 @@ define @vsitofp_nxv4i1_nxv4f32( %va) stric ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv4f32.nxv4i1(, metadata, metadata) define @vuitofp_nxv4i1_nxv4f32( %va) strictfp { ; 
CHECK-LABEL: vuitofp_nxv4i1_nxv4f32: ; CHECK: # %bb.0: @@ -212,7 +196,6 @@ define @vuitofp_nxv4i1_nxv4f32( %va) stric ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv4f64.nxv4i1(, metadata, metadata) define @vsitofp_nxv4i1_nxv4f64( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv4i1_nxv4f64: ; CHECK: # %bb.0: @@ -225,7 +208,6 @@ define @vsitofp_nxv4i1_nxv4f64( %va) stri ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv4f64.nxv4i1(, metadata, metadata) define @vuitofp_nxv4i1_nxv4f64( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv4i1_nxv4f64: ; CHECK: # %bb.0: @@ -238,7 +220,6 @@ define @vuitofp_nxv4i1_nxv4f64( %va) stri ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv8f16.nxv8i1(, metadata, metadata) define @vsitofp_nxv8i1_nxv8f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv8i1_nxv8f16: ; CHECK: # %bb.0: @@ -251,7 +232,6 @@ define @vsitofp_nxv8i1_nxv8f16( %va) strict ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv8f16.nxv8i1(, metadata, metadata) define @vuitofp_nxv8i1_nxv8f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv8i1_nxv8f16: ; CHECK: # %bb.0: @@ -264,7 +244,6 @@ define @vuitofp_nxv8i1_nxv8f16( %va) strict ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv8f32.nxv8i1(, metadata, metadata) define @vsitofp_nxv8i1_nxv8f32( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv8i1_nxv8f32: ; CHECK: # %bb.0: @@ -277,7 +256,6 @@ define @vsitofp_nxv8i1_nxv8f32( %va) stric ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv8f32.nxv8i1(, metadata, metadata) define @vuitofp_nxv8i1_nxv8f32( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv8i1_nxv8f32: ; CHECK: # %bb.0: @@ -290,7 +268,6 @@ define @vuitofp_nxv8i1_nxv8f32( %va) stric ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv8f64.nxv8i1(, metadata, metadata) define @vsitofp_nxv8i1_nxv8f64( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv8i1_nxv8f64: ; CHECK: # %bb.0: @@ -303,7 +280,6 @@ define @vsitofp_nxv8i1_nxv8f64( %va) stri ret %evec } -declare 
@llvm.experimental.constrained.uitofp.nxv8f64.nxv8i1(, metadata, metadata) define @vuitofp_nxv8i1_nxv8f64( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv8i1_nxv8f64: ; CHECK: # %bb.0: @@ -316,7 +292,6 @@ define @vuitofp_nxv8i1_nxv8f64( %va) stri ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv16f16.nxv16i1(, metadata, metadata) define @vsitofp_nxv16i1_nxv16f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv16i1_nxv16f16: ; CHECK: # %bb.0: @@ -329,7 +304,6 @@ define @vsitofp_nxv16i1_nxv16f16( %va) st ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv16f16.nxv16i1(, metadata, metadata) define @vuitofp_nxv16i1_nxv16f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv16i1_nxv16f16: ; CHECK: # %bb.0: @@ -342,7 +316,6 @@ define @vuitofp_nxv16i1_nxv16f16( %va) st ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv16f32.nxv16i1(, metadata, metadata) define @vsitofp_nxv16i1_nxv16f32( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv16i1_nxv16f32: ; CHECK: # %bb.0: @@ -355,7 +328,6 @@ define @vsitofp_nxv16i1_nxv16f32( %va) s ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv16f32.nxv16i1(, metadata, metadata) define @vuitofp_nxv16i1_nxv16f32( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv16i1_nxv16f32: ; CHECK: # %bb.0: @@ -368,7 +340,6 @@ define @vuitofp_nxv16i1_nxv16f32( %va) s ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv32f16.nxv32i1(, metadata, metadata) define @vsitofp_nxv32i1_nxv32f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv32i1_nxv32f16: ; CHECK: # %bb.0: @@ -381,7 +352,6 @@ define @vsitofp_nxv32i1_nxv32f16( %va) st ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv32f16.nxv32i1(, metadata, metadata) define @vuitofp_nxv32i1_nxv32f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv32i1_nxv32f16: ; CHECK: # %bb.0: @@ -394,7 +364,6 @@ define @vuitofp_nxv32i1_nxv32f16( %va) st ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i8(, metadata, metadata) define @vsitofp_nxv1i8_nxv1f16( %va) strictfp { ; 
CHECK-LABEL: vsitofp_nxv1i8_nxv1f16: ; CHECK: # %bb.0: @@ -406,7 +375,6 @@ define @vsitofp_nxv1i8_nxv1f16( %va) strict ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i7(, metadata, metadata) define @vsitofp_nxv1i7_nxv1f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv1i7_nxv1f16: ; CHECK: # %bb.0: @@ -419,7 +387,6 @@ define @vsitofp_nxv1i7_nxv1f16( %va) strict ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i7(, metadata, metadata) define @vuitofp_nxv1i7_nxv1f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv1i7_nxv1f16: ; CHECK: # %bb.0: @@ -432,7 +399,6 @@ define @vuitofp_nxv1i7_nxv1f16( %va) strict ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i8(, metadata, metadata) define @vuitofp_nxv1i8_nxv1f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv1i8_nxv1f16: ; CHECK: # %bb.0: @@ -444,7 +410,6 @@ define @vuitofp_nxv1i8_nxv1f16( %va) strict ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv1f32.nxv1i8(, metadata, metadata) define @vsitofp_nxv1i8_nxv1f32( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv1i8_nxv1f32: ; CHECK: # %bb.0: @@ -456,7 +421,6 @@ define @vsitofp_nxv1i8_nxv1f32( %va) stric ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv1f32.nxv1i8(, metadata, metadata) define @vuitofp_nxv1i8_nxv1f32( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv1i8_nxv1f32: ; CHECK: # %bb.0: @@ -468,7 +432,6 @@ define @vuitofp_nxv1i8_nxv1f32( %va) stric ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv1f64.nxv1i8(, metadata, metadata) define @vsitofp_nxv1i8_nxv1f64( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv1i8_nxv1f64: ; CHECK: # %bb.0: @@ -480,7 +443,6 @@ define @vsitofp_nxv1i8_nxv1f64( %va) stri ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv1f64.nxv1i8(, metadata, metadata) define @vuitofp_nxv1i8_nxv1f64( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv1i8_nxv1f64: ; CHECK: # %bb.0: @@ -492,7 +454,6 @@ define @vuitofp_nxv1i8_nxv1f64( %va) stri ret %evec } -declare 
@llvm.experimental.constrained.sitofp.nxv2f16.nxv2i8(, metadata, metadata) define @vsitofp_nxv2i8_nxv2f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv2i8_nxv2f16: ; CHECK: # %bb.0: @@ -504,7 +465,6 @@ define @vsitofp_nxv2i8_nxv2f16( %va) strict ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv2f16.nxv2i8(, metadata, metadata) define @vuitofp_nxv2i8_nxv2f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv2i8_nxv2f16: ; CHECK: # %bb.0: @@ -516,7 +476,6 @@ define @vuitofp_nxv2i8_nxv2f16( %va) strict ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv2f32.nxv2i8(, metadata, metadata) define @vsitofp_nxv2i8_nxv2f32( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv2i8_nxv2f32: ; CHECK: # %bb.0: @@ -528,7 +487,6 @@ define @vsitofp_nxv2i8_nxv2f32( %va) stric ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv2f32.nxv2i8(, metadata, metadata) define @vuitofp_nxv2i8_nxv2f32( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv2i8_nxv2f32: ; CHECK: # %bb.0: @@ -540,7 +498,6 @@ define @vuitofp_nxv2i8_nxv2f32( %va) stric ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv2f64.nxv2i8(, metadata, metadata) define @vsitofp_nxv2i8_nxv2f64( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv2i8_nxv2f64: ; CHECK: # %bb.0: @@ -552,7 +509,6 @@ define @vsitofp_nxv2i8_nxv2f64( %va) stri ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv2f64.nxv2i8(, metadata, metadata) define @vuitofp_nxv2i8_nxv2f64( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv2i8_nxv2f64: ; CHECK: # %bb.0: @@ -564,7 +520,6 @@ define @vuitofp_nxv2i8_nxv2f64( %va) stri ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv4f16.nxv4i8(, metadata, metadata) define @vsitofp_nxv4i8_nxv4f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv4i8_nxv4f16: ; CHECK: # %bb.0: @@ -576,7 +531,6 @@ define @vsitofp_nxv4i8_nxv4f16( %va) strict ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv4f16.nxv4i8(, metadata, metadata) define @vuitofp_nxv4i8_nxv4f16( %va) strictfp { ; CHECK-LABEL: 
vuitofp_nxv4i8_nxv4f16: ; CHECK: # %bb.0: @@ -588,7 +542,6 @@ define @vuitofp_nxv4i8_nxv4f16( %va) strict ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv4f32.nxv4i8(, metadata, metadata) define @vsitofp_nxv4i8_nxv4f32( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv4i8_nxv4f32: ; CHECK: # %bb.0: @@ -600,7 +553,6 @@ define @vsitofp_nxv4i8_nxv4f32( %va) stric ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv4f32.nxv4i8(, metadata, metadata) define @vuitofp_nxv4i8_nxv4f32( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv4i8_nxv4f32: ; CHECK: # %bb.0: @@ -612,7 +564,6 @@ define @vuitofp_nxv4i8_nxv4f32( %va) stric ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv4f64.nxv4i8(, metadata, metadata) define @vsitofp_nxv4i8_nxv4f64( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv4i8_nxv4f64: ; CHECK: # %bb.0: @@ -624,7 +575,6 @@ define @vsitofp_nxv4i8_nxv4f64( %va) stri ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv4f64.nxv4i8(, metadata, metadata) define @vuitofp_nxv4i8_nxv4f64( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv4i8_nxv4f64: ; CHECK: # %bb.0: @@ -636,7 +586,6 @@ define @vuitofp_nxv4i8_nxv4f64( %va) stri ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv8f16.nxv8i8(, metadata, metadata) define @vsitofp_nxv8i8_nxv8f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv8i8_nxv8f16: ; CHECK: # %bb.0: @@ -648,7 +597,6 @@ define @vsitofp_nxv8i8_nxv8f16( %va) strict ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv8f16.nxv8i8(, metadata, metadata) define @vuitofp_nxv8i8_nxv8f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv8i8_nxv8f16: ; CHECK: # %bb.0: @@ -660,7 +608,6 @@ define @vuitofp_nxv8i8_nxv8f16( %va) strict ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv8f32.nxv8i8(, metadata, metadata) define @vsitofp_nxv8i8_nxv8f32( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv8i8_nxv8f32: ; CHECK: # %bb.0: @@ -672,7 +619,6 @@ define @vsitofp_nxv8i8_nxv8f32( %va) stric ret %evec } -declare 
@llvm.experimental.constrained.uitofp.nxv8f32.nxv8i8(, metadata, metadata) define @vuitofp_nxv8i8_nxv8f32( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv8i8_nxv8f32: ; CHECK: # %bb.0: @@ -684,7 +630,6 @@ define @vuitofp_nxv8i8_nxv8f32( %va) stric ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv8f64.nxv8i8(, metadata, metadata) define @vsitofp_nxv8i8_nxv8f64( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv8i8_nxv8f64: ; CHECK: # %bb.0: @@ -696,7 +641,6 @@ define @vsitofp_nxv8i8_nxv8f64( %va) stri ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv8f64.nxv8i8(, metadata, metadata) define @vuitofp_nxv8i8_nxv8f64( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv8i8_nxv8f64: ; CHECK: # %bb.0: @@ -708,7 +652,6 @@ define @vuitofp_nxv8i8_nxv8f64( %va) stri ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv16f16.nxv16i8(, metadata, metadata) define @vsitofp_nxv16i8_nxv16f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv16i8_nxv16f16: ; CHECK: # %bb.0: @@ -720,7 +663,6 @@ define @vsitofp_nxv16i8_nxv16f16( %va) st ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv16f16.nxv16i8(, metadata, metadata) define @vuitofp_nxv16i8_nxv16f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv16i8_nxv16f16: ; CHECK: # %bb.0: @@ -732,7 +674,6 @@ define @vuitofp_nxv16i8_nxv16f16( %va) st ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv16f32.nxv16i8(, metadata, metadata) define @vsitofp_nxv16i8_nxv16f32( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv16i8_nxv16f32: ; CHECK: # %bb.0: @@ -744,7 +685,6 @@ define @vsitofp_nxv16i8_nxv16f32( %va) s ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv16f32.nxv16i8(, metadata, metadata) define @vuitofp_nxv16i8_nxv16f32( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv16i8_nxv16f32: ; CHECK: # %bb.0: @@ -756,7 +696,6 @@ define @vuitofp_nxv16i8_nxv16f32( %va) s ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv32f16.nxv32i8(, metadata, metadata) define @vsitofp_nxv32i8_nxv32f16( %va) strictfp { ; 
CHECK-LABEL: vsitofp_nxv32i8_nxv32f16: ; CHECK: # %bb.0: @@ -768,7 +707,6 @@ define @vsitofp_nxv32i8_nxv32f16( %va) st ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv32f16.nxv32i8(, metadata, metadata) define @vuitofp_nxv32i8_nxv32f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv32i8_nxv32f16: ; CHECK: # %bb.0: @@ -780,7 +718,6 @@ define @vuitofp_nxv32i8_nxv32f16( %va) st ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i16(, metadata, metadata) define @vsitofp_nxv1i16_nxv1f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv1i16_nxv1f16: ; CHECK: # %bb.0: @@ -791,7 +728,6 @@ define @vsitofp_nxv1i16_nxv1f16( %va) stri ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i16(, metadata, metadata) define @vuitofp_nxv1i16_nxv1f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv1i16_nxv1f16: ; CHECK: # %bb.0: @@ -802,7 +738,6 @@ define @vuitofp_nxv1i16_nxv1f16( %va) stri ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv1f32.nxv1i16(, metadata, metadata) define @vsitofp_nxv1i16_nxv1f32( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv1i16_nxv1f32: ; CHECK: # %bb.0: @@ -814,7 +749,6 @@ define @vsitofp_nxv1i16_nxv1f32( %va) str ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv1f32.nxv1i16(, metadata, metadata) define @vuitofp_nxv1i16_nxv1f32( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv1i16_nxv1f32: ; CHECK: # %bb.0: @@ -826,7 +760,6 @@ define @vuitofp_nxv1i16_nxv1f32( %va) str ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv1f64.nxv1i16(, metadata, metadata) define @vsitofp_nxv1i16_nxv1f64( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv1i16_nxv1f64: ; CHECK: # %bb.0: @@ -838,7 +771,6 @@ define @vsitofp_nxv1i16_nxv1f64( %va) st ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv1f64.nxv1i16(, metadata, metadata) define @vuitofp_nxv1i16_nxv1f64( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv1i16_nxv1f64: ; CHECK: # %bb.0: @@ -850,7 +782,6 @@ define @vuitofp_nxv1i16_nxv1f64( %va) st ret %evec } 
-declare @llvm.experimental.constrained.sitofp.nxv2f16.nxv2i16(, metadata, metadata) define @vsitofp_nxv2i16_nxv2f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv2i16_nxv2f16: ; CHECK: # %bb.0: @@ -861,7 +792,6 @@ define @vsitofp_nxv2i16_nxv2f16( %va) stri ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv2f16.nxv2i16(, metadata, metadata) define @vuitofp_nxv2i16_nxv2f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv2i16_nxv2f16: ; CHECK: # %bb.0: @@ -872,7 +802,6 @@ define @vuitofp_nxv2i16_nxv2f16( %va) stri ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv2f32.nxv2i16(, metadata, metadata) define @vsitofp_nxv2i16_nxv2f32( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv2i16_nxv2f32: ; CHECK: # %bb.0: @@ -884,7 +813,6 @@ define @vsitofp_nxv2i16_nxv2f32( %va) str ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv2f32.nxv2i16(, metadata, metadata) define @vuitofp_nxv2i16_nxv2f32( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv2i16_nxv2f32: ; CHECK: # %bb.0: @@ -896,7 +824,6 @@ define @vuitofp_nxv2i16_nxv2f32( %va) str ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv2f64.nxv2i16(, metadata, metadata) define @vsitofp_nxv2i16_nxv2f64( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv2i16_nxv2f64: ; CHECK: # %bb.0: @@ -908,7 +835,6 @@ define @vsitofp_nxv2i16_nxv2f64( %va) st ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv2f64.nxv2i16(, metadata, metadata) define @vuitofp_nxv2i16_nxv2f64( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv2i16_nxv2f64: ; CHECK: # %bb.0: @@ -920,7 +846,6 @@ define @vuitofp_nxv2i16_nxv2f64( %va) st ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv4f16.nxv4i16(, metadata, metadata) define @vsitofp_nxv4i16_nxv4f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv4i16_nxv4f16: ; CHECK: # %bb.0: @@ -931,7 +856,6 @@ define @vsitofp_nxv4i16_nxv4f16( %va) stri ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv4f16.nxv4i16(, metadata, metadata) define @vuitofp_nxv4i16_nxv4f16( %va) strictfp { ; 
CHECK-LABEL: vuitofp_nxv4i16_nxv4f16: ; CHECK: # %bb.0: @@ -942,7 +866,6 @@ define @vuitofp_nxv4i16_nxv4f16( %va) stri ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv4f32.nxv4i16(, metadata, metadata) define @vsitofp_nxv4i16_nxv4f32( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv4i16_nxv4f32: ; CHECK: # %bb.0: @@ -954,7 +877,6 @@ define @vsitofp_nxv4i16_nxv4f32( %va) str ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv4f32.nxv4i16(, metadata, metadata) define @vuitofp_nxv4i16_nxv4f32( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv4i16_nxv4f32: ; CHECK: # %bb.0: @@ -966,7 +888,6 @@ define @vuitofp_nxv4i16_nxv4f32( %va) str ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv4f64.nxv4i16(, metadata, metadata) define @vsitofp_nxv4i16_nxv4f64( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv4i16_nxv4f64: ; CHECK: # %bb.0: @@ -978,7 +899,6 @@ define @vsitofp_nxv4i16_nxv4f64( %va) st ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv4f64.nxv4i16(, metadata, metadata) define @vuitofp_nxv4i16_nxv4f64( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv4i16_nxv4f64: ; CHECK: # %bb.0: @@ -990,7 +910,6 @@ define @vuitofp_nxv4i16_nxv4f64( %va) st ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv8f16.nxv8i16(, metadata, metadata) define @vsitofp_nxv8i16_nxv8f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv8i16_nxv8f16: ; CHECK: # %bb.0: @@ -1001,7 +920,6 @@ define @vsitofp_nxv8i16_nxv8f16( %va) stri ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv8f16.nxv8i16(, metadata, metadata) define @vuitofp_nxv8i16_nxv8f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv8i16_nxv8f16: ; CHECK: # %bb.0: @@ -1012,7 +930,6 @@ define @vuitofp_nxv8i16_nxv8f16( %va) stri ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv8f32.nxv8i16(, metadata, metadata) define @vsitofp_nxv8i16_nxv8f32( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv8i16_nxv8f32: ; CHECK: # %bb.0: @@ -1024,7 +941,6 @@ define @vsitofp_nxv8i16_nxv8f32( %va) str ret %evec } 
-declare @llvm.experimental.constrained.uitofp.nxv8f32.nxv8i16(, metadata, metadata) define @vuitofp_nxv8i16_nxv8f32( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv8i16_nxv8f32: ; CHECK: # %bb.0: @@ -1036,7 +952,6 @@ define @vuitofp_nxv8i16_nxv8f32( %va) str ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv8f64.nxv8i16(, metadata, metadata) define @vsitofp_nxv8i16_nxv8f64( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv8i16_nxv8f64: ; CHECK: # %bb.0: @@ -1048,7 +963,6 @@ define @vsitofp_nxv8i16_nxv8f64( %va) st ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv8f64.nxv8i16(, metadata, metadata) define @vuitofp_nxv8i16_nxv8f64( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv8i16_nxv8f64: ; CHECK: # %bb.0: @@ -1060,7 +974,6 @@ define @vuitofp_nxv8i16_nxv8f64( %va) st ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv16f16.nxv16i16(, metadata, metadata) define @vsitofp_nxv16i16_nxv16f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv16i16_nxv16f16: ; CHECK: # %bb.0: @@ -1071,7 +984,6 @@ define @vsitofp_nxv16i16_nxv16f16( %va) ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv16f16.nxv16i16(, metadata, metadata) define @vuitofp_nxv16i16_nxv16f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv16i16_nxv16f16: ; CHECK: # %bb.0: @@ -1082,7 +994,6 @@ define @vuitofp_nxv16i16_nxv16f16( %va) ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv16f32.nxv16i16(, metadata, metadata) define @vsitofp_nxv16i16_nxv16f32( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv16i16_nxv16f32: ; CHECK: # %bb.0: @@ -1094,7 +1005,6 @@ define @vsitofp_nxv16i16_nxv16f32( %va) ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv16f32.nxv16i16(, metadata, metadata) define @vuitofp_nxv16i16_nxv16f32( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv16i16_nxv16f32: ; CHECK: # %bb.0: @@ -1106,7 +1016,6 @@ define @vuitofp_nxv16i16_nxv16f32( %va) ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv32f16.nxv32i16(, metadata, metadata) define 
@vsitofp_nxv32i16_nxv32f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv32i16_nxv32f16: ; CHECK: # %bb.0: @@ -1117,7 +1026,6 @@ define @vsitofp_nxv32i16_nxv32f16( %va) ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv32f16.nxv32i16(, metadata, metadata) define @vuitofp_nxv32i16_nxv32f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv32i16_nxv32f16: ; CHECK: # %bb.0: @@ -1128,7 +1036,6 @@ define @vuitofp_nxv32i16_nxv32f16( %va) ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i32(, metadata, metadata) define @vsitofp_nxv1i32_nxv1f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv1i32_nxv1f16: ; CHECK: # %bb.0: @@ -1140,7 +1047,6 @@ define @vsitofp_nxv1i32_nxv1f16( %va) stri ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i32(, metadata, metadata) define @vuitofp_nxv1i32_nxv1f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv1i32_nxv1f16: ; CHECK: # %bb.0: @@ -1152,7 +1058,6 @@ define @vuitofp_nxv1i32_nxv1f16( %va) stri ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv1f32.nxv1i32(, metadata, metadata) define @vsitofp_nxv1i32_nxv1f32( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv1i32_nxv1f32: ; CHECK: # %bb.0: @@ -1163,7 +1068,6 @@ define @vsitofp_nxv1i32_nxv1f32( %va) str ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv1f32.nxv1i32(, metadata, metadata) define @vuitofp_nxv1i32_nxv1f32( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv1i32_nxv1f32: ; CHECK: # %bb.0: @@ -1174,7 +1078,6 @@ define @vuitofp_nxv1i32_nxv1f32( %va) str ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv1f64.nxv1i32(, metadata, metadata) define @vsitofp_nxv1i32_nxv1f64( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv1i32_nxv1f64: ; CHECK: # %bb.0: @@ -1186,7 +1089,6 @@ define @vsitofp_nxv1i32_nxv1f64( %va) st ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv1f64.nxv1i32(, metadata, metadata) define @vuitofp_nxv1i32_nxv1f64( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv1i32_nxv1f64: ; CHECK: # %bb.0: @@ -1198,7 
+1100,6 @@ define @vuitofp_nxv1i32_nxv1f64( %va) st ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv2f16.nxv2i32(, metadata, metadata) define @vsitofp_nxv2i32_nxv2f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv2i32_nxv2f16: ; CHECK: # %bb.0: @@ -1210,7 +1111,6 @@ define @vsitofp_nxv2i32_nxv2f16( %va) stri ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv2f16.nxv2i32(, metadata, metadata) define @vuitofp_nxv2i32_nxv2f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv2i32_nxv2f16: ; CHECK: # %bb.0: @@ -1222,7 +1122,6 @@ define @vuitofp_nxv2i32_nxv2f16( %va) stri ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv2f32.nxv2i32(, metadata, metadata) define @vsitofp_nxv2i32_nxv2f32( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv2i32_nxv2f32: ; CHECK: # %bb.0: @@ -1233,7 +1132,6 @@ define @vsitofp_nxv2i32_nxv2f32( %va) str ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv2f32.nxv2i32(, metadata, metadata) define @vuitofp_nxv2i32_nxv2f32( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv2i32_nxv2f32: ; CHECK: # %bb.0: @@ -1244,7 +1142,6 @@ define @vuitofp_nxv2i32_nxv2f32( %va) str ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv2f64.nxv2i32(, metadata, metadata) define @vsitofp_nxv2i32_nxv2f64( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv2i32_nxv2f64: ; CHECK: # %bb.0: @@ -1256,7 +1153,6 @@ define @vsitofp_nxv2i32_nxv2f64( %va) st ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv2f64.nxv2i32(, metadata, metadata) define @vuitofp_nxv2i32_nxv2f64( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv2i32_nxv2f64: ; CHECK: # %bb.0: @@ -1268,7 +1164,6 @@ define @vuitofp_nxv2i32_nxv2f64( %va) st ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv4f16.nxv4i32(, metadata, metadata) define @vsitofp_nxv4i32_nxv4f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv4i32_nxv4f16: ; CHECK: # %bb.0: @@ -1280,7 +1175,6 @@ define @vsitofp_nxv4i32_nxv4f16( %va) stri ret %evec } -declare 
@llvm.experimental.constrained.uitofp.nxv4f16.nxv4i32(, metadata, metadata) define @vuitofp_nxv4i32_nxv4f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv4i32_nxv4f16: ; CHECK: # %bb.0: @@ -1292,7 +1186,6 @@ define @vuitofp_nxv4i32_nxv4f16( %va) stri ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv4f32.nxv4i32(, metadata, metadata) define @vsitofp_nxv4i32_nxv4f32( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv4i32_nxv4f32: ; CHECK: # %bb.0: @@ -1303,7 +1196,6 @@ define @vsitofp_nxv4i32_nxv4f32( %va) str ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv4f32.nxv4i32(, metadata, metadata) define @vuitofp_nxv4i32_nxv4f32( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv4i32_nxv4f32: ; CHECK: # %bb.0: @@ -1314,7 +1206,6 @@ define @vuitofp_nxv4i32_nxv4f32( %va) str ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv4f64.nxv4i32(, metadata, metadata) define @vsitofp_nxv4i32_nxv4f64( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv4i32_nxv4f64: ; CHECK: # %bb.0: @@ -1326,7 +1217,6 @@ define @vsitofp_nxv4i32_nxv4f64( %va) st ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv4f64.nxv4i32(, metadata, metadata) define @vuitofp_nxv4i32_nxv4f64( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv4i32_nxv4f64: ; CHECK: # %bb.0: @@ -1338,7 +1228,6 @@ define @vuitofp_nxv4i32_nxv4f64( %va) st ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv8f16.nxv8i32(, metadata, metadata) define @vsitofp_nxv8i32_nxv8f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv8i32_nxv8f16: ; CHECK: # %bb.0: @@ -1350,7 +1239,6 @@ define @vsitofp_nxv8i32_nxv8f16( %va) stri ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv8f16.nxv8i32(, metadata, metadata) define @vuitofp_nxv8i32_nxv8f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv8i32_nxv8f16: ; CHECK: # %bb.0: @@ -1362,7 +1250,6 @@ define @vuitofp_nxv8i32_nxv8f16( %va) stri ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv8f32.nxv8i32(, metadata, metadata) define @vsitofp_nxv8i32_nxv8f32( %va) strictfp 
{ ; CHECK-LABEL: vsitofp_nxv8i32_nxv8f32: ; CHECK: # %bb.0: @@ -1373,7 +1260,6 @@ define @vsitofp_nxv8i32_nxv8f32( %va) str ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv8f32.nxv8i32(, metadata, metadata) define @vuitofp_nxv8i32_nxv8f32( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv8i32_nxv8f32: ; CHECK: # %bb.0: @@ -1384,7 +1270,6 @@ define @vuitofp_nxv8i32_nxv8f32( %va) str ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv8f64.nxv8i32(, metadata, metadata) define @vsitofp_nxv8i32_nxv8f64( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv8i32_nxv8f64: ; CHECK: # %bb.0: @@ -1396,7 +1281,6 @@ define @vsitofp_nxv8i32_nxv8f64( %va) st ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv8f64.nxv8i32(, metadata, metadata) define @vuitofp_nxv8i32_nxv8f64( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv8i32_nxv8f64: ; CHECK: # %bb.0: @@ -1408,7 +1292,6 @@ define @vuitofp_nxv8i32_nxv8f64( %va) st ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv16f16.nxv16i32(, metadata, metadata) define @vsitofp_nxv16i32_nxv16f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv16i32_nxv16f16: ; CHECK: # %bb.0: @@ -1420,7 +1303,6 @@ define @vsitofp_nxv16i32_nxv16f16( %va) ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv16f16.nxv16i32(, metadata, metadata) define @vuitofp_nxv16i32_nxv16f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv16i32_nxv16f16: ; CHECK: # %bb.0: @@ -1432,7 +1314,6 @@ define @vuitofp_nxv16i32_nxv16f16( %va) ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv16f32.nxv16i32(, metadata, metadata) define @vsitofp_nxv16i32_nxv16f32( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv16i32_nxv16f32: ; CHECK: # %bb.0: @@ -1443,7 +1324,6 @@ define @vsitofp_nxv16i32_nxv16f32( %va) ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv16f32.nxv16i32(, metadata, metadata) define @vuitofp_nxv16i32_nxv16f32( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv16i32_nxv16f32: ; CHECK: # %bb.0: @@ -1454,7 +1334,6 @@ define 
@vuitofp_nxv16i32_nxv16f32( %va) ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i64(, metadata, metadata) define @vsitofp_nxv1i64_nxv1f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv1i64_nxv1f16: ; CHECK: # %bb.0: @@ -1467,7 +1346,6 @@ define @vsitofp_nxv1i64_nxv1f16( %va) stri ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i64(, metadata, metadata) define @vuitofp_nxv1i64_nxv1f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv1i64_nxv1f16: ; CHECK: # %bb.0: @@ -1480,7 +1358,6 @@ define @vuitofp_nxv1i64_nxv1f16( %va) stri ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv1f32.nxv1i64(, metadata, metadata) define @vsitofp_nxv1i64_nxv1f32( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv1i64_nxv1f32: ; CHECK: # %bb.0: @@ -1492,7 +1369,6 @@ define @vsitofp_nxv1i64_nxv1f32( %va) str ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv1f32.nxv1i64(, metadata, metadata) define @vuitofp_nxv1i64_nxv1f32( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv1i64_nxv1f32: ; CHECK: # %bb.0: @@ -1504,7 +1380,6 @@ define @vuitofp_nxv1i64_nxv1f32( %va) str ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv1f64.nxv1i64(, metadata, metadata) define @vsitofp_nxv1i64_nxv1f64( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv1i64_nxv1f64: ; CHECK: # %bb.0: @@ -1515,7 +1390,6 @@ define @vsitofp_nxv1i64_nxv1f64( %va) st ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv1f64.nxv1i64(, metadata, metadata) define @vuitofp_nxv1i64_nxv1f64( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv1i64_nxv1f64: ; CHECK: # %bb.0: @@ -1526,8 +1400,6 @@ define @vuitofp_nxv1i64_nxv1f64( %va) st ret %evec } - -declare @llvm.experimental.constrained.sitofp.nxv2f16.nxv2i64(, metadata, metadata) define @vsitofp_nxv2i64_nxv2f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv2i64_nxv2f16: ; CHECK: # %bb.0: @@ -1540,7 +1412,6 @@ define @vsitofp_nxv2i64_nxv2f16( %va) stri ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv2f16.nxv2i64(, metadata, 
metadata) define @vuitofp_nxv2i64_nxv2f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv2i64_nxv2f16: ; CHECK: # %bb.0: @@ -1553,7 +1424,6 @@ define @vuitofp_nxv2i64_nxv2f16( %va) stri ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv2f32.nxv2i64(, metadata, metadata) define @vsitofp_nxv2i64_nxv2f32( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv2i64_nxv2f32: ; CHECK: # %bb.0: @@ -1565,7 +1435,6 @@ define @vsitofp_nxv2i64_nxv2f32( %va) str ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv2f32.nxv2i64(, metadata, metadata) define @vuitofp_nxv2i64_nxv2f32( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv2i64_nxv2f32: ; CHECK: # %bb.0: @@ -1577,7 +1446,6 @@ define @vuitofp_nxv2i64_nxv2f32( %va) str ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv2f64.nxv2i64(, metadata, metadata) define @vsitofp_nxv2i64_nxv2f64( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv2i64_nxv2f64: ; CHECK: # %bb.0: @@ -1588,7 +1456,6 @@ define @vsitofp_nxv2i64_nxv2f64( %va) st ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv2f64.nxv2i64(, metadata, metadata) define @vuitofp_nxv2i64_nxv2f64( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv2i64_nxv2f64: ; CHECK: # %bb.0: @@ -1599,7 +1466,6 @@ define @vuitofp_nxv2i64_nxv2f64( %va) st ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv4f16.nxv4i64(, metadata, metadata) define @vsitofp_nxv4i64_nxv4f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv4i64_nxv4f16: ; CHECK: # %bb.0: @@ -1612,7 +1478,6 @@ define @vsitofp_nxv4i64_nxv4f16( %va) stri ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv4f16.nxv4i64(, metadata, metadata) define @vuitofp_nxv4i64_nxv4f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv4i64_nxv4f16: ; CHECK: # %bb.0: @@ -1625,7 +1490,6 @@ define @vuitofp_nxv4i64_nxv4f16( %va) stri ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv4f32.nxv4i64(, metadata, metadata) define @vsitofp_nxv4i64_nxv4f32( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv4i64_nxv4f32: ; CHECK: # %bb.0: @@ 
-1637,7 +1501,6 @@ define @vsitofp_nxv4i64_nxv4f32( %va) str ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv4f32.nxv4i64(, metadata, metadata) define @vuitofp_nxv4i64_nxv4f32( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv4i64_nxv4f32: ; CHECK: # %bb.0: @@ -1649,7 +1512,6 @@ define @vuitofp_nxv4i64_nxv4f32( %va) str ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv4f64.nxv4i64(, metadata, metadata) define @vsitofp_nxv4i64_nxv4f64( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv4i64_nxv4f64: ; CHECK: # %bb.0: @@ -1660,7 +1522,6 @@ define @vsitofp_nxv4i64_nxv4f64( %va) st ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv4f64.nxv4i64(, metadata, metadata) define @vuitofp_nxv4i64_nxv4f64( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv4i64_nxv4f64: ; CHECK: # %bb.0: @@ -1671,7 +1532,6 @@ define @vuitofp_nxv4i64_nxv4f64( %va) st ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv8f16.nxv8i64(, metadata, metadata) define @vsitofp_nxv8i64_nxv8f16( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv8i64_nxv8f16: ; CHECK: # %bb.0: @@ -1684,7 +1544,6 @@ define @vsitofp_nxv8i64_nxv8f16( %va) stri ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv8f16.nxv8i64(, metadata, metadata) define @vuitofp_nxv8i64_nxv8f16( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv8i64_nxv8f16: ; CHECK: # %bb.0: @@ -1697,7 +1556,6 @@ define @vuitofp_nxv8i64_nxv8f16( %va) stri ret %evec } -declare @llvm.experimental.constrained.sitofp.nxv8f32.nxv8i64(, metadata, metadata) define @vsitofp_nxv8i64_nxv8f32( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv8i64_nxv8f32: ; CHECK: # %bb.0: @@ -1709,7 +1567,6 @@ define @vsitofp_nxv8i64_nxv8f32( %va) str ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv8f32.nxv8i64(, metadata, metadata) define @vuitofp_nxv8i64_nxv8f32( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv8i64_nxv8f32: ; CHECK: # %bb.0: @@ -1721,7 +1578,6 @@ define @vuitofp_nxv8i64_nxv8f32( %va) str ret %evec } -declare 
@llvm.experimental.constrained.sitofp.nxv8f64.nxv8i64(, metadata, metadata) define @vsitofp_nxv8i64_nxv8f64( %va) strictfp { ; CHECK-LABEL: vsitofp_nxv8i64_nxv8f64: ; CHECK: # %bb.0: @@ -1732,7 +1588,6 @@ define @vsitofp_nxv8i64_nxv8f64( %va) st ret %evec } -declare @llvm.experimental.constrained.uitofp.nxv8f64.nxv8i64(, metadata, metadata) define @vuitofp_nxv8i64_nxv8f64( %va) strictfp { ; CHECK-LABEL: vuitofp_nxv8i64_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-no-prop.ll b/llvm/test/CodeGen/RISCV/rvv/vl-opt-no-prop.ll index e1f641afd2cfe..9560249972141 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-no-prop.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-no-prop.ll @@ -2,26 +2,6 @@ ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs | FileCheck %s ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vadd.nxv4i32.nxv4i32(, , , iXLen) -declare @llvm.riscv.vrgather.vv.nxv4i32.iXLen( - , - , - , - iXLen) - -declare @llvm.riscv.vslidedown.nxv4i32( - , - , - iXLen, - iXLen, - iXLen); - -declare @llvm.riscv.vslide1down.nxv4i32.i32( - , - , - i32, - iXLen); - define @vrgather( %passthru, %a, %b, iXLen %vl1, iXLen %vl2) { ; CHECK-LABEL: vrgather: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.ll b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.ll index b4ebf5444df7c..866e4e7e11ab6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.ll @@ -27,12 +27,6 @@ entry: ret <2 x i32> %y12 } -declare @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vnsrl_wv_nxv2i16_nxv2i32_nxv2i16( %a, %b, iXLen %2, %3, %4, %z) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv2i16_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -53,12 +47,6 @@ entry: ret %x } -declare @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16( - , - , - , - iXLen, iXLen); - define @vnclip( %a, 
%b, iXLen %2, %3, %4, %z) nounwind { ; CHECK-LABEL: vnclip: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt.ll b/llvm/test/CodeGen/RISCV/rvv/vl-opt.ll index 359601150cb98..ed407dc2161e0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vl-opt.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt.ll @@ -2,8 +2,6 @@ ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs | FileCheck %s ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vadd.nxv4i32.nxv4i32(, , , iXLen) - define @different_imm_vl_with_ta( %passthru, %a, %b, iXLen %vl1, iXLen %vl2) { ; CHECK-LABEL: different_imm_vl_with_ta: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vle.ll b/llvm/test/CodeGen/RISCV/rvv/vle.ll index 0b67d683ed8be..968161dd204ab 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vle.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vle.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vle.nxv1i64( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv1i64_nxv1i64(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv1i64( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv1i64_nxv1i64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -47,11 +35,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv2i64( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv2i64_nxv2i64(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -67,13 +50,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv2i64( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv2i64_nxv2i64( %0, ptr %1, %2, iXLen %3) nounwind { ; 
CHECK-LABEL: intrinsic_vle_mask_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -90,11 +66,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv4i64( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv4i64_nxv4i64(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -110,13 +81,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv4i64( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv4i64_nxv4i64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -133,11 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv8i64( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv8i64_nxv8i64(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -153,13 +112,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv8i64( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv8i64_nxv8i64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -176,11 +128,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv1f64( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv1f64_nxv1f64(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -196,13 +143,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv1f64( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv1f64_nxv1f64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -219,11 +159,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv2f64( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv2f64_nxv2f64(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -239,13 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv2f64( - , - ptr, - , - iXLen, - iXLen); - define 
@intrinsic_vle_mask_v_nxv2f64_nxv2f64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -262,11 +190,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv4f64( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv4f64_nxv4f64(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -282,13 +205,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv4f64( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv4f64_nxv4f64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -305,11 +221,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv8f64( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv8f64_nxv8f64(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -325,13 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv8f64( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv8f64_nxv8f64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -348,11 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv1i32( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv1i32_nxv1i32(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -368,13 +267,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv1i32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv1i32_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -391,11 +283,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv2i32( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv2i32_nxv2i32(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -411,13 +298,6 @@ entry: ret %a } -declare 
@llvm.riscv.vle.mask.nxv2i32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv2i32_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -434,11 +314,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv4i32( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv4i32_nxv4i32(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -454,13 +329,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv4i32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv4i32_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -477,11 +345,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv8i32( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv8i32_nxv8i32(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -497,13 +360,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv8i32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv8i32_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -520,11 +376,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv16i32( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv16i32_nxv16i32(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -540,13 +391,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv16i32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv16i32_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -563,11 +407,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv1f32( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv1f32_nxv1f32(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv1f32_nxv1f32: ; CHECK: # 
%bb.0: # %entry @@ -583,13 +422,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv1f32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv1f32_nxv1f32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -606,11 +438,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv2f32( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv2f32_nxv2f32(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -626,13 +453,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv2f32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv2f32_nxv2f32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -649,11 +469,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv4f32( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv4f32_nxv4f32(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -669,13 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv4f32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv4f32_nxv4f32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -692,11 +500,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv8f32( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv8f32_nxv8f32(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -712,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv8f32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv8f32_nxv8f32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -735,11 +531,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv16f32( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv16f32_nxv16f32(ptr %0, iXLen %1) nounwind { ; 
CHECK-LABEL: intrinsic_vle_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -755,13 +546,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv16f32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv16f32_nxv16f32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -778,11 +562,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv1i16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv1i16_nxv1i16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -798,13 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv1i16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv1i16_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -821,11 +593,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv2i16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv2i16_nxv2i16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -841,13 +608,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv2i16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv2i16_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -864,11 +624,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv4i16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv4i16_nxv4i16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -884,13 +639,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv4i16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv4i16_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -907,11 +655,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv8i16( - , - ptr, - iXLen); - define 
@intrinsic_vle_v_nxv8i16_nxv8i16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -927,13 +670,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv8i16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv8i16_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -950,11 +686,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv16i16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv16i16_nxv16i16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -970,13 +701,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv16i16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv16i16_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -993,11 +717,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv32i16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv32i16_nxv32i16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1013,13 +732,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv32i16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv32i16_nxv32i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1036,11 +748,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv1f16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv1f16_nxv1f16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1056,13 +763,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv1f16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv1f16_nxv1f16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1079,11 +779,6 @@ 
entry: ret %a } -declare @llvm.riscv.vle.nxv2f16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv2f16_nxv2f16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -1099,13 +794,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv2f16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv2f16_nxv2f16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -1122,11 +810,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv4f16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv4f16_nxv4f16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -1142,13 +825,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv4f16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv4f16_nxv4f16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -1165,11 +841,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv8f16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv8f16_nxv8f16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -1185,13 +856,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv8f16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv8f16_nxv8f16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -1208,11 +872,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv16f16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv16f16_nxv16f16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -1228,13 +887,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv16f16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv16f16_nxv16f16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vle_mask_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -1251,11 +903,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv32f16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv32f16_nxv32f16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -1271,13 +918,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv32f16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv32f16_nxv32f16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -1294,11 +934,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv1bf16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv1bf16_nxv1bf16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -1314,13 +949,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv1bf16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv1bf16_nxv1bf16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -1337,11 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv2bf16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv2bf16_nxv2bf16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -1357,13 +980,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv2bf16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv2bf16_nxv2bf16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -1380,11 +996,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv4bf16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv4bf16_nxv4bf16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -1400,13 +1011,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv4bf16( - , - ptr, - , - iXLen, - iXLen); 
- define @intrinsic_vle_mask_v_nxv4bf16_nxv4bf16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -1423,11 +1027,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv8bf16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv8bf16_nxv8bf16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -1443,13 +1042,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv8bf16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv8bf16_nxv8bf16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -1466,11 +1058,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv16bf16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv16bf16_nxv16bf16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -1486,13 +1073,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv16bf16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv16bf16_nxv16bf16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -1509,11 +1089,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv32bf16( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv32bf16_nxv32bf16(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -1529,13 +1104,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv32bf16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv32bf16_nxv32bf16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -1552,11 +1120,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv1i8( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv1i8_nxv1i8(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # 
%entry @@ -1572,13 +1135,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv1i8( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv1i8_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1595,11 +1151,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv2i8( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv2i8_nxv2i8(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1615,13 +1166,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv2i8( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv2i8_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1638,11 +1182,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv4i8( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv4i8_nxv4i8(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1658,13 +1197,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv4i8( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv4i8_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1681,11 +1213,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv8i8( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv8i8_nxv8i8(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1701,13 +1228,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv8i8( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv8i8_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1724,11 +1244,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv16i8( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv16i8_nxv16i8(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: 
intrinsic_vle_v_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1744,13 +1259,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv16i8( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv16i8_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1767,11 +1275,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv32i8( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv32i8_nxv32i8(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1787,13 +1290,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv32i8( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv32i8_nxv32i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1810,11 +1306,6 @@ entry: ret %a } -declare @llvm.riscv.vle.nxv64i8( - , - ptr, - iXLen); - define @intrinsic_vle_v_nxv64i8_nxv64i8(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -1830,13 +1321,6 @@ entry: ret %a } -declare @llvm.riscv.vle.mask.nxv64i8( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vle_mask_v_nxv64i8_nxv64i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vle_mask_v_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll b/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll index ca3ed15bc40e4..6b6276b838fba 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll @@ -1,12 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+v -stop-after=finalize-isel -target-abi=lp64 < %s | FileCheck %s -declare { , i64 } @llvm.riscv.vleff.nxv8i8(, ptr, i64) -declare { , i64 } @llvm.riscv.vleff.mask.nxv8i8.i64(, ptr, , i64, i64 immarg) - -declare 
{target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2), ptr , i64, i64) -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) - define i64 @test_vleff_nxv8i8(ptr %p, i64 %vl) { ; CHECK-LABEL: name: test_vleff_nxv8i8 ; CHECK: bb.0.entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vleff.ll b/llvm/test/CodeGen/RISCV/rvv/vleff.ll index 924d16ac4afb6..e20acd725a095 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vleff.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vleff.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefixes=CHECK,RV64 -declare { , iXLen } @llvm.riscv.vleff.nxv1i64( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv1i64_nxv1i64(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv1i64_nxv1i64: ; RV32: # %bb.0: # %entry @@ -36,13 +31,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv1i64( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv1i64_nxv1i64( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64: ; RV32: # %bb.0: # %entry @@ -72,11 +60,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv2i64( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv2i64_nxv2i64(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv2i64_nxv2i64: ; RV32: # %bb.0: # %entry @@ -104,13 +87,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv2i64( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv2i64_nxv2i64( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv2i64_nxv2i64: ; RV32: # %bb.0: # %entry @@ -140,11 +116,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv4i64( - , - 
ptr, - iXLen); - define @intrinsic_vleff_v_nxv4i64_nxv4i64(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv4i64_nxv4i64: ; RV32: # %bb.0: # %entry @@ -172,13 +143,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv4i64( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv4i64_nxv4i64( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv4i64_nxv4i64: ; RV32: # %bb.0: # %entry @@ -208,11 +172,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv8i64( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv8i64_nxv8i64(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv8i64_nxv8i64: ; RV32: # %bb.0: # %entry @@ -240,13 +199,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv8i64( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv8i64_nxv8i64( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv8i64_nxv8i64: ; RV32: # %bb.0: # %entry @@ -276,11 +228,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv1f64( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv1f64_nxv1f64(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv1f64_nxv1f64: ; RV32: # %bb.0: # %entry @@ -308,13 +255,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv1f64( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv1f64_nxv1f64( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1f64_nxv1f64: ; RV32: # %bb.0: # %entry @@ -344,11 +284,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv2f64( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv2f64_nxv2f64(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv2f64_nxv2f64: ; RV32: # %bb.0: # %entry @@ -376,13 +311,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv2f64( - , - ptr, - , - iXLen, - iXLen); - define 
@intrinsic_vleff_mask_v_nxv2f64_nxv2f64( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv2f64_nxv2f64: ; RV32: # %bb.0: # %entry @@ -412,11 +340,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv4f64( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv4f64_nxv4f64(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv4f64_nxv4f64: ; RV32: # %bb.0: # %entry @@ -444,13 +367,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv4f64( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv4f64_nxv4f64( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv4f64_nxv4f64: ; RV32: # %bb.0: # %entry @@ -480,11 +396,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv8f64( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv8f64_nxv8f64(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv8f64_nxv8f64: ; RV32: # %bb.0: # %entry @@ -512,13 +423,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv8f64( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv8f64_nxv8f64( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv8f64_nxv8f64: ; RV32: # %bb.0: # %entry @@ -548,11 +452,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv1i32( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv1i32_nxv1i32(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv1i32_nxv1i32: ; RV32: # %bb.0: # %entry @@ -580,13 +479,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv1i32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv1i32_nxv1i32( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i32_nxv1i32: ; RV32: # %bb.0: # %entry @@ -616,11 +508,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv2i32( - , - ptr, - iXLen); - define 
@intrinsic_vleff_v_nxv2i32_nxv2i32(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv2i32_nxv2i32: ; RV32: # %bb.0: # %entry @@ -648,13 +535,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv2i32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv2i32_nxv2i32( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv2i32_nxv2i32: ; RV32: # %bb.0: # %entry @@ -684,11 +564,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv4i32( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv4i32_nxv4i32(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv4i32_nxv4i32: ; RV32: # %bb.0: # %entry @@ -716,13 +591,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv4i32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv4i32_nxv4i32( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv4i32_nxv4i32: ; RV32: # %bb.0: # %entry @@ -752,11 +620,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv8i32( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv8i32_nxv8i32(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv8i32_nxv8i32: ; RV32: # %bb.0: # %entry @@ -784,13 +647,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv8i32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv8i32_nxv8i32( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv8i32_nxv8i32: ; RV32: # %bb.0: # %entry @@ -820,11 +676,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv16i32( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv16i32_nxv16i32(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv16i32_nxv16i32: ; RV32: # %bb.0: # %entry @@ -852,13 +703,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv16i32( - , - ptr, - , - iXLen, - iXLen); - define 
@intrinsic_vleff_mask_v_nxv16i32_nxv16i32( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv16i32_nxv16i32: ; RV32: # %bb.0: # %entry @@ -888,11 +732,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv1f32( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv1f32_nxv1f32(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv1f32_nxv1f32: ; RV32: # %bb.0: # %entry @@ -920,13 +759,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv1f32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv1f32_nxv1f32( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1f32_nxv1f32: ; RV32: # %bb.0: # %entry @@ -956,11 +788,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv2f32( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv2f32_nxv2f32(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv2f32_nxv2f32: ; RV32: # %bb.0: # %entry @@ -988,13 +815,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv2f32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv2f32_nxv2f32( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv2f32_nxv2f32: ; RV32: # %bb.0: # %entry @@ -1024,11 +844,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv4f32( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv4f32_nxv4f32(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv4f32_nxv4f32: ; RV32: # %bb.0: # %entry @@ -1056,13 +871,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv4f32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv4f32_nxv4f32( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv4f32_nxv4f32: ; RV32: # %bb.0: # %entry @@ -1092,11 +900,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv8f32( - , - ptr, - iXLen); - define 
@intrinsic_vleff_v_nxv8f32_nxv8f32(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv8f32_nxv8f32: ; RV32: # %bb.0: # %entry @@ -1124,13 +927,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv8f32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv8f32_nxv8f32( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv8f32_nxv8f32: ; RV32: # %bb.0: # %entry @@ -1160,11 +956,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv16f32( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv16f32_nxv16f32(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv16f32_nxv16f32: ; RV32: # %bb.0: # %entry @@ -1192,13 +983,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv16f32( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv16f32_nxv16f32( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv16f32_nxv16f32: ; RV32: # %bb.0: # %entry @@ -1228,11 +1012,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv1i16( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv1i16_nxv1i16(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv1i16_nxv1i16: ; RV32: # %bb.0: # %entry @@ -1260,13 +1039,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv1i16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv1i16_nxv1i16( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i16_nxv1i16: ; RV32: # %bb.0: # %entry @@ -1296,11 +1068,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv2i16( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv2i16_nxv2i16(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv2i16_nxv2i16: ; RV32: # %bb.0: # %entry @@ -1328,13 +1095,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv2i16( - , - ptr, - , - iXLen, - iXLen); - define 
@intrinsic_vleff_mask_v_nxv2i16_nxv2i16( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv2i16_nxv2i16: ; RV32: # %bb.0: # %entry @@ -1364,11 +1124,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv4i16( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv4i16_nxv4i16(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv4i16_nxv4i16: ; RV32: # %bb.0: # %entry @@ -1396,13 +1151,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv4i16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv4i16_nxv4i16( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv4i16_nxv4i16: ; RV32: # %bb.0: # %entry @@ -1432,11 +1180,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv8i16( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv8i16_nxv8i16(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv8i16_nxv8i16: ; RV32: # %bb.0: # %entry @@ -1464,13 +1207,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv8i16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv8i16_nxv8i16( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv8i16_nxv8i16: ; RV32: # %bb.0: # %entry @@ -1500,11 +1236,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv16i16( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv16i16_nxv16i16(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv16i16_nxv16i16: ; RV32: # %bb.0: # %entry @@ -1532,13 +1263,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv16i16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv16i16_nxv16i16( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv16i16_nxv16i16: ; RV32: # %bb.0: # %entry @@ -1568,11 +1292,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv32i16( - , - ptr, - iXLen); - define 
@intrinsic_vleff_v_nxv32i16_nxv32i16(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv32i16_nxv32i16: ; RV32: # %bb.0: # %entry @@ -1600,13 +1319,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv32i16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv32i16_nxv32i16( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv32i16_nxv32i16: ; RV32: # %bb.0: # %entry @@ -1636,11 +1348,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv1bf16( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv1half_nxv1bf16(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv1half_nxv1bf16: ; RV32: # %bb.0: # %entry @@ -1668,13 +1375,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv1bf16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv1half_nxv1bf16( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1half_nxv1bf16: ; RV32: # %bb.0: # %entry @@ -1704,11 +1404,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv2bf16( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv2half_nxv2bf16(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv2half_nxv2bf16: ; RV32: # %bb.0: # %entry @@ -1736,13 +1431,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv2bf16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv2half_nxv2bf16( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv2half_nxv2bf16: ; RV32: # %bb.0: # %entry @@ -1772,11 +1460,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv4bf16( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv4half_nxv4bf16(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv4half_nxv4bf16: ; RV32: # %bb.0: # %entry @@ -1804,13 +1487,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv4bf16( - , - ptr, - , - iXLen, - 
iXLen); - define @intrinsic_vleff_mask_v_nxv4half_nxv4bf16( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv4half_nxv4bf16: ; RV32: # %bb.0: # %entry @@ -1840,11 +1516,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv8bf16( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv8half_nxv8bf16(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv8half_nxv8bf16: ; RV32: # %bb.0: # %entry @@ -1872,13 +1543,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv8bf16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv8half_nxv8bf16( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv8half_nxv8bf16: ; RV32: # %bb.0: # %entry @@ -1908,11 +1572,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv16bf16( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv16half_nxv16bf16(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv16half_nxv16bf16: ; RV32: # %bb.0: # %entry @@ -1940,13 +1599,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv16bf16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv16half_nxv16bf16( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv16half_nxv16bf16: ; RV32: # %bb.0: # %entry @@ -1976,11 +1628,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv32bf16( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv32half_nxv32bf16(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv32half_nxv32bf16: ; RV32: # %bb.0: # %entry @@ -2008,13 +1655,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv32bf16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv32half_nxv32bf16( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv32half_nxv32bf16: ; RV32: # %bb.0: # %entry @@ -2044,11 +1684,6 @@ entry: ret %b } -declare { , iXLen } 
@llvm.riscv.vleff.nxv1f16( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv1bfloat_nxv1f16(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv1bfloat_nxv1f16: ; RV32: # %bb.0: # %entry @@ -2076,13 +1711,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv1f16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv1bfloat_nxv1f16( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1bfloat_nxv1f16: ; RV32: # %bb.0: # %entry @@ -2112,11 +1740,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv2f16( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv2bfloat_nxv2f16(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv2bfloat_nxv2f16: ; RV32: # %bb.0: # %entry @@ -2144,13 +1767,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv2f16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv2bfloat_nxv2f16( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv2bfloat_nxv2f16: ; RV32: # %bb.0: # %entry @@ -2180,11 +1796,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv4f16( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv4bfloat_nxv4f16(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv4bfloat_nxv4f16: ; RV32: # %bb.0: # %entry @@ -2212,13 +1823,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv4f16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv4bfloat_nxv4f16( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv4bfloat_nxv4f16: ; RV32: # %bb.0: # %entry @@ -2248,11 +1852,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv8f16( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv8bfloat_nxv8f16(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv8bfloat_nxv8f16: ; RV32: # %bb.0: # %entry @@ -2280,13 +1879,6 @@ entry: ret %b } -declare { , iXLen 
} @llvm.riscv.vleff.mask.nxv8f16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv8bfloat_nxv8f16( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv8bfloat_nxv8f16: ; RV32: # %bb.0: # %entry @@ -2316,11 +1908,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv16f16( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv16bfloat_nxv16f16(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv16bfloat_nxv16f16: ; RV32: # %bb.0: # %entry @@ -2348,13 +1935,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv16f16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv16bfloat_nxv16f16( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv16bfloat_nxv16f16: ; RV32: # %bb.0: # %entry @@ -2384,11 +1964,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv32f16( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv32bfloat_nxv32f16(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv32bfloat_nxv32f16: ; RV32: # %bb.0: # %entry @@ -2416,13 +1991,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv32f16( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv32bfloat_nxv32f16( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv32bfloat_nxv32f16: ; RV32: # %bb.0: # %entry @@ -2452,11 +2020,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv1i8( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv1i8_nxv1i8(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv1i8_nxv1i8: ; RV32: # %bb.0: # %entry @@ -2484,13 +2047,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv1i8( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv1i8_nxv1i8( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i8_nxv1i8: ; RV32: # %bb.0: # %entry @@ -2520,11 +2076,6 
@@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv2i8( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv2i8_nxv2i8(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv2i8_nxv2i8: ; RV32: # %bb.0: # %entry @@ -2552,13 +2103,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv2i8( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv2i8_nxv2i8( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv2i8_nxv2i8: ; RV32: # %bb.0: # %entry @@ -2588,11 +2132,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv4i8( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv4i8_nxv4i8(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv4i8_nxv4i8: ; RV32: # %bb.0: # %entry @@ -2620,13 +2159,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv4i8( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv4i8_nxv4i8( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv4i8_nxv4i8: ; RV32: # %bb.0: # %entry @@ -2656,11 +2188,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv8i8( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv8i8_nxv8i8(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv8i8_nxv8i8: ; RV32: # %bb.0: # %entry @@ -2688,13 +2215,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv8i8( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv8i8_nxv8i8( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv8i8_nxv8i8: ; RV32: # %bb.0: # %entry @@ -2724,11 +2244,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv16i8( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv16i8_nxv16i8(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv16i8_nxv16i8: ; RV32: # %bb.0: # %entry @@ -2756,13 +2271,6 @@ entry: ret %b } -declare { , iXLen } 
@llvm.riscv.vleff.mask.nxv16i8( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv16i8_nxv16i8( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv16i8_nxv16i8: ; RV32: # %bb.0: # %entry @@ -2792,11 +2300,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv32i8( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv32i8_nxv32i8(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv32i8_nxv32i8: ; RV32: # %bb.0: # %entry @@ -2824,13 +2327,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv32i8( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv32i8_nxv32i8( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv32i8_nxv32i8: ; RV32: # %bb.0: # %entry @@ -2860,11 +2356,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.nxv64i8( - , - ptr, - iXLen); - define @intrinsic_vleff_v_nxv64i8_nxv64i8(ptr %0, iXLen %1, ptr %2) nounwind { ; RV32-LABEL: intrinsic_vleff_v_nxv64i8_nxv64i8: ; RV32: # %bb.0: # %entry @@ -2892,13 +2383,6 @@ entry: ret %b } -declare { , iXLen } @llvm.riscv.vleff.mask.nxv64i8( - , - ptr, - , - iXLen, - iXLen); - define @intrinsic_vleff_mask_v_nxv64i8_nxv64i8( %0, ptr %1, %2, iXLen %3, ptr %4) nounwind { ; RV32-LABEL: intrinsic_vleff_mask_v_nxv64i8_nxv64i8: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vlm.ll b/llvm/test/CodeGen/RISCV/rvv/vlm.ll index 7f4b777b06eb0..1cfe1c664acec 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vlm.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlm.ll @@ -4,8 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vlm.nxv1i1(ptr, iXLen); - define @intrinsic_vlm_v_nxv1i1(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vlm_v_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -17,8 +15,6 @@ entry: ret %a } -declare @llvm.riscv.vlm.nxv2i1(ptr, iXLen); - define @intrinsic_vlm_v_nxv2i1(ptr %0, 
iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vlm_v_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -30,8 +26,6 @@ entry: ret %a } -declare @llvm.riscv.vlm.nxv4i1(ptr, iXLen); - define @intrinsic_vlm_v_nxv4i1(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vlm_v_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -43,8 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vlm.nxv8i1(ptr, iXLen); - define @intrinsic_vlm_v_nxv8i1(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vlm_v_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -56,8 +48,6 @@ entry: ret %a } -declare @llvm.riscv.vlm.nxv16i1(ptr, iXLen); - define @intrinsic_vlm_v_nxv16i1(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vlm_v_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -69,8 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vlm.nxv32i1(ptr, iXLen); - define @intrinsic_vlm_v_nxv32i1(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vlm_v_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -82,8 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vlm.nxv64i1(ptr, iXLen); - define @intrinsic_vlm_v_nxv64i1(ptr %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vlm_v_nxv64i1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll index 2df7febfbc18a..d2fba96381bf2 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll @@ -4,12 +4,6 @@ ; The intrinsics are not supported with RV32. 
-declare @llvm.riscv.vloxei.nxv1i8.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -27,14 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -52,12 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i8.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -75,14 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -100,12 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i8.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -123,14 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -148,12 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i8.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -171,14 +123,6 @@ entry: ret %a } -declare 
@llvm.riscv.vloxei.mask.nxv8i8.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -196,12 +140,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i16.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -219,14 +157,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -244,12 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i16.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -267,14 +191,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -292,12 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i16.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -315,14 +225,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i64: ; CHECK: # 
%bb.0: # %entry @@ -340,12 +242,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i16.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -363,14 +259,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -388,12 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i32.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -411,14 +293,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -436,12 +310,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i32.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -459,14 +327,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -484,12 +344,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i32.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -507,14 +361,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -532,12 +378,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i32.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -555,14 +395,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -580,12 +412,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i64.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -602,14 +428,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -627,12 +445,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i64.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -649,14 +461,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64( - , - ptr, - , - , - i64, - i64); - define 
@intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -674,12 +478,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i64.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -696,14 +494,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -721,12 +511,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i64.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -743,14 +527,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -768,12 +544,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1f16.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -791,14 +561,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1f16.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -816,12 +578,6 @@ entry: ret %a } -declare 
@llvm.riscv.vloxei.nxv2f16.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -839,14 +595,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2f16.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -864,12 +612,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4f16.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -887,14 +629,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4f16.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -912,12 +646,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8f16.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -935,14 +663,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8f16.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -960,12 +680,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1bf16.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv1bf16_nxv1bf16_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1bf16_nxv1bf16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -983,14 
+697,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1bf16.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv1bf16_nxv1bf16_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1bf16_nxv1bf16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1008,12 +714,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2bf16.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv2bf16_nxv2bf16_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2bf16_nxv2bf16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1031,14 +731,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2bf16.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv2bf16_nxv2bf16_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2bf16_nxv2bf16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1056,12 +748,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4bf16.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv4bf16_nxv4bf16_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4bf16_nxv4bf16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1079,14 +765,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4bf16.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv4bf16_nxv4bf16_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4bf16_nxv4bf16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1104,12 +782,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8bf16.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv8bf16_nxv8bf16_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8bf16_nxv8bf16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1127,14 +799,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8bf16.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv8bf16_nxv8bf16_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind 
{ ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8bf16_nxv8bf16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1152,12 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1f32.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1175,14 +833,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1200,12 +850,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2f32.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1223,14 +867,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1248,12 +884,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4f32.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1271,14 +901,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1296,12 +918,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8f32.nxv8i64( - , - ptr, - , - i64); - define 
@intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1319,14 +935,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1344,12 +952,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1f64.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1366,14 +968,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1391,12 +985,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2f64.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1413,14 +1001,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1438,12 +1018,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4f64.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1460,14 +1034,6 @@ entry: ret %a } -declare 
@llvm.riscv.vloxei.mask.nxv4f64.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1485,12 +1051,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8f64.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1507,14 +1067,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vloxei.ll b/llvm/test/CodeGen/RISCV/rvv/vloxei.ll index be9faa8867a78..d76db678ced7f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vloxei.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vloxei.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vloxei.nxv1i8.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -27,14 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -52,12 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i8.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -75,14 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -100,12 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i8.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -123,14 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -148,12 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i8.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -171,14 +123,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -196,12 +140,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16i8.nxv16i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -219,14 +157,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32( - , - ptr, - , - , - iXLen, - iXLen); - define 
@intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -244,12 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i16.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -267,14 +191,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -292,12 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i16.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -315,14 +225,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -340,12 +242,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i16.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -363,14 +259,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -388,12 +276,6 @@ entry: 
ret %a } -declare @llvm.riscv.vloxei.nxv8i16.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -411,14 +293,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -436,12 +310,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16i16.nxv16i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -459,14 +327,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -484,12 +344,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i32.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -506,14 +360,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -531,12 +377,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i32.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -553,14 +393,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -578,12 +410,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i32.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -600,14 +426,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -625,12 +443,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i32.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -647,14 +459,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -672,12 +476,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16i32.nxv16i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -694,14 +492,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32( - , - ptr, - , - , - iXLen, - iXLen); - define 
@intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -719,12 +509,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i64.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -742,14 +526,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -767,12 +543,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i64.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -790,14 +560,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -815,12 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i64.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -838,14 +594,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -863,12 +611,6 @@ 
entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i64.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -886,14 +628,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -911,12 +645,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1f16.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -934,14 +662,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1f16.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -959,12 +679,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2f16.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -982,14 +696,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2f16.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1007,12 +713,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4f16.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1030,14 +730,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4f16.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1055,12 +747,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8f16.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1078,14 +764,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8f16.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1103,12 +781,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16f16.nxv16i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1126,14 +798,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16f16.nxv16i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1151,12 +815,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1f32.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1173,14 +831,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32( - , - ptr, - , - , - iXLen, - 
iXLen); - define @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1198,12 +848,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2f32.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1220,14 +864,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1245,12 +881,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4f32.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1267,14 +897,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1292,12 +914,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8f32.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1314,14 +930,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ 
-1339,12 +947,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16f32.nxv16i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1361,14 +963,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1386,12 +980,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1f64.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1409,14 +997,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1434,12 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2f64.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1457,14 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1482,12 +1048,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4f64.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i32(ptr %0, %1, iXLen %2) nounwind 
{ ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1505,14 +1065,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1530,12 +1082,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8f64.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1553,14 +1099,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1578,12 +1116,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i8.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1601,14 +1133,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1626,12 +1150,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i8.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1649,14 +1167,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - 
define @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1674,12 +1184,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i8.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1697,14 +1201,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1722,12 +1218,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i8.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1745,14 +1235,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1770,12 +1252,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16i8.nxv16i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1793,14 +1269,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1818,12 +1286,6 @@ entry: 
ret %a } -declare @llvm.riscv.vloxei.nxv32i8.nxv32i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1841,14 +1303,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1866,12 +1320,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i16.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1888,14 +1336,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1913,12 +1353,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i16.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1935,14 +1369,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1960,12 +1386,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i16.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1982,14 +1402,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2007,12 +1419,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i16.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2029,14 +1435,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2054,12 +1452,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16i16.nxv16i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2076,14 +1468,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2101,12 +1485,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv32i16.nxv32i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2123,14 +1501,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16( - , - ptr, - , - 
, - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2148,12 +1518,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i32.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2171,14 +1535,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2196,12 +1552,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i32.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2219,14 +1569,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2244,12 +1586,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i32.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2267,14 +1603,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # 
%bb.0: # %entry @@ -2292,12 +1620,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i32.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2315,14 +1637,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2340,12 +1654,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16i32.nxv16i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2363,14 +1671,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2388,12 +1688,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i64.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2411,14 +1705,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2436,12 +1722,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i64.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i16(ptr %0, 
%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2459,14 +1739,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2484,12 +1756,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i64.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2507,14 +1773,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2532,12 +1790,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i64.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2555,14 +1807,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2580,12 +1824,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1f16.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2602,14 +1840,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1f16.nxv1i16( - 
, - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2627,12 +1857,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2f16.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2649,14 +1873,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2f16.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2674,12 +1890,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4f16.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2696,14 +1906,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4f16.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2721,12 +1923,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8f16.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2743,14 +1939,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8f16.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i16: ; 
CHECK: # %bb.0: # %entry @@ -2768,12 +1956,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16f16.nxv16i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2790,14 +1972,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16f16.nxv16i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2815,12 +1989,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv32f16.nxv32i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2837,14 +2005,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv32f16.nxv32i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2862,12 +2022,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1f32.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2885,14 +2039,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2910,12 +2056,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2f32.nxv2i16( - , - ptr, - , - iXLen); - define 
@intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2933,14 +2073,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2958,12 +2090,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4f32.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2981,14 +2107,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3006,12 +2124,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8f32.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3029,14 +2141,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3054,12 +2158,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16f32.nxv16i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -3077,14 +2175,6 @@ entry: 
ret %a } -declare @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -3102,12 +2192,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1f64.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3125,14 +2209,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3150,12 +2226,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2f64.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3173,14 +2243,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3198,12 +2260,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4f64.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3221,14 +2277,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind 
{ ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3246,12 +2294,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8f64.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3269,14 +2311,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3294,12 +2328,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i8.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3316,14 +2344,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3341,12 +2361,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i8.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3363,14 +2377,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3388,12 +2394,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i8.nxv4i8( - , - ptr, - , - iXLen); - define 
@intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3410,14 +2410,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3435,12 +2427,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i8.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3457,14 +2443,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3482,12 +2460,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16i8.nxv16i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3504,14 +2476,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3529,12 +2493,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv32i8.nxv32i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3551,14 +2509,6 @@ entry: ret %a } -declare 
@llvm.riscv.vloxei.mask.nxv32i8.nxv32i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3576,12 +2526,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv64i8.nxv64i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv64i8_nxv64i8_nxv64i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -3598,14 +2542,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv64i8_nxv64i8_nxv64i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -3623,12 +2559,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i16.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3646,14 +2576,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3671,12 +2593,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i16.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3694,14 +2610,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3719,12 +2627,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i16.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3742,14 +2644,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3767,12 +2661,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i16.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3790,14 +2678,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3815,12 +2695,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16i16.nxv16i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3838,14 +2712,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3863,12 +2729,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv32i16.nxv32i8( - , - ptr, - , - iXLen); - define 
@intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3886,14 +2746,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3911,12 +2763,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i32.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3934,14 +2780,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3959,12 +2797,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i32.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3982,14 +2814,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4007,12 +2831,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i32.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4030,14 +2848,6 @@ entry: ret %a } 
-declare @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4055,12 +2865,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i32.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4078,14 +2882,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4103,12 +2899,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16i32.nxv16i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4126,14 +2916,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4151,12 +2933,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1i64.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4174,14 +2950,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4199,12 +2967,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2i64.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4222,14 +2984,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4247,12 +3001,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4i64.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4270,14 +3018,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4295,12 +3035,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8i64.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4318,14 +3052,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4343,12 +3069,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1f16.nxv1i8( - , - ptr, - , - iXLen); - define 
@intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4366,14 +3086,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1f16.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4391,12 +3103,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2f16.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4414,14 +3120,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2f16.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4439,12 +3137,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4f16.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4462,14 +3154,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4f16.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4487,12 +3171,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8f16.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4510,14 +3188,6 @@ entry: ret %a } -declare 
@llvm.riscv.vloxei.mask.nxv8f16.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4535,12 +3205,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16f16.nxv16i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4558,14 +3222,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16f16.nxv16i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4583,12 +3239,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv32f16.nxv32i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -4606,14 +3256,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv32f16.nxv32i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -4631,12 +3273,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1bf16.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1bf16_nxv1bf16_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1bf16_nxv1bf16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4654,14 +3290,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1bf16.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1bf16_nxv1bf16_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind 
{ ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1bf16_nxv1bf16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4679,12 +3307,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2bf16.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2bf16_nxv2bf16_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2bf16_nxv2bf16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4702,14 +3324,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2bf16.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2bf16_nxv2bf16_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2bf16_nxv2bf16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4727,12 +3341,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4bf16.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4bf16_nxv4bf16_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4bf16_nxv4bf16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4750,14 +3358,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4bf16.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4bf16_nxv4bf16_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4bf16_nxv4bf16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4775,12 +3375,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8bf16.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8bf16_nxv8bf16_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8bf16_nxv8bf16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -4798,14 +3392,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8bf16.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8bf16_nxv8bf16_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8bf16_nxv8bf16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -4823,12 +3409,6 @@ entry: ret %a } -declare 
@llvm.riscv.vloxei.nxv16bf16.nxv16i32( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16bf16_nxv16bf16_nxv16i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16bf16_nxv16bf16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -4846,14 +3426,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16bf16.nxv16i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16bf16_nxv16bf16_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16bf16_nxv16bf16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -4871,12 +3443,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1f32.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4894,14 +3460,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4919,12 +3477,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2f32.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4942,14 +3494,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4967,12 +3511,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4f32.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4990,14 +3528,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5015,12 +3545,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8f32.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -5038,14 +3562,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -5063,12 +3579,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv16f32.nxv16i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -5086,14 +3596,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -5111,12 +3613,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv1f64.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5134,14 +3630,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define 
@intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5159,12 +3647,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv2f64.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5182,14 +3664,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5207,12 +3681,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv4f64.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5230,14 +3698,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5255,12 +3715,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.nxv8f64.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -5278,14 +3732,6 @@ entry: ret %a } -declare @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i8: ; CHECK: # %bb.0: # %entry diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv32.ll index 6d70d191ba8b6..e6b972dd40c79 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv32.ll @@ -2,9 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zve64d,+f,+d,+zvfh,+zvfbfmin \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -31,9 +28,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -60,9 +54,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32: ; CHECK: # 
%bb.0: # %entry @@ -89,9 +80,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -118,9 +106,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -147,9 +132,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -176,9 +158,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -205,9 +184,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -234,9 +210,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -263,9 +236,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8: ; CHECK: # 
%bb.0: # %entry @@ -292,9 +262,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -321,9 +288,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -350,9 +314,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -379,9 +340,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -408,9 +366,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -437,9 +392,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -466,9 +418,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -495,9 +444,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -524,9 +470,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -553,9 +496,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -582,9 +522,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -611,9 +548,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -640,9 +574,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -669,9 +600,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -698,9 +626,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -727,9 +652,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -756,9 +678,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -785,9 +704,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -814,9 +730,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -843,9 +756,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -872,9 +782,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -901,9 +808,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -930,9 +834,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -959,9 +860,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -988,9 +886,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1017,9 +912,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1046,9 +938,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1075,9 +964,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1104,9 +990,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1133,9 +1016,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1162,9 +1042,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1191,9 +1068,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1220,9 +1094,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1249,9 +1120,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1278,9 +1146,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1307,9 +1172,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1336,9 +1198,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1365,9 +1224,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1394,9 +1250,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) 
-declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1423,9 +1276,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1452,9 +1302,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1481,9 +1328,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1510,9 +1354,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1539,9 +1380,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1568,9 +1406,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1597,9 +1432,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1626,9 +1458,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1655,9 +1484,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1684,9 +1510,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1713,9 +1536,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1742,9 +1562,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1771,9 +1588,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1800,9 +1614,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1829,9 +1640,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1858,9 +1666,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1887,9 +1692,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1916,9 +1718,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1945,9 +1744,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1974,9 +1770,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -2003,9 +1796,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2032,9 +1822,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -2061,9 +1848,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2090,9 +1874,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2119,9 +1900,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2148,9 +1926,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -2177,9 +1952,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2206,9 +1978,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -2235,9 +2004,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -2264,9 +2030,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2293,9 +2056,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -2322,9 +2082,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -2351,9 +2108,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2380,9 +2134,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -2409,9 +2160,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2438,9 +2186,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2467,9 +2212,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2496,9 +2238,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -2525,9 +2264,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2554,9 +2290,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -2583,9 +2316,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -2612,9 +2342,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2641,9 +2368,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -2670,9 +2394,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -2699,9 +2420,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2728,9 +2446,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -2757,9 +2472,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2786,9 +2498,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2815,9 +2524,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2844,9 +2550,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -2873,9 +2576,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2902,9 +2602,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -2931,9 +2628,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -2960,9 +2654,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2989,9 +2680,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -3018,9 +2706,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3047,9 +2732,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3076,9 +2758,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -3105,9 +2784,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3134,9 +2810,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -3163,9 +2836,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -3192,9 +2862,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3221,9 +2888,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i32, 
i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3250,9 +2914,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -3279,9 +2940,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3308,9 +2966,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3337,9 +2992,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -3366,9 +3018,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3395,9 +3044,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3424,9 +3070,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -3453,9 +3096,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3482,9 +3122,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3511,9 +3148,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -3540,9 +3174,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3569,9 +3200,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3598,9 +3226,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -3627,9 +3252,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3656,9 +3278,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3685,9 +3304,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -3714,9 +3330,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3743,9 +3356,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3772,9 +3382,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -3801,9 +3408,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3830,9 +3434,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3859,9 +3460,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -3888,9 +3486,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3917,9 +3512,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3946,9 +3538,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -3975,9 +3564,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4004,9 +3590,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4033,9 +3616,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4062,9 +3642,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4091,9 +3668,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4120,9 +3694,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4149,9 +3720,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4178,9 +3746,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4207,9 +3772,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4236,9 +3798,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4265,9 +3824,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4294,9 +3850,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4323,9 +3876,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4352,9 +3902,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4381,9 +3928,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4410,9 +3954,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4439,9 +3980,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4468,9 +4006,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4497,9 +4032,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4526,9 +4058,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4555,9 +4084,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4584,9 +4110,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4613,9 +4136,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4642,9 +4162,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4671,9 +4188,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4700,9 +4214,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4729,9 +4240,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4758,9 +4266,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4787,9 +4292,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4816,9 +4318,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4845,9 +4344,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4874,9 +4370,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4903,9 +4396,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4932,9 +4422,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4961,9 +4448,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4990,9 +4474,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5019,9 +4500,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5048,9 +4526,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5077,9 +4552,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5106,9 +4578,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5135,9 +4604,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -5164,9 +4630,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -5193,9 +4656,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -5222,9 +4682,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -5251,9 +4708,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) 
-declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -5280,9 +4734,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5309,9 +4760,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5338,9 +4786,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5367,9 +4812,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5396,9 +4838,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5425,9 +4864,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5454,9 +4890,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5483,9 +4916,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -5512,9 +4942,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -5541,9 +4968,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5570,9 +4994,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5599,9 +5020,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5628,9 +5046,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5657,9 +5072,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5686,9 +5098,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5715,9 +5124,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5744,9 +5150,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -5773,9 +5176,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -5802,9 +5202,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5831,9 +5228,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5860,9 +5254,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5889,9 +5280,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5918,9 +5306,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5947,9 +5332,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5976,9 +5358,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6005,9 +5384,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6034,9 +5410,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6063,9 +5436,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6092,9 +5462,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6121,9 +5488,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6150,9 +5514,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6179,9 +5540,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6208,9 +5566,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6237,9 +5592,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6266,9 +5618,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6295,9 +5644,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6324,9 +5670,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6353,9 +5696,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6382,9 +5722,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6411,9 +5748,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6440,9 +5774,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6469,9 +5800,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6498,9 +5826,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6527,9 +5852,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6556,9 +5878,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6585,9 +5904,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6614,9 +5930,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6643,9 +5956,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6672,9 +5982,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) 
-declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -6701,9 +6008,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -6730,9 +6034,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vloxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -6759,9 +6060,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6788,9 +6086,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6817,9 +6112,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6846,9 +6138,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6875,9 +6164,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6904,9 +6190,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vloxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6933,9 +6216,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6962,9 +6242,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6991,9 +6268,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7020,9 +6294,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7049,9 +6320,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7078,9 +6346,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vloxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7107,9 +6372,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7136,9 +6398,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7165,9 +6424,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vloxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7194,9 +6450,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7223,9 +6476,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7252,9 +6502,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vloxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7281,9 +6528,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7310,9 +6554,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7339,9 +6580,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vloxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7368,9 +6606,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7397,9 +6632,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7426,9 +6658,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vloxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7455,7 +6684,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7482,7 +6710,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7509,7 +6736,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7536,7 +6762,6 @@ entry: ret %1 } - define 
@test_vloxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7563,7 +6788,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7590,7 +6814,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7617,7 +6840,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -7644,7 +6866,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -7671,7 +6892,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -7698,7 +6918,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -7725,7 +6944,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -7752,7 +6970,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(ptr %base, 
%index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -7779,7 +6996,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -7806,7 +7022,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -7833,7 +7048,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -7860,7 +7074,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7887,7 +7100,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7914,7 +7126,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7941,7 +7152,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7968,7 +7178,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7995,7 +7204,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8022,7 +7230,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -8049,7 +7256,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -8076,7 +7282,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -8103,7 +7308,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -8130,7 +7334,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -8157,7 +7360,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -8184,7 +7386,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # 
%bb.0: # %entry @@ -8211,7 +7412,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8238,7 +7438,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8265,7 +7464,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8292,7 +7490,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8319,7 +7516,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8346,7 +7542,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -8373,7 +7568,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -8400,7 +7594,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -8427,7 +7620,6 @@ entry: ret %1 } - define 
@test_vloxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -8454,7 +7646,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -8481,7 +7672,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -8508,7 +7698,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8535,7 +7724,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8562,7 +7750,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8589,7 +7776,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8616,7 +7802,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8643,7 +7828,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(ptr %base, 
%index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8670,7 +7854,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -8697,7 +7880,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -8724,7 +7906,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -8751,7 +7932,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8778,7 +7958,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8805,7 +7984,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8832,7 +8010,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8859,7 +8036,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8886,7 +8062,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8913,7 +8088,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -8940,7 +8114,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -8967,7 +8140,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -8994,7 +8166,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9021,7 +8192,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9048,7 +8218,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9075,7 +8244,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # 
%bb.0: # %entry @@ -9102,7 +8270,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9129,7 +8296,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9156,7 +8322,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -9183,7 +8348,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -9210,7 +8374,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -9237,7 +8400,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9264,7 +8426,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9291,7 +8452,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9318,7 +8478,6 @@ entry: ret %1 } - define 
@test_vloxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9345,7 +8504,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9372,7 +8530,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9399,7 +8556,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -9426,7 +8582,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -9453,7 +8608,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -9480,7 +8634,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9507,7 +8660,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9534,7 +8686,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(ptr %base, 
%index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9561,7 +8712,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9588,7 +8738,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9615,7 +8764,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9642,7 +8790,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -9669,7 +8816,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -9696,7 +8842,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -9723,7 +8868,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -9750,7 +8894,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -9777,7 +8920,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -9804,7 +8946,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9831,7 +8972,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9858,7 +8998,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9885,7 +9024,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9912,7 +9050,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9939,7 +9076,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9966,7 +9102,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # 
%bb.0: # %entry @@ -9993,7 +9128,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -10020,7 +9154,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -10047,7 +9180,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10074,7 +9206,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10101,7 +9232,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10128,7 +9258,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10155,7 +9284,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10182,7 +9310,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10209,7 +9336,6 @@ entry: ret %1 } - define 
@test_vloxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -10236,7 +9362,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -10263,7 +9388,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -10290,7 +9414,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10317,7 +9440,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10344,7 +9466,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10371,7 +9492,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10398,7 +9518,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10425,7 +9544,6 @@ entry: ret %1 } - define 
@test_vloxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10452,7 +9570,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10479,7 +9596,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10506,7 +9622,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10533,7 +9648,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10560,7 +9674,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10587,7 +9700,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10614,7 +9726,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10641,7 +9752,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(ptr 
%base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10668,7 +9778,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10695,7 +9804,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10722,7 +9830,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10749,7 +9856,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10776,7 +9882,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10803,7 +9908,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10830,7 +9934,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10857,7 +9960,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10884,7 +9986,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10911,7 +10012,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10938,7 +10038,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10965,7 +10064,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10992,7 +10090,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11019,7 +10116,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11046,7 +10142,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11073,7 +10168,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11100,7 +10194,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -11127,7 +10220,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -11154,7 +10246,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -11181,7 +10272,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11208,7 +10298,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11235,7 +10324,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11262,7 +10350,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11289,7 +10376,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11316,7 +10402,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11343,7 +10428,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11370,7 +10454,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11397,7 +10480,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11424,7 +10506,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11451,7 +10532,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11478,7 +10558,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11505,7 +10584,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11532,7 +10610,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11559,7 +10636,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11586,7 +10662,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11613,7 +10688,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11640,7 +10714,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11667,7 +10740,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11694,7 +10766,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11721,7 +10792,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: 
; CHECK: # %bb.0: # %entry @@ -11748,7 +10818,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11775,7 +10844,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11802,7 +10870,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11829,7 +10896,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11856,7 +10922,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11883,7 +10948,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11910,7 +10974,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11937,7 +11000,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11964,7 +11026,6 @@ 
entry: ret %1 } - define @test_vloxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11991,7 +11052,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -12018,7 +11078,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -12045,7 +11104,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -12072,7 +11130,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -12099,7 +11156,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -12126,7 +11182,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -12153,7 +11208,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -12180,7 +11234,6 @@ entry: ret %1 } - define 
@test_vloxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -12207,7 +11260,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -12234,7 +11286,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -12261,7 +11312,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -12288,7 +11338,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -12315,7 +11364,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -12342,7 +11390,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -12369,7 +11416,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -12396,7 +11442,6 @@ entry: ret %1 } - define 
@test_vloxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -12423,7 +11468,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -12450,7 +11494,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -12477,7 +11520,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -12504,7 +11546,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -12531,7 +11572,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -12558,7 +11598,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -12585,7 +11624,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -12612,7 +11650,6 @@ entry: ret %1 } - define 
@test_vloxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -12639,7 +11676,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -12666,7 +11702,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -12693,7 +11728,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -12720,7 +11754,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -12747,7 +11780,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -12774,7 +11806,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -12801,7 +11832,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -12828,7 +11858,6 @@ entry: ret %1 } - define 
@test_vloxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -12855,7 +11884,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -12882,7 +11910,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -12909,7 +11936,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -12936,7 +11962,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -12963,7 +11988,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -12990,7 +12014,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -13017,7 +12040,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -13044,7 +12066,6 @@ entry: ret %1 } - define 
@test_vloxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -13071,7 +12092,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -13098,7 +12118,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -13125,7 +12144,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13152,7 +12170,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13179,7 +12196,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13206,7 +12222,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -13233,7 +12248,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -13260,7 +12274,6 @@ entry: ret %1 } - define 
@test_vloxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -13287,7 +12300,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -13314,7 +12326,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -13341,7 +12352,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -13368,7 +12378,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13395,7 +12404,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13422,7 +12430,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13449,7 +12456,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -13476,7 +12482,6 @@ entry: ret %1 } - define 
@test_vloxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -13503,7 +12508,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -13530,7 +12534,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -13557,7 +12560,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -13584,7 +12586,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -13611,7 +12612,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13638,7 +12638,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13665,7 +12664,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13692,7 +12690,6 @@ entry: ret %1 } - define 
@test_vloxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -13719,7 +12716,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -13746,7 +12742,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -13773,7 +12768,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -13800,7 +12794,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -13827,7 +12820,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv64.ll index 8f85eb5638255..dcd7ca608f672 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv64.ll @@ -2,9 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zve64d,+f,+d,+zvfh,+zvfbfmin \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -31,9 +28,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -60,9 +54,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -89,9 +80,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i64: ; CHECK: # 
%bb.0: # %entry @@ -118,9 +106,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -147,9 +132,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -176,9 +158,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -205,9 +184,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -234,9 +210,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -263,9 +236,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -292,9 +262,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32: ; 
CHECK: # %bb.0: # %entry @@ -321,9 +288,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -350,9 +314,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -379,9 +340,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -408,9 +366,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -437,9 +392,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -466,9 +418,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -495,9 +444,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -524,9 +470,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -553,9 +496,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -582,9 +522,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -611,9 +548,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) 
-declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -640,9 +574,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -669,9 +600,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -698,9 +626,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -727,9 +652,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -756,9 +678,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -785,9 +704,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -814,9 +730,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -843,9 +756,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -872,9 +782,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -901,9 +808,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -930,9 +834,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -959,9 +860,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -988,9 +886,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1017,9 +912,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1046,9 +938,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1075,9 +964,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1104,9 +990,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1133,9 +1016,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1162,9 +1042,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1191,9 +1068,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1220,9 +1094,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1249,9 +1120,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1278,9 +1146,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1307,9 +1172,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1336,9 +1198,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1365,9 +1224,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1394,9 +1250,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1423,9 +1276,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1452,9 +1302,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1481,9 +1328,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1510,9 +1354,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1539,9 +1380,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1568,9 +1406,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1597,9 +1432,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1626,9 +1458,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1655,9 +1484,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1684,9 +1510,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1713,9 +1536,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8(ptr %base, %index, i64 %vl) { ; 
CHECK-LABEL: test_vloxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1742,9 +1562,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1771,9 +1588,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1800,9 +1614,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1829,9 +1640,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1858,9 +1666,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1887,9 +1692,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1916,9 +1718,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1945,9 +1744,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1974,9 +1770,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2003,9 +1796,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -2032,9 +1822,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -2061,9 +1848,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -2090,9 +1874,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2119,9 +1900,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -2148,9 +1926,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -2177,9 +1952,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2206,9 +1978,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2235,9 +2004,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2264,9 +2030,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -2293,9 +2056,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -2322,9 +2082,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2351,9 +2108,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -2380,9 +2134,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -2409,9 +2160,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -2438,9 +2186,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2467,9 +2212,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -2496,9 +2238,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -2525,9 +2264,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -2554,9 +2290,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2583,9 +2316,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -2612,9 +2342,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -2641,9 +2368,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2670,9 +2394,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2699,9 +2420,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2728,9 +2446,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -2757,9 +2472,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -2786,9 +2498,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2815,9 +2524,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -2844,9 +2550,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -2873,9 +2576,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -2902,9 +2602,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2931,9 +2628,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -2960,9 +2654,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -2989,9 +2680,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3018,9 +2706,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3047,9 +2732,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -3076,9 +2758,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -3105,9 +2784,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3134,9 +2810,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3163,9 +2836,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -3192,9 +2862,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -3221,9 +2888,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3250,9 +2914,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3279,9 +2940,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -3308,9 +2966,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -3337,9 +2992,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3366,9 +3018,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3395,9 +3044,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -3424,9 +3070,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -3453,9 +3096,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3482,9 +3122,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3511,9 +3148,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -3540,9 +3174,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -3569,9 +3200,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3598,9 +3226,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3627,9 +3252,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -3656,9 +3278,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -3685,9 +3304,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3714,9 +3330,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3743,9 +3356,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -3772,9 +3382,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -3801,9 +3408,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3830,9 +3434,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3859,9 +3460,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -3888,9 +3486,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -3917,9 +3512,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3946,9 +3538,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3975,9 +3564,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -4004,9 +3590,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -4033,9 +3616,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4062,9 +3642,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i64, 
i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -4091,9 +3668,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -4120,9 +3694,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4149,9 +3720,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(ptr %base, %index, i64 %vl) { ; 
CHECK-LABEL: test_vloxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4178,9 +3746,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4207,9 +3772,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -4236,9 +3798,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4265,9 +3824,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) 
-declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4294,9 +3850,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4323,9 +3876,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -4352,9 +3902,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4381,9 +3928,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4410,9 +3954,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4439,9 +3980,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -4468,9 +4006,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4497,9 +4032,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -4526,9 +4058,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -4555,9 +4084,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -4584,9 +4110,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4613,9 +4136,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4642,9 +4162,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4671,9 +4188,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -4700,9 +4214,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4729,9 +4240,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4758,9 +4266,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4787,9 +4292,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -4816,9 +4318,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4845,9 +4344,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4874,9 +4370,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4903,9 +4396,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -4932,9 +4422,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4961,9 +4448,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -4990,9 +4474,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -5019,9 +4500,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -5048,9 +4526,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5077,9 +4552,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5106,9 +4578,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5135,9 +4604,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -5164,9 +4630,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5193,9 +4656,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5222,9 +4682,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5251,9 +4708,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -5280,9 +4734,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5309,9 +4760,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -5338,9 +4786,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -5367,9 +4812,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -5396,9 +4838,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5425,9 +4864,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5454,9 +4890,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5483,9 +4916,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -5512,9 +4942,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5541,9 +4968,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5570,9 +4994,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5599,9 +5020,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -5628,9 +5046,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5657,9 +5072,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -5686,9 +5098,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -5715,9 +5124,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -5744,9 +5150,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5773,9 +5176,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5802,9 +5202,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5831,9 +5228,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -5860,9 +5254,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5889,9 +5280,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5918,9 +5306,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5947,9 +5332,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -5976,9 +5358,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -6005,9 +5384,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -6034,9 +5410,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -6063,9 +5436,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -6092,9 +5462,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6121,9 +5488,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6150,9 +5514,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6179,9 +5540,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -6208,9 +5566,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6237,9 +5592,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6266,9 +5618,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6295,9 +5644,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -6324,9 +5670,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -6353,9 +5696,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -6382,9 +5722,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -6411,9 +5748,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -6440,9 +5774,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6469,9 +5800,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6498,9 +5826,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6527,9 +5852,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -6556,9 +5878,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6585,9 +5904,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6614,9 +5930,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6643,9 +5956,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -6672,9 +5982,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -6701,9 +6008,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -6730,9 +6034,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -6759,9 +6060,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -6788,9 +6086,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -6817,9 +6112,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -6846,9 +6138,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -6875,9 +6164,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -6904,9 +6190,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) 
-declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6933,9 +6216,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6962,9 +6242,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6991,9 +6268,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -7020,9 +6294,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7049,9 +6320,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7078,9 +6346,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7107,9 +6372,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -7136,9 +6398,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -7165,9 +6424,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -7194,9 +6450,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -7223,9 +6476,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -7252,9 +6502,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7281,9 +6528,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7310,9 +6554,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7339,9 +6580,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -7368,9 +6606,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7397,9 +6632,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7426,9 +6658,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7455,9 +6684,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -7484,9 +6710,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -7513,9 +6736,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -7542,9 +6762,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -7571,9 +6788,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -7600,9 +6814,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7629,9 +6840,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7658,9 +6866,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7687,9 +6892,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -7716,9 +6918,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7745,9 +6944,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7774,9 +6970,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7803,9 +6996,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -7832,9 +7022,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7861,9 +7048,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7890,9 +7074,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7919,9 +7100,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -7948,9 +7126,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7977,9 +7152,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8006,9 +7178,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8035,9 +7204,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -8064,9 +7230,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8093,9 +7256,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8122,9 +7282,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8151,9 +7308,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -8180,9 +7334,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8209,9 +7360,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8238,9 +7386,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8267,9 +7412,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -8296,9 +7438,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8325,9 +7464,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8354,9 +7490,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8383,9 +7516,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -8412,9 +7542,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8441,9 +7568,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8470,9 +7594,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8499,9 +7620,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -8528,9 +7646,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8557,9 +7672,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8586,9 +7698,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8615,9 +7724,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -8644,9 +7750,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8673,9 +7776,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8702,9 +7802,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8731,9 +7828,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) 
-declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -8760,9 +7854,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -8789,9 +7880,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -8818,9 +7906,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(ptr %base, %index, i64 %vl) { ; 
CHECK-LABEL: test_vloxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -8847,9 +7932,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vloxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -8876,9 +7958,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8905,9 +7984,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8934,9 +8010,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) 
-declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8963,9 +8036,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -8992,9 +8062,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9021,9 +8088,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9050,9 +8114,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9079,9 +8140,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vloxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -9108,9 +8166,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9137,9 +8192,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9166,9 +8218,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9195,9 +8244,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -9224,9 +8270,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9253,9 +8296,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9282,9 +8322,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9311,9 +8348,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vloxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -9340,9 +8374,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) 
-declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9369,9 +8400,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9398,9 +8426,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9427,9 +8452,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vloxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -9456,9 +8478,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9485,9 +8504,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9514,9 +8530,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9543,9 +8556,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vloxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -9572,9 +8582,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9601,9 +8608,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9630,9 +8634,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9659,9 +8660,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vloxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -9688,9 +8686,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9717,9 +8712,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9746,9 +8738,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9775,9 +8764,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vloxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -9804,7 +8790,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9831,7 +8816,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9858,7 +8842,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9885,7 +8868,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -9912,7 +8894,6 @@ entry: ret %1 } - define 
@test_vloxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9939,7 +8920,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9966,7 +8946,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9993,7 +8972,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -10020,7 +8998,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -10047,7 +9024,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -10074,7 +9050,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -10101,7 +9076,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -10128,7 +9102,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(ptr 
%base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -10155,7 +9128,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -10182,7 +9154,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -10209,7 +9180,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -10236,7 +9206,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -10263,7 +9232,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -10290,7 +9258,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -10317,7 +9284,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10344,7 +9310,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10371,7 +9336,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10398,7 +9362,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -10425,7 +9388,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10452,7 +9414,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10479,7 +9440,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10506,7 +9466,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -10533,7 +9492,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -10560,7 +9518,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; 
CHECK: # %bb.0: # %entry @@ -10587,7 +9544,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -10614,7 +9570,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -10641,7 +9596,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -10668,7 +9622,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -10695,7 +9648,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -10722,7 +9674,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -10749,7 +9700,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10776,7 +9726,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10803,7 +9752,6 @@ entry: ret 
%1 } - define @test_vloxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10830,7 +9778,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -10857,7 +9804,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10884,7 +9830,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10911,7 +9856,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10938,7 +9882,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -10965,7 +9908,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -10992,7 +9934,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -11019,7 +9960,6 @@ entry: ret %1 } - define 
@test_vloxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -11046,7 +9986,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -11073,7 +10012,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -11100,7 +10038,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -11127,7 +10064,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -11154,7 +10090,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -11181,7 +10116,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11208,7 +10142,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11235,7 +10168,6 @@ entry: ret %1 } - define 
@test_vloxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11262,7 +10194,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -11289,7 +10220,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11316,7 +10246,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11343,7 +10272,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11370,7 +10298,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -11397,7 +10324,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -11424,7 +10350,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -11451,7 +10376,6 @@ entry: ret %1 } - define 
@test_vloxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -11478,7 +10402,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -11505,7 +10428,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11532,7 +10454,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11559,7 +10480,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11586,7 +10506,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -11613,7 +10532,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11640,7 +10558,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11667,7 +10584,6 @@ entry: ret %1 } - define 
@test_vloxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11694,7 +10610,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -11721,7 +10636,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -11748,7 +10662,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -11775,7 +10688,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -11802,7 +10714,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -11829,7 +10740,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11856,7 +10766,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11883,7 +10792,6 @@ entry: ret %1 } - define 
@test_vloxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11910,7 +10818,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -11937,7 +10844,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11964,7 +10870,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11991,7 +10896,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -12018,7 +10922,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -12045,7 +10948,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -12072,7 +10974,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -12099,7 +11000,6 @@ entry: ret %1 } - define 
@test_vloxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -12126,7 +11026,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -12153,7 +11052,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -12180,7 +11078,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -12207,7 +11104,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -12234,7 +11130,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -12261,7 +11156,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -12288,7 +11182,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -12315,7 +11208,6 @@ entry: ret %1 } - define 
@test_vloxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -12342,7 +11234,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -12369,7 +11260,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -12396,7 +11286,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -12423,7 +11312,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -12450,7 +11338,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -12477,7 +11364,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -12504,7 +11390,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -12531,7 +11416,6 @@ entry: ret %1 } - define 
@test_vloxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -12558,7 +11442,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -12585,7 +11468,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -12612,7 +11494,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -12639,7 +11520,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -12666,7 +11546,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -12693,7 +11572,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -12720,7 +11598,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -12747,7 +11624,6 @@ entry: ret %1 } - define 
@test_vloxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -12774,7 +11650,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -12801,7 +11676,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -12828,7 +11702,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -12855,7 +11728,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -12882,7 +11754,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -12909,7 +11780,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -12936,7 +11806,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -12963,7 +11832,6 @@ entry: ret %1 } - define 
@test_vloxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -12990,7 +11858,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -13017,7 +11884,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -13044,7 +11910,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -13071,7 +11936,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -13098,7 +11962,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -13125,7 +11988,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -13152,7 +12014,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -13179,7 +12040,6 @@ entry: ret %1 } - define 
@test_vloxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -13206,7 +12066,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -13233,7 +12092,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13260,7 +12118,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13287,7 +12144,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13314,7 +12170,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -13341,7 +12196,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -13368,7 +12222,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -13395,7 +12248,6 @@ entry: ret %1 } - define 
@test_vloxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -13422,7 +12274,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -13449,7 +12300,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -13476,7 +12326,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -13503,7 +12352,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -13530,7 +12378,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -13557,7 +12404,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13584,7 +12430,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13611,7 +12456,6 @@ entry: ret %1 } - define 
@test_vloxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13638,7 +12482,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -13665,7 +12508,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -13692,7 +12534,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -13719,7 +12560,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -13746,7 +12586,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -13773,7 +12612,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13800,7 +12638,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13827,7 +12664,6 @@ entry: ret %1 } - define 
@test_vloxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13854,7 +12690,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -13881,7 +12716,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -13908,7 +12742,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -13935,7 +12768,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -13962,7 +12794,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -13989,7 +12820,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -14016,7 +12846,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -14043,7 +12872,6 @@ entry: ret %1 } - define 
@test_vloxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -14070,7 +12898,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -14097,7 +12924,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -14124,7 +12950,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -14151,7 +12976,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -14178,7 +13002,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -14205,7 +13028,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -14232,7 +13054,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -14259,7 +13080,6 @@ entry: ret %1 } - define 
@test_vloxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -14286,7 +13106,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -14313,7 +13132,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -14340,7 +13158,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -14367,7 +13184,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -14394,7 +13210,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -14421,7 +13236,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -14448,7 +13262,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -14475,7 +13288,6 @@ entry: ret %1 } - define 
@test_vloxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -14502,7 +13314,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -14529,7 +13340,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -14556,7 +13366,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -14583,7 +13392,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -14610,7 +13418,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -14637,7 +13444,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -14664,7 +13470,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -14691,7 +13496,6 @@ entry: ret %1 } - define 
@test_vloxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -14718,7 +13522,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -14745,7 +13548,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -14772,7 +13574,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -14799,7 +13600,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -14826,7 +13626,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -14853,7 +13652,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -14880,7 +13678,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -14907,7 +13704,6 @@ entry: ret %1 } - define 
@test_vloxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -14934,7 +13730,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -14961,7 +13756,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -14988,7 +13782,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -15015,7 +13808,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -15042,7 +13834,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -15069,7 +13860,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -15096,7 +13886,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -15123,7 +13912,6 @@ entry: ret %1 } - define 
@test_vloxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -15150,7 +13938,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -15177,7 +13964,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -15204,7 +13990,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -15231,7 +14016,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -15258,7 +14042,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -15285,7 +14068,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -15312,7 +14094,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -15339,7 +14120,6 @@ entry: ret %1 } - define 
@test_vloxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -15366,7 +14146,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -15393,7 +14172,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -15420,7 +14198,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -15447,7 +14224,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -15474,7 +14250,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -15501,7 +14276,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -15528,7 +14302,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -15555,7 +14328,6 @@ entry: ret %1 } - define 
@test_vloxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -15582,7 +14354,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -15609,7 +14380,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -15636,7 +14406,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -15663,7 +14432,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -15690,7 +14458,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -15717,7 +14484,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -15744,7 +14510,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -15771,7 +14536,6 @@ entry: ret %1 } - define 
@test_vloxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -15798,7 +14562,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -15825,7 +14588,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -15852,7 +14614,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -15879,7 +14640,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -15906,7 +14666,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -15933,7 +14692,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -15960,7 +14718,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -15987,7 +14744,6 @@ entry: ret %1 } - define 
@test_vloxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -16014,7 +14770,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -16041,7 +14796,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -16068,7 +14822,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -16095,7 +14848,6 @@ entry: ret %1 } - define @test_vloxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -16122,7 +14874,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -16149,7 +14900,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -16176,7 +14926,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -16203,7 +14952,6 @@ entry: ret %1 } - define 
@test_vloxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -16230,7 +14978,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -16257,7 +15004,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -16284,7 +15030,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -16311,7 +15056,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -16338,7 +15082,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -16365,7 +15108,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -16392,7 +15134,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -16419,7 +15160,6 @@ entry: ret %1 } - define 
@test_vloxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -16446,7 +15186,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -16473,7 +15212,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -16500,7 +15238,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -16527,7 +15264,6 @@ entry: ret %1 } - define @test_vloxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -16554,7 +15290,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -16581,7 +15316,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -16608,7 +15342,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -16635,7 +15368,6 @@ entry: ret %1 } - define 
@test_vloxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -16662,7 +15394,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -16689,7 +15420,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -16716,7 +15446,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -16743,7 +15472,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -16770,7 +15498,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -16797,7 +15524,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -16824,7 +15550,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -16851,7 +15576,6 @@ entry: ret %1 } - define 
@test_vloxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -16878,7 +15602,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -16905,7 +15628,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -16932,7 +15654,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -16959,7 +15680,6 @@ entry: ret %1 } - define @test_vloxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -16986,7 +15706,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -17013,7 +15732,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -17040,7 +15758,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -17067,7 +15784,6 @@ entry: ret %1 } - define 
@test_vloxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -17094,7 +15810,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -17121,7 +15836,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -17148,7 +15862,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -17175,7 +15888,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -17202,7 +15914,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -17229,7 +15940,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -17256,7 +15966,6 @@ entry: ret %1 } - define @test_vloxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -17283,7 +15992,6 @@ entry: ret %1 } - define 
@test_vloxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -17310,7 +16018,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -17337,7 +16044,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -17364,7 +16070,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -17391,7 +16096,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -17418,7 +16122,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -17445,7 +16148,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -17472,7 +16174,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -17499,7 +16200,6 @@ entry: ret %1 } - define 
@test_vloxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -17526,7 +16226,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -17553,7 +16252,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -17580,7 +16278,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -17607,7 +16304,6 @@ entry: ret %1 } - define @test_vloxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -17634,7 +16330,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -17661,7 +16356,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -17688,7 +16382,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -17715,7 +16408,6 @@ entry: ret %1 } - define 
@test_vloxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -17742,7 +16434,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -17769,7 +16460,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -17796,7 +16486,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -17823,7 +16512,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -17850,7 +16538,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -17877,7 +16564,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -17904,7 +16590,6 @@ entry: ret %1 } - define @test_vloxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -17931,7 +16616,6 @@ entry: ret %1 } - define 
@test_vloxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -17958,7 +16642,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -17985,7 +16668,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -18012,7 +16694,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -18039,7 +16720,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -18066,7 +16746,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -18093,7 +16772,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -18120,7 +16798,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -18147,7 +16824,6 @@ entry: ret %1 } - define 
@test_vloxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -18174,7 +16850,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -18201,7 +16876,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -18228,7 +16902,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -18255,7 +16928,6 @@ entry: ret %1 } - define @test_vloxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vlse.ll b/llvm/test/CodeGen/RISCV/rvv/vlse.ll index ac7be3021e633..f7c18141f3abf 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vlse.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlse.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vlse.nxv1i64( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv1i64_nxv1i64(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv1i64( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv1i64_nxv1i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv2i64( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv2i64_nxv2i64(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv2i64( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv2i64_nxv2i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv4i64( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv4i64_nxv4i64(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv4i64( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv4i64_nxv4i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv8i64( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv8i64_nxv8i64(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv8i64( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv8i64_nxv8i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv1f64( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv1f64_nxv1f64(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # 
%entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv1f64( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv1f64_nxv1f64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv2f64( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv2f64_nxv2f64(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv2f64( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv2f64_nxv2f64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv4f64( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv4f64_nxv4f64(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv4f64( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv4f64_nxv4f64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -333,12 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv8f64( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv8f64_nxv8f64(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -355,14 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv8f64( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv8f64_nxv8f64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -380,12 +268,6 @@ 
entry: ret %a } -declare @llvm.riscv.vlse.nxv1i32( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv1i32_nxv1i32(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -402,14 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv1i32( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv1i32_nxv1i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -427,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv2i32( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv2i32_nxv2i32(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -449,14 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv2i32( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv2i32_nxv2i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -474,12 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv4i32( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv4i32_nxv4i32(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -496,14 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv4i32( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv4i32_nxv4i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -521,12 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv8i32( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv8i32_nxv8i32(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -543,14 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv8i32( - , - ptr, - 
iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv8i32_nxv8i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -568,12 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv16i32( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv16i32_nxv16i32(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -590,14 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv16i32( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv16i32_nxv16i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -615,12 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv1f32( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv1f32_nxv1f32(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -637,14 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv1f32( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv1f32_nxv1f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -662,12 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv2f32( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv2f32_nxv2f32(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -684,14 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv2f32( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv2f32_nxv2f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -709,12 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv4f32( - , - ptr, - iXLen, - iXLen); - define 
@intrinsic_vlse_v_nxv4f32_nxv4f32(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -731,14 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv4f32( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv4f32_nxv4f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -756,12 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv8f32( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv8f32_nxv8f32(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -778,14 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv8f32( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv8f32_nxv8f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -803,12 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv16f32( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv16f32_nxv16f32(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -825,14 +581,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv16f32( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv16f32_nxv16f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -850,12 +598,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv1i16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv1i16_nxv1i16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -872,14 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv1i16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv1i16_nxv1i16( 
%0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -897,12 +631,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv2i16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv2i16_nxv2i16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -919,14 +647,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv2i16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv2i16_nxv2i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -944,12 +664,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv4i16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv4i16_nxv4i16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -966,14 +680,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv4i16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv4i16_nxv4i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -991,12 +697,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv8i16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv8i16_nxv8i16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1013,14 +713,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv8i16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv8i16_nxv8i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1038,12 +730,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv16i16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv16i16_nxv16i16(ptr %0, iXLen %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vlse_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1060,14 +746,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv16i16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv16i16_nxv16i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1085,12 +763,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv32i16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv32i16_nxv32i16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1107,14 +779,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv32i16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv32i16_nxv32i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1132,12 +796,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv1f16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv1f16_nxv1f16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1154,14 +812,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv1f16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv1f16_nxv1f16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1179,12 +829,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv2f16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv2f16_nxv2f16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -1201,14 +845,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv2f16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv2f16_nxv2f16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vlse_mask_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -1226,12 +862,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv4f16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv4f16_nxv4f16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -1248,14 +878,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv4f16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv4f16_nxv4f16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -1273,12 +895,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv8f16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv8f16_nxv8f16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -1295,14 +911,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv8f16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv8f16_nxv8f16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -1320,12 +928,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv16f16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv16f16_nxv16f16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -1342,14 +944,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv16f16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv16f16_nxv16f16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -1367,12 +961,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv32f16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv32f16_nxv32f16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv32f16_nxv32f16: ; 
CHECK: # %bb.0: # %entry @@ -1389,14 +977,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv32f16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv32f16_nxv32f16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -1414,12 +994,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv1bf16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv1bf16_nxv1bf16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -1436,14 +1010,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv1bf16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv1bf16_nxv1bf16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -1461,12 +1027,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv2bf16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv2bf16_nxv2bf16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -1483,14 +1043,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv2bf16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv2bf16_nxv2bf16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -1508,12 +1060,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv4bf16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv4bf16_nxv4bf16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -1530,14 +1076,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv4bf16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv4bf16_nxv4bf16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vlse_mask_v_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -1555,12 +1093,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv8bf16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv8bf16_nxv8bf16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -1577,14 +1109,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv8bf16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv8bf16_nxv8bf16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -1602,12 +1126,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv16bf16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv16bf16_nxv16bf16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -1624,14 +1142,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv16bf16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv16bf16_nxv16bf16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -1649,12 +1159,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv32bf16( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv32bf16_nxv32bf16(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -1671,14 +1175,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv32bf16( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv32bf16_nxv32bf16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -1696,12 +1192,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv1i8( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv1i8_nxv1i8(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vlse_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1718,14 +1208,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv1i8( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv1i8_nxv1i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1743,12 +1225,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv2i8( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv2i8_nxv2i8(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1765,14 +1241,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv2i8( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv2i8_nxv2i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1790,12 +1258,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv4i8( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv4i8_nxv4i8(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1812,14 +1274,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv4i8( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv4i8_nxv4i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1837,12 +1291,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv8i8( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv8i8_nxv8i8(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1859,14 +1307,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv8i8( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv8i8_nxv8i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # 
%entry @@ -1884,12 +1324,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv16i8( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv16i8_nxv16i8(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1906,14 +1340,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv16i8( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv16i8_nxv16i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1931,12 +1357,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv32i8( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv32i8_nxv32i8(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1953,14 +1373,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv32i8( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv32i8_nxv32i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1978,12 +1390,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.nxv64i8( - , - ptr, - iXLen, - iXLen); - define @intrinsic_vlse_v_nxv64i8_nxv64i8(ptr %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -2000,14 +1406,6 @@ entry: ret %a } -declare @llvm.riscv.vlse.mask.nxv64i8( - , - ptr, - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vlse_mask_v_nxv64i8_nxv64i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll index 1a5574cae96f6..7a25753e2cab9 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll @@ -2,9 +2,6 @@ ; RUN: llc -mtriple=riscv32 
-mattr=+zve64x \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr , i32, i32) -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) - define void @test_vlseg2ff_dead_value(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_dead_value: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll index 9086144f85667..d6ece0e8ef1fb 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll @@ -2,9 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zve64d,+f,+d,+zvfh,+zvfbfmin \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) - define @test_vlseg2ff_nxv1i8_triscv.vector.tuple_nxv1i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1i8_triscv.vector.tuple_nxv1i8_2t: ; CHECK: # %bb.0: # %entry @@ -41,9 +38,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) - define @test_vlseg2ff_nxv2i8_triscv.vector.tuple_nxv2i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2i8_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -80,9 +74,6 @@ entry: ret %2 } 
-declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) - define @test_vlseg2ff_nxv4i8_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4i8_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -119,9 +110,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) - define @test_vlseg2ff_nxv8i8_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv8i8_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -158,9 +146,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) - define @test_vlseg2ff_nxv16i8_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv16i8_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -197,9 +182,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) - define 
@test_vlseg2ff_nxv32i8_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv32i8_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -236,9 +218,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) - define @test_vlseg3ff_nxv1i8_triscv.vector.tuple_nxv1i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1i8_triscv.vector.tuple_nxv1i8_3t: ; CHECK: # %bb.0: # %entry @@ -276,9 +255,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) - define @test_vlseg3ff_nxv2i8_triscv.vector.tuple_nxv2i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2i8_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -316,9 +292,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) - define @test_vlseg3ff_nxv4i8_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv4i8_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -356,9 +329,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3), ptr, 
i32, i32) -declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) - define @test_vlseg3ff_nxv8i8_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv8i8_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -396,9 +366,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) - define @test_vlseg3ff_nxv16i8_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv16i8_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -436,9 +403,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) - define @test_vlseg4ff_nxv1i8_triscv.vector.tuple_nxv1i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1i8_triscv.vector.tuple_nxv1i8_4t: ; CHECK: # %bb.0: # %entry @@ -477,9 +441,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) - define @test_vlseg4ff_nxv2i8_triscv.vector.tuple_nxv2i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2i8_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # 
%bb.0: # %entry @@ -518,9 +479,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) - define @test_vlseg4ff_nxv4i8_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv4i8_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -559,9 +517,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) - define @test_vlseg4ff_nxv8i8_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv8i8_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -600,9 +555,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) - define @test_vlseg4ff_nxv16i8_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv16i8_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -641,9 +593,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), 
ptr, , i32, i32, i32) - define @test_vlseg5ff_nxv1i8_triscv.vector.tuple_nxv1i8_5t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1i8_triscv.vector.tuple_nxv1i8_5t: ; CHECK: # %bb.0: # %entry @@ -683,9 +632,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32, i32) - define @test_vlseg5ff_nxv2i8_triscv.vector.tuple_nxv2i8_5t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv2i8_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -725,9 +671,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32, i32) - define @test_vlseg5ff_nxv4i8_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv4i8_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -767,9 +710,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32, i32) - define @test_vlseg5ff_nxv8i8_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv8i8_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -809,9 +749,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 6), i32} 
@llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32, i32) - define @test_vlseg6ff_nxv1i8_triscv.vector.tuple_nxv1i8_6t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1i8_triscv.vector.tuple_nxv1i8_6t: ; CHECK: # %bb.0: # %entry @@ -852,9 +789,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32, i32) - define @test_vlseg6ff_nxv2i8_triscv.vector.tuple_nxv2i8_6t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv2i8_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -895,9 +829,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32, i32) - define @test_vlseg6ff_nxv4i8_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv4i8_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -938,9 +869,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32, i32) - define @test_vlseg6ff_nxv8i8_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %vl, ptr 
%outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv8i8_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -981,9 +909,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32, i32) - define @test_vlseg7ff_nxv1i8_triscv.vector.tuple_nxv1i8_7t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1i8_triscv.vector.tuple_nxv1i8_7t: ; CHECK: # %bb.0: # %entry @@ -1025,9 +950,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32, i32) - define @test_vlseg7ff_nxv2i8_triscv.vector.tuple_nxv2i8_7t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv2i8_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -1069,9 +991,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32, i32) - define @test_vlseg7ff_nxv4i8_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv4i8_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -1113,9 +1032,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 7), i32} 
@llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32, i32) - define @test_vlseg7ff_nxv8i8_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv8i8_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -1157,9 +1073,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32, i32) - define @test_vlseg8ff_nxv1i8_triscv.vector.tuple_nxv1i8_8t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1i8_triscv.vector.tuple_nxv1i8_8t: ; CHECK: # %bb.0: # %entry @@ -1202,9 +1115,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32, i32) - define @test_vlseg8ff_nxv2i8_triscv.vector.tuple_nxv2i8_8t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv2i8_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -1247,9 +1157,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32, i32) - define @test_vlseg8ff_nxv4i8_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv4i8_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -1292,9 +1199,6 @@ entry: ret %2 } -declare 
{target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8), ptr, i32, i32) -declare {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32, i32) - define @test_vlseg8ff_nxv8i8_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv8i8_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -1337,8 +1241,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) - define @test_vlseg2ff_nxv1i16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1i16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -1375,8 +1277,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) - define @test_vlseg2ff_nxv2i16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2i16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -1413,8 +1313,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) - define @test_vlseg2ff_nxv4i16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4i16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -1451,8 +1349,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) - define @test_vlseg2ff_nxv8i16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %vl, ptr 
%outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv8i16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -1489,8 +1385,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) - define @test_vlseg2ff_nxv16i16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv16i16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -1527,8 +1421,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) - define @test_vlseg3ff_nxv1i16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1i16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -1566,8 +1458,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) - define @test_vlseg3ff_nxv2i16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2i16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -1605,8 +1495,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) - define @test_vlseg3ff_nxv4i16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv4i16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -1644,8 +1532,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) - define @test_vlseg3ff_nxv8i16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %vl, 
ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv8i16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -1683,8 +1569,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) - define @test_vlseg4ff_nxv1i16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1i16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -1723,8 +1607,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) - define @test_vlseg4ff_nxv2i16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2i16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -1763,8 +1645,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) - define @test_vlseg4ff_nxv4i16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv4i16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -1803,8 +1683,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) - define @test_vlseg4ff_nxv8i16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv8i16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -1843,8 +1721,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32, i32) - define @test_vlseg5ff_nxv1i16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i32 %vl, 
ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1i16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -1884,8 +1760,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32, i32) - define @test_vlseg5ff_nxv2i16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv2i16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -1925,8 +1799,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32, i32) - define @test_vlseg5ff_nxv4i16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv4i16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -1966,8 +1838,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32, i32) - define @test_vlseg6ff_nxv1i16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1i16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -2008,8 +1878,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32, i32) - define @test_vlseg6ff_nxv2i16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv2i16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -2050,8 +1918,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32, i32) - define @test_vlseg6ff_nxv4i16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %vl, ptr 
%outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv4i16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -2092,8 +1958,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32, i32) - define @test_vlseg7ff_nxv1i16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1i16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -2135,8 +1999,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32, i32) - define @test_vlseg7ff_nxv2i16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv2i16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -2178,8 +2040,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32, i32) - define @test_vlseg7ff_nxv4i16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv4i16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -2221,8 +2081,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32, i32) - define @test_vlseg8ff_nxv1i16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1i16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -2265,8 +2123,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32, i32) - define @test_vlseg8ff_nxv2i16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %vl, ptr 
%outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv2i16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -2309,8 +2165,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32, i32) - define @test_vlseg8ff_nxv4i16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv4i16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -2353,8 +2207,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) - define @test_vlseg2ff_nxv1i32_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1i32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -2391,8 +2243,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) - define @test_vlseg2ff_nxv2i32_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2i32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -2429,8 +2279,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) - define @test_vlseg2ff_nxv4i32_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4i32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -2467,8 +2315,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) - define @test_vlseg2ff_nxv8i32_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %vl, ptr 
%outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv8i32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -2505,8 +2351,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) - define @test_vlseg3ff_nxv1i32_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1i32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -2544,8 +2388,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) - define @test_vlseg3ff_nxv2i32_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2i32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -2583,8 +2425,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) - define @test_vlseg3ff_nxv4i32_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv4i32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -2622,8 +2462,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) - define @test_vlseg4ff_nxv1i32_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1i32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -2662,8 +2500,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) - define @test_vlseg4ff_nxv2i32_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %vl, ptr 
%outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2i32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -2702,8 +2538,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) - define @test_vlseg4ff_nxv4i32_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv4i32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -2742,8 +2576,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32, i32) - define @test_vlseg5ff_nxv1i32_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1i32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -2783,8 +2615,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32, i32) - define @test_vlseg5ff_nxv2i32_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv2i32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -2824,8 +2654,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32, i32) - define @test_vlseg6ff_nxv1i32_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1i32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -2866,8 +2694,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32, i32) - define @test_vlseg6ff_nxv2i32_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %vl, ptr 
%outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv2i32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -2908,8 +2734,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32, i32) - define @test_vlseg7ff_nxv1i32_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1i32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -2951,8 +2775,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32, i32) - define @test_vlseg7ff_nxv2i32_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv2i32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -2994,8 +2816,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32, i32) - define @test_vlseg8ff_nxv1i32_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1i32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -3038,8 +2858,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32, i32) - define @test_vlseg8ff_nxv2i32_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv2i32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -3082,8 +2900,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) - define @test_vlseg2ff_nxv1i64_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %vl, ptr 
%outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1i64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -3120,8 +2936,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) - define @test_vlseg2ff_nxv2i64_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2i64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -3158,8 +2972,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32, i32) - define @test_vlseg2ff_nxv4i64_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4i64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -3196,8 +3008,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) - define @test_vlseg3ff_nxv1i64_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1i64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -3235,8 +3045,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i32} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32, i32) - define @test_vlseg3ff_nxv2i64_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2i64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -3274,8 +3082,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) - define @test_vlseg4ff_nxv1i64_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %vl, 
ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1i64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -3314,8 +3120,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i32} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32, i32) - define @test_vlseg4ff_nxv2i64_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2i64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -3354,8 +3158,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 5), i32} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32, i32) - define @test_vlseg5ff_nxv1i64_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1i64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -3395,8 +3197,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 6), i32} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32, i32) - define @test_vlseg6ff_nxv1i64_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1i64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -3437,8 +3237,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 7), i32} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32, i32) - define @test_vlseg7ff_nxv1i64_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1i64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -3480,8 +3278,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 8), i32} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32, i32) - define @test_vlseg8ff_nxv1i64_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %vl, 
ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1i64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -3524,7 +3320,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv1f16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1f16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -3561,7 +3356,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv2f16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2f16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -3598,7 +3392,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv4f16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4f16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -3635,7 +3428,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv8f16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv8f16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -3672,7 +3464,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv16f16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv16f16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -3709,7 +3500,6 @@ entry: ret %2 } - define @test_vlseg3ff_nxv1f16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1f16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -3747,7 +3537,6 @@ entry: ret %2 } - define @test_vlseg3ff_nxv2f16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2f16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -3785,7 +3574,6 @@ entry: ret %2 } - define @test_vlseg3ff_nxv4f16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv4f16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -3823,7 +3611,6 @@ entry: ret %2 } - define 
@test_vlseg3ff_nxv8f16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv8f16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -3861,7 +3648,6 @@ entry: ret %2 } - define @test_vlseg4ff_nxv1f16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1f16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -3900,7 +3686,6 @@ entry: ret %2 } - define @test_vlseg4ff_nxv2f16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2f16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -3939,7 +3724,6 @@ entry: ret %2 } - define @test_vlseg4ff_nxv4f16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv4f16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -3978,7 +3762,6 @@ entry: ret %2 } - define @test_vlseg4ff_nxv8f16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv8f16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -4017,7 +3800,6 @@ entry: ret %2 } - define @test_vlseg5ff_nxv1f16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1f16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -4057,7 +3839,6 @@ entry: ret %2 } - define @test_vlseg5ff_nxv2f16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv2f16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -4097,7 +3878,6 @@ entry: ret %2 } - define @test_vlseg5ff_nxv4f16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv4f16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -4137,7 +3917,6 @@ entry: ret %2 } - define @test_vlseg6ff_nxv1f16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1f16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: 
# %bb.0: # %entry @@ -4178,7 +3957,6 @@ entry: ret %2 } - define @test_vlseg6ff_nxv2f16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv2f16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -4219,7 +3997,6 @@ entry: ret %2 } - define @test_vlseg6ff_nxv4f16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv4f16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -4260,7 +4037,6 @@ entry: ret %2 } - define @test_vlseg7ff_nxv1f16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1f16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -4302,7 +4078,6 @@ entry: ret %2 } - define @test_vlseg7ff_nxv2f16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv2f16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -4344,7 +4119,6 @@ entry: ret %2 } - define @test_vlseg7ff_nxv4f16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv4f16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -4386,7 +4160,6 @@ entry: ret %2 } - define @test_vlseg8ff_nxv1f16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1f16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -4429,7 +4202,6 @@ entry: ret %2 } - define @test_vlseg8ff_nxv2f16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv2f16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -4472,7 +4244,6 @@ entry: ret %2 } - define @test_vlseg8ff_nxv4f16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv4f16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -4515,7 +4286,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv1f32_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: 
test_vlseg2ff_nxv1f32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -4552,7 +4322,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv2f32_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2f32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -4589,7 +4358,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv4f32_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4f32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -4626,7 +4394,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv8f32_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv8f32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -4663,7 +4430,6 @@ entry: ret %2 } - define @test_vlseg3ff_nxv1f32_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1f32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -4701,7 +4467,6 @@ entry: ret %2 } - define @test_vlseg3ff_nxv2f32_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2f32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -4739,7 +4504,6 @@ entry: ret %2 } - define @test_vlseg3ff_nxv4f32_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv4f32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -4777,7 +4541,6 @@ entry: ret %2 } - define @test_vlseg4ff_nxv1f32_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1f32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -4816,7 +4579,6 @@ entry: ret %2 } - define @test_vlseg4ff_nxv2f32_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2f32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -4855,7 +4617,6 @@ entry: ret %2 } - define 
@test_vlseg4ff_nxv4f32_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv4f32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -4894,7 +4655,6 @@ entry: ret %2 } - define @test_vlseg5ff_nxv1f32_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1f32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -4934,7 +4694,6 @@ entry: ret %2 } - define @test_vlseg5ff_nxv2f32_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv2f32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -4974,7 +4733,6 @@ entry: ret %2 } - define @test_vlseg6ff_nxv1f32_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1f32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -5015,7 +4773,6 @@ entry: ret %2 } - define @test_vlseg6ff_nxv2f32_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv2f32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -5056,7 +4813,6 @@ entry: ret %2 } - define @test_vlseg7ff_nxv1f32_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1f32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -5098,7 +4854,6 @@ entry: ret %2 } - define @test_vlseg7ff_nxv2f32_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv2f32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -5140,7 +4895,6 @@ entry: ret %2 } - define @test_vlseg8ff_nxv1f32_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1f32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -5183,7 +4937,6 @@ entry: ret %2 } - define @test_vlseg8ff_nxv2f32_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv2f32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # 
%bb.0: # %entry @@ -5226,7 +4979,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv1f64_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1f64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -5263,7 +5015,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv2f64_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2f64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -5300,7 +5051,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv4f64_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4f64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -5337,7 +5087,6 @@ entry: ret %2 } - define @test_vlseg3ff_nxv1f64_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1f64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -5375,7 +5124,6 @@ entry: ret %2 } - define @test_vlseg3ff_nxv2f64_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2f64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -5413,7 +5161,6 @@ entry: ret %2 } - define @test_vlseg4ff_nxv1f64_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1f64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -5452,7 +5199,6 @@ entry: ret %2 } - define @test_vlseg4ff_nxv2f64_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2f64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -5491,7 +5237,6 @@ entry: ret %2 } - define @test_vlseg5ff_nxv1f64_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1f64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -5531,7 +5276,6 @@ entry: ret %2 } - define @test_vlseg6ff_nxv1f64_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %vl, ptr %outvl) { ; 
CHECK-LABEL: test_vlseg6ff_nxv1f64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -5572,7 +5316,6 @@ entry: ret %2 } - define @test_vlseg7ff_nxv1f64_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1f64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -5614,7 +5357,6 @@ entry: ret %2 } - define @test_vlseg8ff_nxv1f64_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1f64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -5657,7 +5399,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1bf16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -5694,7 +5435,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2bf16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -5731,7 +5471,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4bf16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -5768,7 +5507,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv8bf16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -5805,7 +5543,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv16bf16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -5842,7 +5579,6 @@ entry: ret %2 } - define @test_vlseg3ff_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1bf16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -5880,7 +5616,6 @@ entry: ret %2 } - define 
@test_vlseg3ff_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2bf16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -5918,7 +5653,6 @@ entry: ret %2 } - define @test_vlseg3ff_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv4bf16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -5956,7 +5690,6 @@ entry: ret %2 } - define @test_vlseg3ff_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv8bf16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -5994,7 +5727,6 @@ entry: ret %2 } - define @test_vlseg4ff_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1bf16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -6033,7 +5765,6 @@ entry: ret %2 } - define @test_vlseg4ff_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2bf16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -6072,7 +5803,6 @@ entry: ret %2 } - define @test_vlseg4ff_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv4bf16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -6111,7 +5841,6 @@ entry: ret %2 } - define @test_vlseg4ff_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv8bf16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -6150,7 +5879,6 @@ entry: ret %2 } - define @test_vlseg5ff_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1bf16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -6190,7 +5918,6 @@ entry: ret %2 } - define @test_vlseg5ff_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: 
test_vlseg5ff_nxv2bf16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -6230,7 +5957,6 @@ entry: ret %2 } - define @test_vlseg5ff_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv4bf16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -6270,7 +5996,6 @@ entry: ret %2 } - define @test_vlseg6ff_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1bf16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -6311,7 +6036,6 @@ entry: ret %2 } - define @test_vlseg6ff_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv2bf16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -6352,7 +6076,6 @@ entry: ret %2 } - define @test_vlseg6ff_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv4bf16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -6393,7 +6116,6 @@ entry: ret %2 } - define @test_vlseg7ff_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1bf16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -6435,7 +6157,6 @@ entry: ret %2 } - define @test_vlseg7ff_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv2bf16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -6477,7 +6198,6 @@ entry: ret %2 } - define @test_vlseg7ff_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv4bf16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -6519,7 +6239,6 @@ entry: ret %2 } - define @test_vlseg8ff_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1bf16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -6562,7 +6281,6 @@ entry: ret %2 } - define 
@test_vlseg8ff_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv2bf16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -6605,7 +6323,6 @@ entry: ret %2 } - define @test_vlseg8ff_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv4bf16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll index 813208c534e31..a6100d9737010 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll @@ -2,9 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zve64x \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr , i64, i64) -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) - define void @test_vlseg2ff_dead_value(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_dead_value: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll index 7ddae4293c29f..1f763ce6b2474 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll @@ -2,9 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zve64d,+f,+d,+zvfh,+zvfbfmin \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) - define 
@test_vlseg2ff_nxv1i8_triscv.vector.tuple_nxv1i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1i8_triscv.vector.tuple_nxv1i8_2t: ; CHECK: # %bb.0: # %entry @@ -41,9 +38,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) - define @test_vlseg2ff_nxv2i8_triscv.vector.tuple_nxv2i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2i8_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -80,9 +74,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) - define @test_vlseg2ff_nxv4i8_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4i8_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -119,9 +110,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) - define @test_vlseg2ff_nxv8i8_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv8i8_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -158,9 +146,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, 
i64) -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) - define @test_vlseg2ff_nxv16i8_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv16i8_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -197,9 +182,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) - define @test_vlseg2ff_nxv32i8_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv32i8_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -236,9 +218,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) - define @test_vlseg3ff_nxv1i8_triscv.vector.tuple_nxv1i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1i8_triscv.vector.tuple_nxv1i8_3t: ; CHECK: # %bb.0: # %entry @@ -276,9 +255,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) - define @test_vlseg3ff_nxv2i8_triscv.vector.tuple_nxv2i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2i8_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # 
%bb.0: # %entry @@ -316,9 +292,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) - define @test_vlseg3ff_nxv4i8_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv4i8_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -356,9 +329,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) - define @test_vlseg3ff_nxv8i8_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv8i8_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -396,9 +366,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) - define @test_vlseg3ff_nxv16i8_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv16i8_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -436,9 +403,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), 
ptr, , i64, i64, i64) - define @test_vlseg4ff_nxv1i8_triscv.vector.tuple_nxv1i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1i8_triscv.vector.tuple_nxv1i8_4t: ; CHECK: # %bb.0: # %entry @@ -477,9 +441,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) - define @test_vlseg4ff_nxv2i8_triscv.vector.tuple_nxv2i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2i8_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -518,9 +479,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) - define @test_vlseg4ff_nxv4i8_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv4i8_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -559,9 +517,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) - define @test_vlseg4ff_nxv8i8_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv8i8_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -600,9 +555,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i64} 
@llvm.riscv.vlseg4ff.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) - define @test_vlseg4ff_nxv16i8_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv16i8_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -641,9 +593,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64, i64) - define @test_vlseg5ff_nxv1i8_triscv.vector.tuple_nxv1i8_5t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1i8_triscv.vector.tuple_nxv1i8_5t: ; CHECK: # %bb.0: # %entry @@ -683,9 +632,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64, i64) - define @test_vlseg5ff_nxv2i8_triscv.vector.tuple_nxv2i8_5t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv2i8_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -725,9 +671,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64, i64) - define @test_vlseg5ff_nxv4i8_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %vl, 
ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv4i8_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -767,9 +710,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64, i64) - define @test_vlseg5ff_nxv8i8_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv8i8_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -809,9 +749,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64, i64) - define @test_vlseg6ff_nxv1i8_triscv.vector.tuple_nxv1i8_6t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1i8_triscv.vector.tuple_nxv1i8_6t: ; CHECK: # %bb.0: # %entry @@ -852,9 +789,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64, i64) - define @test_vlseg6ff_nxv2i8_triscv.vector.tuple_nxv2i8_6t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv2i8_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -895,9 +829,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 6), i64} 
@llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64, i64) - define @test_vlseg6ff_nxv4i8_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv4i8_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -938,9 +869,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64, i64) - define @test_vlseg6ff_nxv8i8_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv8i8_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -981,9 +909,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64, i64) - define @test_vlseg7ff_nxv1i8_triscv.vector.tuple_nxv1i8_7t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1i8_triscv.vector.tuple_nxv1i8_7t: ; CHECK: # %bb.0: # %entry @@ -1025,9 +950,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64, i64) - define @test_vlseg7ff_nxv2i8_triscv.vector.tuple_nxv2i8_7t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv2i8_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -1069,9 +991,6 @@ entry: ret %2 } -declare 
{target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64, i64) - define @test_vlseg7ff_nxv4i8_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv4i8_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -1113,9 +1032,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64, i64) - define @test_vlseg7ff_nxv8i8_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv8i8_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -1157,9 +1073,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64, i64) - define @test_vlseg8ff_nxv1i8_triscv.vector.tuple_nxv1i8_8t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1i8_triscv.vector.tuple_nxv1i8_8t: ; CHECK: # %bb.0: # %entry @@ -1202,9 +1115,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64, i64) - define 
@test_vlseg8ff_nxv2i8_triscv.vector.tuple_nxv2i8_8t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv2i8_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -1247,9 +1157,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64, i64) - define @test_vlseg8ff_nxv4i8_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv4i8_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -1292,9 +1199,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64) -declare {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64, i64) - define @test_vlseg8ff_nxv8i8_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv8i8_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -1337,8 +1241,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) - define @test_vlseg2ff_nxv1i16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1i16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -1375,8 +1277,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) - define @test_vlseg2ff_nxv2i16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: 
test_vlseg2ff_nxv2i16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -1413,8 +1313,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) - define @test_vlseg2ff_nxv4i16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4i16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -1451,8 +1349,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) - define @test_vlseg2ff_nxv8i16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv8i16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -1489,8 +1385,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) - define @test_vlseg2ff_nxv16i16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv16i16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -1527,8 +1421,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) - define @test_vlseg3ff_nxv1i16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1i16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -1566,8 +1458,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) - define @test_vlseg3ff_nxv2i16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; 
CHECK-LABEL: test_vlseg3ff_nxv2i16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -1605,8 +1495,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) - define @test_vlseg3ff_nxv4i16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv4i16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -1644,8 +1532,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) - define @test_vlseg3ff_nxv8i16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv8i16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -1683,8 +1569,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) - define @test_vlseg4ff_nxv1i16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1i16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -1723,8 +1607,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) - define @test_vlseg4ff_nxv2i16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2i16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -1763,8 +1645,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) - define @test_vlseg4ff_nxv4i16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; 
CHECK-LABEL: test_vlseg4ff_nxv4i16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -1803,8 +1683,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) - define @test_vlseg4ff_nxv8i16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv8i16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -1843,8 +1721,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64, i64) - define @test_vlseg5ff_nxv1i16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1i16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -1884,8 +1760,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64, i64) - define @test_vlseg5ff_nxv2i16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv2i16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -1925,8 +1799,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64, i64) - define @test_vlseg5ff_nxv4i16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv4i16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -1966,8 +1838,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64, i64) - define @test_vlseg6ff_nxv1i16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i64 %vl, ptr %outvl) { ; 
CHECK-LABEL: test_vlseg6ff_nxv1i16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -2008,8 +1878,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64, i64) - define @test_vlseg6ff_nxv2i16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv2i16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -2050,8 +1918,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64, i64) - define @test_vlseg6ff_nxv4i16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv4i16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -2092,8 +1958,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64, i64) - define @test_vlseg7ff_nxv1i16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1i16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -2135,8 +1999,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64, i64) - define @test_vlseg7ff_nxv2i16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv2i16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -2178,8 +2040,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64, i64) - define @test_vlseg7ff_nxv4i16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %vl, ptr %outvl) { ; 
CHECK-LABEL: test_vlseg7ff_nxv4i16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -2221,8 +2081,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64, i64) - define @test_vlseg8ff_nxv1i16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1i16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -2265,8 +2123,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64, i64) - define @test_vlseg8ff_nxv2i16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv2i16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -2309,8 +2165,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64, i64) - define @test_vlseg8ff_nxv4i16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv4i16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -2353,8 +2207,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) - define @test_vlseg2ff_nxv1i32_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1i32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -2391,8 +2243,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) - define @test_vlseg2ff_nxv2i32_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; 
CHECK-LABEL: test_vlseg2ff_nxv2i32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -2429,8 +2279,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) - define @test_vlseg2ff_nxv4i32_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4i32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -2467,8 +2315,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) - define @test_vlseg2ff_nxv8i32_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv8i32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -2505,8 +2351,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) - define @test_vlseg3ff_nxv1i32_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1i32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -2544,8 +2388,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) - define @test_vlseg3ff_nxv2i32_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2i32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -2583,8 +2425,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) - define @test_vlseg3ff_nxv4i32_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %vl, ptr %outvl) 
{ ; CHECK-LABEL: test_vlseg3ff_nxv4i32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -2622,8 +2462,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) - define @test_vlseg4ff_nxv1i32_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1i32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -2662,8 +2500,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) - define @test_vlseg4ff_nxv2i32_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2i32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -2702,8 +2538,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) - define @test_vlseg4ff_nxv4i32_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv4i32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -2742,8 +2576,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64, i64) - define @test_vlseg5ff_nxv1i32_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1i32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -2783,8 +2615,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64, i64) - define @test_vlseg5ff_nxv2i32_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %vl, ptr %outvl) 
{ ; CHECK-LABEL: test_vlseg5ff_nxv2i32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -2824,8 +2654,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64, i64) - define @test_vlseg6ff_nxv1i32_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1i32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -2866,8 +2694,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64, i64) - define @test_vlseg6ff_nxv2i32_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv2i32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -2908,8 +2734,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64, i64) - define @test_vlseg7ff_nxv1i32_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1i32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -2951,8 +2775,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64, i64) - define @test_vlseg7ff_nxv2i32_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv2i32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -2994,8 +2816,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64, i64) - define @test_vlseg8ff_nxv1i32_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %vl, ptr %outvl) { ; 
CHECK-LABEL: test_vlseg8ff_nxv1i32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -3038,8 +2858,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64, i64) - define @test_vlseg8ff_nxv2i32_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv2i32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -3082,8 +2900,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) - define @test_vlseg2ff_nxv1i64_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1i64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -3120,8 +2936,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) - define @test_vlseg2ff_nxv2i64_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2i64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -3158,8 +2972,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) - define @test_vlseg2ff_nxv4i64_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4i64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -3196,8 +3008,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) - define @test_vlseg3ff_nxv1i64_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %vl, ptr %outvl) { 
; CHECK-LABEL: test_vlseg3ff_nxv1i64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -3235,8 +3045,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 3), i64} @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) - define @test_vlseg3ff_nxv2i64_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2i64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -3274,8 +3082,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) - define @test_vlseg4ff_nxv1i64_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1i64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -3314,8 +3120,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 4), i64} @llvm.riscv.vlseg4ff.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) - define @test_vlseg4ff_nxv2i64_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2i64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -3354,8 +3158,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 5), i64} @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64, i64) - define @test_vlseg5ff_nxv1i64_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1i64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -3395,8 +3197,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 6), i64} @llvm.riscv.vlseg6ff.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64, i64) - define @test_vlseg6ff_nxv1i64_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %vl, ptr %outvl) 
{ ; CHECK-LABEL: test_vlseg6ff_nxv1i64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -3437,8 +3237,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 7), i64} @llvm.riscv.vlseg7ff.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64, i64) - define @test_vlseg7ff_nxv1i64_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1i64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -3480,8 +3278,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 8), i64} @llvm.riscv.vlseg8ff.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64, i64) - define @test_vlseg8ff_nxv1i64_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1i64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -3524,7 +3320,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv1f16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1f16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -3561,7 +3356,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv2f16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2f16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -3598,7 +3392,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv4f16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4f16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -3635,7 +3428,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv8f16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv8f16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -3672,7 +3464,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv16f16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: 
test_vlseg2ff_nxv16f16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -3709,7 +3500,6 @@ entry: ret %2 } - define @test_vlseg3ff_nxv1f16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1f16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -3747,7 +3537,6 @@ entry: ret %2 } - define @test_vlseg3ff_nxv2f16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2f16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -3785,7 +3574,6 @@ entry: ret %2 } - define @test_vlseg3ff_nxv4f16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv4f16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -3823,7 +3611,6 @@ entry: ret %2 } - define @test_vlseg3ff_nxv8f16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv8f16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -3861,7 +3648,6 @@ entry: ret %2 } - define @test_vlseg4ff_nxv1f16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1f16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -3900,7 +3686,6 @@ entry: ret %2 } - define @test_vlseg4ff_nxv2f16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2f16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -3939,7 +3724,6 @@ entry: ret %2 } - define @test_vlseg4ff_nxv4f16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv4f16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -3978,7 +3762,6 @@ entry: ret %2 } - define @test_vlseg4ff_nxv8f16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv8f16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -4017,7 +3800,6 @@ entry: ret %2 } - define 
@test_vlseg5ff_nxv1f16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1f16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -4057,7 +3839,6 @@ entry: ret %2 } - define @test_vlseg5ff_nxv2f16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv2f16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -4097,7 +3878,6 @@ entry: ret %2 } - define @test_vlseg5ff_nxv4f16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv4f16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -4137,7 +3917,6 @@ entry: ret %2 } - define @test_vlseg6ff_nxv1f16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1f16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -4178,7 +3957,6 @@ entry: ret %2 } - define @test_vlseg6ff_nxv2f16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv2f16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -4219,7 +3997,6 @@ entry: ret %2 } - define @test_vlseg6ff_nxv4f16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv4f16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -4260,7 +4037,6 @@ entry: ret %2 } - define @test_vlseg7ff_nxv1f16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1f16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -4302,7 +4078,6 @@ entry: ret %2 } - define @test_vlseg7ff_nxv2f16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv2f16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -4344,7 +4119,6 @@ entry: ret %2 } - define @test_vlseg7ff_nxv4f16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv4f16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # 
%bb.0: # %entry @@ -4386,7 +4160,6 @@ entry: ret %2 } - define @test_vlseg8ff_nxv1f16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1f16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -4429,7 +4202,6 @@ entry: ret %2 } - define @test_vlseg8ff_nxv2f16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv2f16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -4472,7 +4244,6 @@ entry: ret %2 } - define @test_vlseg8ff_nxv4f16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv4f16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -4515,7 +4286,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv1f32_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1f32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -4552,7 +4322,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv2f32_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2f32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -4589,7 +4358,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv4f32_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4f32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -4626,7 +4394,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv8f32_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv8f32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -4663,7 +4430,6 @@ entry: ret %2 } - define @test_vlseg3ff_nxv1f32_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1f32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -4701,7 +4467,6 @@ entry: ret %2 } - define @test_vlseg3ff_nxv2f32_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; 
CHECK-LABEL: test_vlseg3ff_nxv2f32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -4739,7 +4504,6 @@ entry: ret %2 } - define @test_vlseg3ff_nxv4f32_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv4f32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -4777,7 +4541,6 @@ entry: ret %2 } - define @test_vlseg4ff_nxv1f32_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1f32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -4816,7 +4579,6 @@ entry: ret %2 } - define @test_vlseg4ff_nxv2f32_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2f32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -4855,7 +4617,6 @@ entry: ret %2 } - define @test_vlseg4ff_nxv4f32_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv4f32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -4894,7 +4655,6 @@ entry: ret %2 } - define @test_vlseg5ff_nxv1f32_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1f32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -4934,7 +4694,6 @@ entry: ret %2 } - define @test_vlseg5ff_nxv2f32_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv2f32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -4974,7 +4733,6 @@ entry: ret %2 } - define @test_vlseg6ff_nxv1f32_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1f32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -5015,7 +4773,6 @@ entry: ret %2 } - define @test_vlseg6ff_nxv2f32_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv2f32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -5056,7 +4813,6 @@ entry: ret %2 } - define 
@test_vlseg7ff_nxv1f32_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1f32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -5098,7 +4854,6 @@ entry: ret %2 } - define @test_vlseg7ff_nxv2f32_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv2f32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -5140,7 +4895,6 @@ entry: ret %2 } - define @test_vlseg8ff_nxv1f32_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1f32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -5183,7 +4937,6 @@ entry: ret %2 } - define @test_vlseg8ff_nxv2f32_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv2f32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -5226,7 +4979,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv1f64_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1f64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -5263,7 +5015,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv2f64_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2f64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -5300,7 +5051,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv4f64_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4f64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -5337,7 +5087,6 @@ entry: ret %2 } - define @test_vlseg3ff_nxv1f64_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1f64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -5375,7 +5124,6 @@ entry: ret %2 } - define @test_vlseg3ff_nxv2f64_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2f64_triscv.vector.tuple_nxv16i8_3t: ; 
CHECK: # %bb.0: # %entry @@ -5413,7 +5161,6 @@ entry: ret %2 } - define @test_vlseg4ff_nxv1f64_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1f64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -5452,7 +5199,6 @@ entry: ret %2 } - define @test_vlseg4ff_nxv2f64_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2f64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -5491,7 +5237,6 @@ entry: ret %2 } - define @test_vlseg5ff_nxv1f64_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1f64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -5531,7 +5276,6 @@ entry: ret %2 } - define @test_vlseg6ff_nxv1f64_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1f64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -5572,7 +5316,6 @@ entry: ret %2 } - define @test_vlseg7ff_nxv1f64_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1f64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -5614,7 +5357,6 @@ entry: ret %2 } - define @test_vlseg8ff_nxv1f64_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1f64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -5657,7 +5399,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1bf16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -5694,7 +5435,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2bf16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -5731,7 +5471,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; 
CHECK-LABEL: test_vlseg2ff_nxv4bf16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -5768,7 +5507,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv8bf16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -5805,7 +5543,6 @@ entry: ret %2 } - define @test_vlseg2ff_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv16bf16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -5842,7 +5579,6 @@ entry: ret %2 } - define @test_vlseg3ff_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1bf16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -5880,7 +5616,6 @@ entry: ret %2 } - define @test_vlseg3ff_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2bf16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -5918,7 +5653,6 @@ entry: ret %2 } - define @test_vlseg3ff_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv4bf16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -5956,7 +5690,6 @@ entry: ret %2 } - define @test_vlseg3ff_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv8bf16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -5994,7 +5727,6 @@ entry: ret %2 } - define @test_vlseg4ff_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1bf16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -6033,7 +5765,6 @@ entry: ret %2 } - define @test_vlseg4ff_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2bf16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -6072,7 +5803,6 @@ entry: ret %2 } - define 
@test_vlseg4ff_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv4bf16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -6111,7 +5841,6 @@ entry: ret %2 } - define @test_vlseg4ff_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv8bf16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -6150,7 +5879,6 @@ entry: ret %2 } - define @test_vlseg5ff_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1bf16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -6190,7 +5918,6 @@ entry: ret %2 } - define @test_vlseg5ff_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv2bf16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -6230,7 +5957,6 @@ entry: ret %2 } - define @test_vlseg5ff_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv4bf16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -6270,7 +5996,6 @@ entry: ret %2 } - define @test_vlseg6ff_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1bf16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -6311,7 +6036,6 @@ entry: ret %2 } - define @test_vlseg6ff_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv2bf16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -6352,7 +6076,6 @@ entry: ret %2 } - define @test_vlseg6ff_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv4bf16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -6393,7 +6116,6 @@ entry: ret %2 } - define @test_vlseg7ff_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: 
test_vlseg7ff_nxv1bf16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -6435,7 +6157,6 @@ entry: ret %2 } - define @test_vlseg7ff_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv2bf16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -6477,7 +6198,6 @@ entry: ret %2 } - define @test_vlseg7ff_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv4bf16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -6519,7 +6239,6 @@ entry: ret %2 } - define @test_vlseg8ff_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1bf16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -6562,7 +6281,6 @@ entry: ret %2 } - define @test_vlseg8ff_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv2bf16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -6605,7 +6323,6 @@ entry: ret %2 } - define @test_vlseg8ff_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %vl, ptr %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv4bf16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv32.ll index 0fa51c56a9a86..3f6b65b99be91 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv32.ll @@ -2,9 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zve64d,+f,+d,+zvfh,+zvfbfmin \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) - define @test_vlsseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t(ptr %base, i32 
%offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t: ; CHECK: # %bb.0: # %entry @@ -29,9 +26,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) - define @test_vlsseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -56,9 +50,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) - define @test_vlsseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -83,9 +74,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) - define @test_vlsseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -110,9 +98,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 2) 
@llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) - define @test_vlsseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -137,9 +122,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) - define @test_vlsseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -164,9 +146,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32, i32) - define @test_vlsseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t: ; CHECK: # %bb.0: # %entry @@ -191,9 +170,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32, i32) - define @test_vlsseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -218,9 +194,6 @@ entry: ret %1 } -declare 
target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32, i32) - define @test_vlsseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -245,9 +218,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32, i32) - define @test_vlsseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -272,9 +242,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32, i32) - define @test_vlsseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -299,9 +266,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32, i32) - define @test_vlsseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t(ptr 
%base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t: ; CHECK: # %bb.0: # %entry @@ -326,9 +290,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32, i32) - define @test_vlsseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -353,9 +314,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32, i32) - define @test_vlsseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -380,9 +338,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32, i32) - define @test_vlsseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -407,9 +362,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 4) 
@llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32, i32) - define @test_vlsseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -434,9 +386,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32, i32) - define @test_vlsseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t: ; CHECK: # %bb.0: # %entry @@ -461,9 +410,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32, i32) - define @test_vlsseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -488,9 +434,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32, i32) - define @test_vlsseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -515,9 +458,6 @@ entry: ret %1 } -declare 
target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32, i32) - define @test_vlsseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -542,9 +482,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32, i32) - define @test_vlsseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t: ; CHECK: # %bb.0: # %entry @@ -569,9 +506,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32, i32) - define @test_vlsseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -596,9 +530,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32, i32) - define @test_vlsseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 
%offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -623,9 +554,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32, i32) - define @test_vlsseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -650,9 +578,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32, i32) - define @test_vlsseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t: ; CHECK: # %bb.0: # %entry @@ -677,9 +602,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32, i32) - define @test_vlsseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -704,9 +626,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 7) 
@llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32, i32) - define @test_vlsseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -731,9 +650,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32, i32) - define @test_vlsseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -758,9 +674,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32, i32) - define @test_vlsseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t: ; CHECK: # %bb.0: # %entry @@ -785,9 +698,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32, i32) - define @test_vlsseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -812,9 +722,6 @@ entry: ret %1 } -declare 
target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32, i32) - define @test_vlsseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -839,9 +746,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8), ptr, i32, i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32, i32) - define @test_vlsseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -866,8 +770,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) - define @test_vlsseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -892,8 +794,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) - define @test_vlsseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -918,8 +818,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, 
i32, , i32, i32, i32) - define @test_vlsseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -944,8 +842,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) - define @test_vlsseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -970,8 +866,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) - define @test_vlsseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -996,8 +890,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32, i32) - define @test_vlsseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -1022,8 +914,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32, i32) - define @test_vlsseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -1048,8 +938,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32, i32) - 
define @test_vlsseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -1074,8 +962,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32, i32) - define @test_vlsseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -1100,8 +986,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32, i32) - define @test_vlsseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -1126,8 +1010,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32, i32) - define @test_vlsseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -1152,8 +1034,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32, i32) - define @test_vlsseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -1178,8 +1058,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32, i32) - define 
@test_vlsseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -1204,8 +1082,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32, i32) - define @test_vlsseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -1230,8 +1106,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32, i32) - define @test_vlsseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -1256,8 +1130,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32, i32) - define @test_vlsseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -1282,8 +1154,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32, i32) - define @test_vlsseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -1308,8 +1178,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32, i32) - define 
@test_vlsseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -1334,8 +1202,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32, i32) - define @test_vlsseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -1360,8 +1226,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32, i32) - define @test_vlsseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -1386,8 +1250,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32, i32) - define @test_vlsseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -1412,8 +1274,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32, i32) - define @test_vlsseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -1438,8 +1298,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32, i32) - define 
@test_vlsseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -1464,8 +1322,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32, i32) - define @test_vlsseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -1490,8 +1346,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32, i32) - define @test_vlsseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -1516,8 +1370,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) - define @test_vlsseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -1542,8 +1394,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) - define @test_vlsseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -1568,8 +1418,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) - define 
@test_vlsseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -1594,8 +1442,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) - define @test_vlsseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -1620,8 +1466,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32, i32) - define @test_vlsseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -1646,8 +1490,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32, i32) - define @test_vlsseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -1672,8 +1514,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32, i32) - define @test_vlsseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -1698,8 +1538,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32, i32) - define 
@test_vlsseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -1724,8 +1562,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32, i32) - define @test_vlsseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -1750,8 +1586,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32, i32) - define @test_vlsseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -1776,8 +1610,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32, i32) - define @test_vlsseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -1802,8 +1634,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32, i32) - define @test_vlsseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -1828,8 +1658,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32, i32) - define 
@test_vlsseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -1854,8 +1682,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32, i32) - define @test_vlsseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -1880,8 +1706,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32, i32) - define @test_vlsseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -1906,8 +1730,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32, i32) - define @test_vlsseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -1932,8 +1754,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32, i32) - define @test_vlsseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -1958,8 +1778,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32, i32) - define 
@test_vlsseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -1984,8 +1802,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) - define @test_vlsseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -2010,8 +1826,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) - define @test_vlsseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -2036,8 +1850,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32, i32) - define @test_vlsseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -2062,8 +1874,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32, i32) - define @test_vlsseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -2088,8 +1898,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32, i32) - define 
@test_vlsseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -2114,8 +1922,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32, i32) - define @test_vlsseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -2140,8 +1946,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32, i32) - define @test_vlsseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -2166,8 +1970,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32, i32) - define @test_vlsseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -2192,8 +1994,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32, i32) - define @test_vlsseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -2218,8 +2018,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32, i32) - define 
@test_vlsseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -2244,8 +2042,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32, i32) - define @test_vlsseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -2270,7 +2066,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -2295,7 +2090,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -2320,7 +2114,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -2345,7 +2138,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -2370,7 +2162,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -2395,7 +2186,6 @@ entry: ret %1 } - define @test_vlsseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -2420,7 +2210,6 @@ entry: ret %1 } - define 
@test_vlsseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -2445,7 +2234,6 @@ entry: ret %1 } - define @test_vlsseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -2470,7 +2258,6 @@ entry: ret %1 } - define @test_vlsseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -2495,7 +2282,6 @@ entry: ret %1 } - define @test_vlsseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -2520,7 +2306,6 @@ entry: ret %1 } - define @test_vlsseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -2545,7 +2330,6 @@ entry: ret %1 } - define @test_vlsseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -2570,7 +2354,6 @@ entry: ret %1 } - define @test_vlsseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -2595,7 +2378,6 @@ entry: ret %1 } - define @test_vlsseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -2620,7 +2402,6 @@ entry: ret %1 } - define @test_vlsseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: 
# %entry @@ -2645,7 +2426,6 @@ entry: ret %1 } - define @test_vlsseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -2670,7 +2450,6 @@ entry: ret %1 } - define @test_vlsseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -2695,7 +2474,6 @@ entry: ret %1 } - define @test_vlsseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -2720,7 +2498,6 @@ entry: ret %1 } - define @test_vlsseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -2745,7 +2522,6 @@ entry: ret %1 } - define @test_vlsseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -2770,7 +2546,6 @@ entry: ret %1 } - define @test_vlsseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -2795,7 +2570,6 @@ entry: ret %1 } - define @test_vlsseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -2820,7 +2594,6 @@ entry: ret %1 } - define @test_vlsseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -2845,7 +2618,6 @@ entry: ret %1 } - define @test_vlsseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: 
test_vlsseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -2870,7 +2642,6 @@ entry: ret %1 } - define @test_vlsseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -2895,7 +2666,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -2920,7 +2690,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -2945,7 +2714,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -2970,7 +2738,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -2995,7 +2762,6 @@ entry: ret %1 } - define @test_vlsseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -3020,7 +2786,6 @@ entry: ret %1 } - define @test_vlsseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -3045,7 +2810,6 @@ entry: ret %1 } - define @test_vlsseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -3070,7 +2834,6 @@ entry: ret %1 } - define 
@test_vlsseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -3095,7 +2858,6 @@ entry: ret %1 } - define @test_vlsseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -3120,7 +2882,6 @@ entry: ret %1 } - define @test_vlsseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -3145,7 +2906,6 @@ entry: ret %1 } - define @test_vlsseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -3170,7 +2930,6 @@ entry: ret %1 } - define @test_vlsseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -3195,7 +2954,6 @@ entry: ret %1 } - define @test_vlsseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -3220,7 +2978,6 @@ entry: ret %1 } - define @test_vlsseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -3245,7 +3002,6 @@ entry: ret %1 } - define @test_vlsseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -3270,7 +3026,6 @@ entry: ret %1 } - define @test_vlsseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # 
%entry @@ -3295,7 +3050,6 @@ entry: ret %1 } - define @test_vlsseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -3320,7 +3074,6 @@ entry: ret %1 } - define @test_vlsseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -3345,7 +3098,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -3370,7 +3122,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -3395,7 +3146,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -3420,7 +3170,6 @@ entry: ret %1 } - define @test_vlsseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -3445,7 +3194,6 @@ entry: ret %1 } - define @test_vlsseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -3470,7 +3218,6 @@ entry: ret %1 } - define @test_vlsseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -3495,7 +3242,6 @@ entry: ret %1 } - define @test_vlsseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: 
test_vlsseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -3520,7 +3266,6 @@ entry: ret %1 } - define @test_vlsseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -3545,7 +3290,6 @@ entry: ret %1 } - define @test_vlsseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -3570,7 +3314,6 @@ entry: ret %1 } - define @test_vlsseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -3595,7 +3338,6 @@ entry: ret %1 } - define @test_vlsseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -3620,7 +3362,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -3645,7 +3386,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -3670,7 +3410,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -3695,7 +3434,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -3720,7 +3458,6 @@ entry: ret %1 } - define 
@test_vlsseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -3745,7 +3482,6 @@ entry: ret %1 } - define @test_vlsseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -3770,7 +3506,6 @@ entry: ret %1 } - define @test_vlsseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -3795,7 +3530,6 @@ entry: ret %1 } - define @test_vlsseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -3820,7 +3554,6 @@ entry: ret %1 } - define @test_vlsseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -3845,7 +3578,6 @@ entry: ret %1 } - define @test_vlsseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -3870,7 +3602,6 @@ entry: ret %1 } - define @test_vlsseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -3895,7 +3626,6 @@ entry: ret %1 } - define @test_vlsseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -3920,7 +3650,6 @@ entry: ret %1 } - define @test_vlsseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: 
test_vlsseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -3945,7 +3674,6 @@ entry: ret %1 } - define @test_vlsseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -3970,7 +3698,6 @@ entry: ret %1 } - define @test_vlsseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -3995,7 +3722,6 @@ entry: ret %1 } - define @test_vlsseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -4020,7 +3746,6 @@ entry: ret %1 } - define @test_vlsseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -4045,7 +3770,6 @@ entry: ret %1 } - define @test_vlsseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -4070,7 +3794,6 @@ entry: ret %1 } - define @test_vlsseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -4095,7 +3818,6 @@ entry: ret %1 } - define @test_vlsseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -4120,7 +3842,6 @@ entry: ret %1 } - define @test_vlsseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -4145,7 +3866,6 @@ entry: ret %1 } - define 
@test_vlsseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -4170,7 +3890,6 @@ entry: ret %1 } - define @test_vlsseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -4195,7 +3914,6 @@ entry: ret %1 } - define @test_vlsseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -4220,7 +3938,6 @@ entry: ret %1 } - define @test_vlsseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv64.ll index d2cb825f9426c..aa4c3e40a04cb 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv64.ll @@ -2,9 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zve64d,+f,+d,+zvfh,+zvfbfmin \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) - define @test_vlsseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t: ; CHECK: # %bb.0: # %entry @@ -29,9 +26,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 2) 
@llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) - define @test_vlsseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -56,9 +50,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) - define @test_vlsseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -83,9 +74,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) - define @test_vlsseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -110,9 +98,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) - define @test_vlsseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -137,9 +122,6 @@ entry: ret %1 } -declare 
target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) - define @test_vlsseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -164,9 +146,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64, i64) - define @test_vlsseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t: ; CHECK: # %bb.0: # %entry @@ -191,9 +170,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64, i64) - define @test_vlsseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -218,9 +194,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64, i64) - define @test_vlsseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t(ptr 
%base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -245,9 +218,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64, i64) - define @test_vlsseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -272,9 +242,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64, i64) - define @test_vlsseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -299,9 +266,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64, i64) - define @test_vlsseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t: ; CHECK: # %bb.0: # %entry @@ -326,9 +290,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 4) 
@llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64, i64) - define @test_vlsseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -353,9 +314,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64, i64) - define @test_vlsseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -380,9 +338,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64, i64) - define @test_vlsseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -407,9 +362,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64, i64) - define @test_vlsseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -434,9 +386,6 @@ entry: ret %1 } -declare 
target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64, i64) - define @test_vlsseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t: ; CHECK: # %bb.0: # %entry @@ -461,9 +410,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64, i64) - define @test_vlsseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -488,9 +434,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64, i64) - define @test_vlsseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -515,9 +458,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64, i64) - define @test_vlsseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 
%offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -542,9 +482,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64, i64) - define @test_vlsseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t: ; CHECK: # %bb.0: # %entry @@ -569,9 +506,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64, i64) - define @test_vlsseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -596,9 +530,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64, i64) - define @test_vlsseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -623,9 +554,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 6) 
@llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64, i64) - define @test_vlsseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -650,9 +578,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64, i64) - define @test_vlsseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t: ; CHECK: # %bb.0: # %entry @@ -677,9 +602,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64, i64) - define @test_vlsseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -704,9 +626,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64, i64) - define @test_vlsseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -731,9 +650,6 @@ entry: ret %1 } -declare 
target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64, i64) - define @test_vlsseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -758,9 +674,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64, i64) - define @test_vlsseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t: ; CHECK: # %bb.0: # %entry @@ -785,9 +698,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64, i64) - define @test_vlsseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -812,9 +722,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64, i64) - define @test_vlsseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 
%offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -839,9 +746,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64, i64) - define @test_vlsseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -866,8 +770,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) - define @test_vlsseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -892,8 +794,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) - define @test_vlsseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -918,8 +818,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) - define @test_vlsseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -944,8 +842,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) 
@llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) - define @test_vlsseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -970,8 +866,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) - define @test_vlsseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -996,8 +890,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64, i64) - define @test_vlsseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -1022,8 +914,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64, i64) - define @test_vlsseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -1048,8 +938,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64, i64) - define @test_vlsseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -1074,8 +962,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) 
@llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64, i64) - define @test_vlsseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -1100,8 +986,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64, i64) - define @test_vlsseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -1126,8 +1010,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64, i64) - define @test_vlsseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -1152,8 +1034,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64, i64) - define @test_vlsseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -1178,8 +1058,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64, i64) - define @test_vlsseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -1204,8 +1082,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) 
@llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64, i64) - define @test_vlsseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -1230,8 +1106,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64, i64) - define @test_vlsseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -1256,8 +1130,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64, i64) - define @test_vlsseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -1282,8 +1154,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64, i64) - define @test_vlsseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -1308,8 +1178,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64, i64) - define @test_vlsseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -1334,8 +1202,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) 
@llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64, i64) - define @test_vlsseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -1360,8 +1226,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64, i64) - define @test_vlsseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -1386,8 +1250,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64, i64) - define @test_vlsseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -1412,8 +1274,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64, i64) - define @test_vlsseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -1438,8 +1298,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64, i64) - define @test_vlsseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -1464,8 +1322,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) 
@llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64, i64) - define @test_vlsseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -1490,8 +1346,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64, i64) - define @test_vlsseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -1516,8 +1370,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) - define @test_vlsseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -1542,8 +1394,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) - define @test_vlsseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -1568,8 +1418,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) - define @test_vlsseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -1594,8 +1442,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) 
@llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) - define @test_vlsseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -1620,8 +1466,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64, i64) - define @test_vlsseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -1646,8 +1490,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64, i64) - define @test_vlsseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -1672,8 +1514,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64, i64) - define @test_vlsseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -1698,8 +1538,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64, i64) - define @test_vlsseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -1724,8 +1562,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) 
@llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64, i64) - define @test_vlsseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -1750,8 +1586,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64, i64) - define @test_vlsseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -1776,8 +1610,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64, i64) - define @test_vlsseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -1802,8 +1634,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64, i64) - define @test_vlsseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -1828,8 +1658,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64, i64) - define @test_vlsseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -1854,8 +1682,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) 
@llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64, i64) - define @test_vlsseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -1880,8 +1706,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64, i64) - define @test_vlsseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -1906,8 +1730,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64, i64) - define @test_vlsseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -1932,8 +1754,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64, i64) - define @test_vlsseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -1958,8 +1778,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64, i64) - define @test_vlsseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -1984,8 +1802,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) 
@llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) - define @test_vlsseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -2010,8 +1826,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) - define @test_vlsseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -2036,8 +1850,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) - define @test_vlsseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -2062,8 +1874,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64, i64) - define @test_vlsseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -2088,8 +1898,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64, i64) - define @test_vlsseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -2114,8 +1922,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) 
@llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64, i64) - define @test_vlsseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -2140,8 +1946,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64, i64) - define @test_vlsseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -2166,8 +1970,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64, i64) - define @test_vlsseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -2192,8 +1994,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64, i64) - define @test_vlsseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -2218,8 +2018,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64, i64) - define @test_vlsseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -2244,8 +2042,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) 
@llvm.riscv.vlsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64, i64) - define @test_vlsseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -2270,7 +2066,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -2295,7 +2090,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -2320,7 +2114,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -2345,7 +2138,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -2370,7 +2162,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -2395,7 +2186,6 @@ entry: ret %1 } - define @test_vlsseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -2420,7 +2210,6 @@ entry: ret %1 } - define @test_vlsseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -2445,7 +2234,6 @@ entry: ret %1 } - define 
@test_vlsseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -2470,7 +2258,6 @@ entry: ret %1 } - define @test_vlsseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -2495,7 +2282,6 @@ entry: ret %1 } - define @test_vlsseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -2520,7 +2306,6 @@ entry: ret %1 } - define @test_vlsseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -2545,7 +2330,6 @@ entry: ret %1 } - define @test_vlsseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -2570,7 +2354,6 @@ entry: ret %1 } - define @test_vlsseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -2595,7 +2378,6 @@ entry: ret %1 } - define @test_vlsseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -2620,7 +2402,6 @@ entry: ret %1 } - define @test_vlsseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -2645,7 +2426,6 @@ entry: ret %1 } - define @test_vlsseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: 
# %entry @@ -2670,7 +2450,6 @@ entry: ret %1 } - define @test_vlsseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -2695,7 +2474,6 @@ entry: ret %1 } - define @test_vlsseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -2720,7 +2498,6 @@ entry: ret %1 } - define @test_vlsseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -2745,7 +2522,6 @@ entry: ret %1 } - define @test_vlsseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -2770,7 +2546,6 @@ entry: ret %1 } - define @test_vlsseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -2795,7 +2570,6 @@ entry: ret %1 } - define @test_vlsseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -2820,7 +2594,6 @@ entry: ret %1 } - define @test_vlsseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -2845,7 +2618,6 @@ entry: ret %1 } - define @test_vlsseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -2870,7 +2642,6 @@ entry: ret %1 } - define @test_vlsseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: 
test_vlsseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -2895,7 +2666,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -2920,7 +2690,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -2945,7 +2714,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -2970,7 +2738,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -2995,7 +2762,6 @@ entry: ret %1 } - define @test_vlsseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -3020,7 +2786,6 @@ entry: ret %1 } - define @test_vlsseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -3045,7 +2810,6 @@ entry: ret %1 } - define @test_vlsseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -3070,7 +2834,6 @@ entry: ret %1 } - define @test_vlsseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -3095,7 +2858,6 @@ entry: ret %1 } - define 
@test_vlsseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -3120,7 +2882,6 @@ entry: ret %1 } - define @test_vlsseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -3145,7 +2906,6 @@ entry: ret %1 } - define @test_vlsseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -3170,7 +2930,6 @@ entry: ret %1 } - define @test_vlsseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -3195,7 +2954,6 @@ entry: ret %1 } - define @test_vlsseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -3220,7 +2978,6 @@ entry: ret %1 } - define @test_vlsseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -3245,7 +3002,6 @@ entry: ret %1 } - define @test_vlsseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -3270,7 +3026,6 @@ entry: ret %1 } - define @test_vlsseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -3295,7 +3050,6 @@ entry: ret %1 } - define @test_vlsseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # 
%entry @@ -3320,7 +3074,6 @@ entry: ret %1 } - define @test_vlsseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -3345,7 +3098,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -3370,7 +3122,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -3395,7 +3146,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -3420,7 +3170,6 @@ entry: ret %1 } - define @test_vlsseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -3445,7 +3194,6 @@ entry: ret %1 } - define @test_vlsseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -3470,7 +3218,6 @@ entry: ret %1 } - define @test_vlsseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -3495,7 +3242,6 @@ entry: ret %1 } - define @test_vlsseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -3520,7 +3266,6 @@ entry: ret %1 } - define @test_vlsseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: 
test_vlsseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -3545,7 +3290,6 @@ entry: ret %1 } - define @test_vlsseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -3570,7 +3314,6 @@ entry: ret %1 } - define @test_vlsseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -3595,7 +3338,6 @@ entry: ret %1 } - define @test_vlsseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -3620,7 +3362,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -3645,7 +3386,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -3670,7 +3410,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -3695,7 +3434,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -3720,7 +3458,6 @@ entry: ret %1 } - define @test_vlsseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -3745,7 +3482,6 @@ entry: ret %1 } - define 
@test_vlsseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -3770,7 +3506,6 @@ entry: ret %1 } - define @test_vlsseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -3795,7 +3530,6 @@ entry: ret %1 } - define @test_vlsseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -3820,7 +3554,6 @@ entry: ret %1 } - define @test_vlsseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -3845,7 +3578,6 @@ entry: ret %1 } - define @test_vlsseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -3870,7 +3602,6 @@ entry: ret %1 } - define @test_vlsseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -3895,7 +3626,6 @@ entry: ret %1 } - define @test_vlsseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -3920,7 +3650,6 @@ entry: ret %1 } - define @test_vlsseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -3945,7 +3674,6 @@ entry: ret %1 } - define @test_vlsseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t: 
; CHECK: # %bb.0: # %entry @@ -3970,7 +3698,6 @@ entry: ret %1 } - define @test_vlsseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -3995,7 +3722,6 @@ entry: ret %1 } - define @test_vlsseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -4020,7 +3746,6 @@ entry: ret %1 } - define @test_vlsseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -4045,7 +3770,6 @@ entry: ret %1 } - define @test_vlsseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -4070,7 +3794,6 @@ entry: ret %1 } - define @test_vlsseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -4095,7 +3818,6 @@ entry: ret %1 } - define @test_vlsseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -4120,7 +3842,6 @@ entry: ret %1 } - define @test_vlsseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -4145,7 +3866,6 @@ entry: ret %1 } - define @test_vlsseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -4170,7 +3890,6 @@ entry: ret %1 } - define @test_vlsseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(ptr %base, i64 %offset, i64 %vl) 
{ ; CHECK-LABEL: test_vlsseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -4195,7 +3914,6 @@ entry: ret %1 } - define @test_vlsseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -4220,7 +3938,6 @@ entry: ret %1 } - define @test_vlsseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll index 474b24c15db80..720e9759e52ac 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll @@ -4,12 +4,6 @@ ; The intrinsics are not supported with RV32. -declare @llvm.riscv.vluxei.nxv1i8.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -27,14 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -52,12 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i8.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -75,14 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -100,12 +72,6 @@ entry: ret 
%a } -declare @llvm.riscv.vluxei.nxv4i8.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -123,14 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -148,12 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i8.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -171,14 +123,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -196,12 +140,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i16.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -219,14 +157,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -244,12 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i16.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -267,14 +191,6 @@ 
entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -292,12 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i16.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -315,14 +225,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -340,12 +242,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i16.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -363,14 +259,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -388,12 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i32.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -411,14 +293,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -436,12 +310,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i32.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -459,14 +327,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -484,12 +344,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i32.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -507,14 +361,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -532,12 +378,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i32.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -555,14 +395,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -580,12 +412,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i64.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i64(ptr %0, 
%1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -602,14 +428,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -627,12 +445,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i64.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -649,14 +461,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -674,12 +478,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i64.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -696,14 +494,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -721,12 +511,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i64.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -743,14 +527,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64( - , - ptr, - , - , - i64, - i64); - define 
@intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -768,12 +544,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1f16.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -791,14 +561,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1f16.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -816,12 +578,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2f16.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -839,14 +595,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2f16.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -864,12 +612,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4f16.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -887,14 +629,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4f16.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -912,12 +646,6 @@ entry: ret %a } -declare 
@llvm.riscv.vluxei.nxv8f16.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -935,14 +663,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8f16.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -960,12 +680,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1bf16.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv1bf16_nxv1bf16_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1bf16_nxv1bf16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -983,14 +697,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1bf16.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv1bf16_nxv1bf16_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1bf16_nxv1bf16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1008,12 +714,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2bf16.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv2bf16_nxv2bf16_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2bf16_nxv2bf16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1031,14 +731,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2bf16.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv2bf16_nxv2bf16_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2bf16_nxv2bf16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1056,12 +748,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4bf16.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv4bf16_nxv4bf16_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4bf16_nxv4bf16_nxv4i64: ; CHECK: # 
%bb.0: # %entry @@ -1079,14 +765,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4bf16.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv4bf16_nxv4bf16_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4bf16_nxv4bf16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1104,12 +782,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8bf16.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv8bf16_nxv8bf16_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8bf16_nxv8bf16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1127,14 +799,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8bf16.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv8bf16_nxv8bf16_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8bf16_nxv8bf16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1152,12 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1f32.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1175,14 +833,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1200,12 +850,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2f32.nxv2i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1223,14 +867,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i64( %0, ptr %1, %2, %3, i64 
%4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1248,12 +884,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4f32.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1271,14 +901,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1296,12 +918,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8f32.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1319,14 +935,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1344,12 +952,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1f64.nxv1i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1366,14 +968,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1391,12 +985,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2f64.nxv2i64( - , - ptr, - , - i64); - define 
@intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1413,14 +1001,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1438,12 +1018,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4f64.nxv4i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1460,14 +1034,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1485,12 +1051,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8f64.nxv8i64( - , - ptr, - , - i64); - define @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i64(ptr %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1507,14 +1067,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64( - , - ptr, - , - , - i64, - i64); - define @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vluxei.ll b/llvm/test/CodeGen/RISCV/rvv/vluxei.ll index 520b75f30d140..2360cc1f9dd4c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vluxei.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vluxei.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ ; RUN: 
-verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vluxei.nxv1i8.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -27,14 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -52,12 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i8.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -75,14 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -100,12 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i8.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -123,14 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -148,12 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i8.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -171,14 +123,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -196,12 +140,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16i8.nxv16i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -219,14 +157,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -244,12 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i16.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -267,14 +191,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -292,12 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i16.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -315,14 +225,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define 
@intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -340,12 +242,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i16.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -363,14 +259,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -388,12 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i16.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -411,14 +293,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -436,12 +310,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16i16.nxv16i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -459,14 +327,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -484,12 
+344,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i32.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -506,14 +360,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -531,12 +377,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i32.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -553,14 +393,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -578,12 +410,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i32.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -600,14 +426,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -625,12 +443,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i32.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -647,14 +459,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -672,12 +476,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16i32.nxv16i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -694,14 +492,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -719,12 +509,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i64.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -742,14 +526,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -767,12 +543,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i64.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -790,14 +560,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - 
define @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -815,12 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i64.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -838,14 +594,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -863,12 +611,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i64.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -886,14 +628,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -911,12 +645,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1f16.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -934,14 +662,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1f16.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -959,12 +679,6 @@ 
entry: ret %a } -declare @llvm.riscv.vluxei.nxv2f16.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -982,14 +696,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2f16.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1007,12 +713,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4f16.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1030,14 +730,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4f16.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1055,12 +747,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8f16.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1078,14 +764,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8f16.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1103,12 +781,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16f16.nxv16i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1126,14 +798,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16f16.nxv16i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1151,12 +815,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1bf16.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1bf16_nxv1bf16_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1bf16_nxv1bf16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1174,14 +832,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1bf16.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1bf16_nxv1bf16_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1bf16_nxv1bf16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1199,12 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2bf16.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2bf16_nxv2bf16_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2bf16_nxv2bf16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1222,14 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2bf16.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2bf16_nxv2bf16_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2bf16_nxv2bf16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1247,12 +883,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4bf16.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4bf16_nxv4bf16_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4bf16_nxv4bf16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1270,14 +900,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4bf16.nxv4i32( - , - ptr, - 
, - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4bf16_nxv4bf16_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4bf16_nxv4bf16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1295,12 +917,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8bf16.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8bf16_nxv8bf16_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8bf16_nxv8bf16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1318,14 +934,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8bf16.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8bf16_nxv8bf16_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8bf16_nxv8bf16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1343,12 +951,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16bf16.nxv16i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16bf16_nxv16bf16_nxv16i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16bf16_nxv16bf16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1366,14 +968,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16bf16.nxv16i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16bf16_nxv16bf16_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16bf16_nxv16bf16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1391,12 +985,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1f32.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1413,14 +1001,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1438,12 +1018,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2f32.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1460,14 +1034,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1485,12 +1051,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4f32.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1507,14 +1067,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1532,12 +1084,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8f32.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1554,14 +1100,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1579,12 +1117,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16f32.nxv16i32( - , - ptr, - , - iXLen); - define 
@intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1601,14 +1133,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1626,12 +1150,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1f64.nxv1i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1649,14 +1167,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1674,12 +1184,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2f64.nxv2i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1697,14 +1201,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1722,12 +1218,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4f64.nxv4i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1745,14 +1235,6 @@ 
entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1770,12 +1252,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8f64.nxv8i32( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i32(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1793,14 +1269,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1818,12 +1286,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i8.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1841,14 +1303,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1866,12 +1320,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i8.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1889,14 +1337,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1914,12 +1354,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i8.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1937,14 +1371,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1962,12 +1388,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i8.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1985,14 +1405,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2010,12 +1422,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16i8.nxv16i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2033,14 +1439,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2058,12 +1456,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv32i8.nxv32i16( - , - ptr, - , - iXLen); - define 
@intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2081,14 +1473,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2106,12 +1490,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i16.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2128,14 +1506,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2153,12 +1523,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i16.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2175,14 +1539,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2200,12 +1556,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i16.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2222,14 +1572,6 @@ entry: ret 
%a } -declare @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2247,12 +1589,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i16.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2269,14 +1605,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2294,12 +1622,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16i16.nxv16i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2316,14 +1638,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2341,12 +1655,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv32i16.nxv32i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2363,14 +1671,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i16( %0, ptr %1, %2, %3, 
iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2388,12 +1688,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i32.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2411,14 +1705,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2436,12 +1722,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i32.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2459,14 +1739,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2484,12 +1756,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i32.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2507,14 +1773,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2532,12 +1790,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i32.nxv8i16( - , 
- ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2555,14 +1807,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2580,12 +1824,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16i32.nxv16i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2603,14 +1841,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2628,12 +1858,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i64.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2651,14 +1875,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2676,12 +1892,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i64.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # 
%entry @@ -2699,14 +1909,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2724,12 +1926,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i64.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2747,14 +1943,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2772,12 +1960,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i64.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2795,14 +1977,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2820,12 +1994,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1f16.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2842,14 +2010,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1f16.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i16( %0, ptr 
%1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2867,12 +2027,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2f16.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2889,14 +2043,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2f16.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2914,12 +2060,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4f16.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2936,14 +2076,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4f16.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2961,12 +2093,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8f16.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2983,14 +2109,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8f16.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3008,12 +2126,6 @@ entry: ret %a } -declare 
@llvm.riscv.vluxei.nxv16f16.nxv16i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -3030,14 +2142,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16f16.nxv16i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -3055,12 +2159,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv32f16.nxv32i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -3077,14 +2175,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv32f16.nxv32i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -3102,12 +2192,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1f32.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3125,14 +2209,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3150,12 +2226,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2f32.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3173,14 +2243,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3198,12 +2260,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4f32.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3221,14 +2277,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3246,12 +2294,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8f32.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3269,14 +2311,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3294,12 +2328,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16f32.nxv16i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -3317,14 +2345,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16( - , - ptr, - , - , - iXLen, - 
iXLen); - define @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -3342,12 +2362,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1f64.nxv1i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3365,14 +2379,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3390,12 +2396,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2f64.nxv2i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3413,14 +2413,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3438,12 +2430,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4f64.nxv4i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3461,14 +2447,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # 
%entry @@ -3486,12 +2464,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8f64.nxv8i16( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i16(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3509,14 +2481,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3534,12 +2498,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i8.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3556,14 +2514,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3581,12 +2531,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i8.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3603,14 +2547,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3628,12 +2564,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i8.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3650,14 +2580,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3675,12 +2597,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i8.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3697,14 +2613,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3722,12 +2630,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16i8.nxv16i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3744,14 +2646,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3769,12 +2663,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv32i8.nxv32i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3791,14 +2679,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8( - , - ptr, - , - , - iXLen, - iXLen); - define 
@intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3816,12 +2696,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv64i8.nxv64i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv64i8_nxv64i8_nxv64i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -3838,14 +2712,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv64i8_nxv64i8_nxv64i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -3863,12 +2729,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i16.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3886,14 +2746,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3911,12 +2763,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i16.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3934,14 +2780,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3959,12 +2797,6 @@ entry: 
ret %a } -declare @llvm.riscv.vluxei.nxv4i16.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3982,14 +2814,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4007,12 +2831,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i16.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4030,14 +2848,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4055,12 +2865,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16i16.nxv16i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4078,14 +2882,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4103,12 +2899,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv32i16.nxv32i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -4126,14 +2916,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -4151,12 +2933,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i32.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4174,14 +2950,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4199,12 +2967,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i32.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4222,14 +2984,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4247,12 +3001,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i32.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4270,14 +3018,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define 
@intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4295,12 +3035,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i32.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4318,14 +3052,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4343,12 +3069,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16i32.nxv16i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4366,14 +3086,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4391,12 +3103,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1i64.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4414,14 +3120,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4439,12 +3137,6 @@ 
entry: ret %a } -declare @llvm.riscv.vluxei.nxv2i64.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4462,14 +3154,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4487,12 +3171,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4i64.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4510,14 +3188,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4535,12 +3205,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8i64.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4558,14 +3222,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4583,12 +3239,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1f16.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4606,14 +3256,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1f16.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4631,12 +3273,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2f16.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4654,14 +3290,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2f16.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4679,12 +3307,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4f16.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4702,14 +3324,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4f16.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4727,12 +3341,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8f16.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4750,14 +3358,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8f16.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define 
@intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4775,12 +3375,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16f16.nxv16i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4798,14 +3392,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16f16.nxv16i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4823,12 +3409,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv32f16.nxv32i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -4846,14 +3426,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv32f16.nxv32i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -4871,12 +3443,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1f32.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4894,14 +3460,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ 
-4919,12 +3477,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2f32.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4942,14 +3494,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4967,12 +3511,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4f32.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4990,14 +3528,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5015,12 +3545,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8f32.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -5038,14 +3562,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -5063,12 +3579,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv16f32.nxv16i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -5086,14 +3596,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -5111,12 +3613,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv1f64.nxv1i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5134,14 +3630,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5159,12 +3647,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv2f64.nxv2i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5182,14 +3664,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5207,12 +3681,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv4f64.nxv4i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5230,14 +3698,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8( - , - ptr, - , - , - iXLen, - iXLen); - define 
@intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5255,12 +3715,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.nxv8f64.nxv8i8( - , - ptr, - , - iXLen); - define @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i8(ptr %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -5278,14 +3732,6 @@ entry: ret %a } -declare @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8( - , - ptr, - , - , - iXLen, - iXLen); - define @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv32.ll index 0c9aa28d3b137..77572b597ccf5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv32.ll @@ -2,9 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zve64d,+f,+d,+zvfh,+zvfbfmin \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -31,9 +28,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -60,9 +54,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -89,9 +80,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -118,9 +106,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16: ; CHECK: # 
%bb.0: # %entry @@ -147,9 +132,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -176,9 +158,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -205,9 +184,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -234,9 +210,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -263,9 +236,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -292,9 +262,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -321,9 +288,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32: ; 
CHECK: # %bb.0: # %entry @@ -350,9 +314,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -379,9 +340,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -408,9 +366,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -437,9 +392,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) 
@llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -466,9 +418,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -495,9 +444,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -524,9 +470,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -553,9 +496,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -582,9 +522,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -611,9 +548,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -640,9 +574,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -669,9 +600,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -698,9 +626,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -727,9 +652,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -756,9 +678,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -785,9 +704,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -814,9 +730,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -843,9 +756,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -872,9 +782,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -901,9 +808,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -930,9 +834,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -959,9 +860,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -988,9 +886,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1017,9 +912,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1046,9 +938,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1075,9 +964,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1104,9 +990,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1133,9 +1016,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1162,9 +1042,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1191,9 +1068,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1220,9 +1094,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1249,9 +1120,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1278,9 +1146,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1307,9 +1172,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1336,9 +1198,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32(ptr %base, %index, i32 %vl) { ; 
CHECK-LABEL: test_vluxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1365,9 +1224,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1394,9 +1250,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1423,9 +1276,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1452,9 +1302,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1481,9 +1328,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1510,9 +1354,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1539,9 +1380,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1568,9 +1406,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1597,9 +1432,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1626,9 +1458,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1655,9 +1484,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1684,9 +1510,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1713,9 +1536,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1742,9 +1562,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1771,9 +1588,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1800,9 +1614,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1829,9 +1640,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1858,9 +1666,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1887,9 +1692,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1916,9 +1718,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1945,9 +1744,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1974,9 +1770,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -2003,9 +1796,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2032,9 +1822,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -2061,9 +1848,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2090,9 +1874,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2119,9 +1900,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2148,9 +1926,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -2177,9 +1952,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2206,9 +1978,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -2235,9 +2004,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -2264,9 +2030,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2293,9 +2056,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -2322,9 +2082,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -2351,9 +2108,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2380,9 +2134,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -2409,9 +2160,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2438,9 +2186,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2467,9 +2212,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2496,9 +2238,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -2525,9 +2264,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2554,9 +2290,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -2583,9 +2316,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -2612,9 +2342,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2641,9 +2368,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -2670,9 +2394,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -2699,9 +2420,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2728,9 +2446,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -2757,9 +2472,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2786,9 +2498,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2815,9 +2524,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2844,9 +2550,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -2873,9 +2576,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2902,9 +2602,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -2931,9 +2628,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -2960,9 +2654,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2989,9 +2680,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -3018,9 +2706,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3047,9 +2732,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3076,9 +2758,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -3105,9 +2784,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3134,9 +2810,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -3163,9 +2836,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(ptr %base, %index, i32 %vl) { ; 
CHECK-LABEL: test_vluxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -3192,9 +2862,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3221,9 +2888,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3250,9 +2914,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -3279,9 +2940,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) 
-declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3308,9 +2966,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3337,9 +2992,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -3366,9 +3018,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3395,9 +3044,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3424,9 +3070,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -3453,9 +3096,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3482,9 +3122,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3511,9 +3148,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -3540,9 +3174,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3569,9 +3200,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3598,9 +3226,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -3627,9 +3252,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3656,9 +3278,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3685,9 +3304,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -3714,9 +3330,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3743,9 +3356,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3772,9 +3382,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -3801,9 +3408,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3830,9 +3434,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3859,9 +3460,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -3888,9 +3486,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3917,9 +3512,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3946,9 +3538,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -3975,9 +3564,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4004,9 +3590,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4033,9 +3616,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4062,9 +3642,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4091,9 +3668,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4120,9 +3694,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4149,9 +3720,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4178,9 +3746,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4207,9 +3772,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4236,9 +3798,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4265,9 +3824,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4294,9 +3850,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4323,9 +3876,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4352,9 +3902,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4381,9 +3928,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4410,9 +3954,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4439,9 +3980,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4468,9 +4006,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4497,9 +4032,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4526,9 +4058,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4555,9 +4084,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4584,9 +4110,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4613,9 +4136,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4642,9 +4162,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4671,9 +4188,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4700,9 +4214,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4729,9 +4240,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4758,9 +4266,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4787,9 +4292,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4816,9 +4318,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4845,9 +4344,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4874,9 +4370,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4903,9 +4396,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4932,9 +4422,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4961,9 +4448,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4990,9 +4474,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5019,9 +4500,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5048,9 +4526,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5077,9 +4552,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5106,9 +4578,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5135,9 +4604,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -5164,9 +4630,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -5193,9 +4656,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -5222,9 +4682,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -5251,9 +4708,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -5280,9 +4734,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5309,9 +4760,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5338,9 +4786,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5367,9 +4812,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5396,9 +4838,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5425,9 +4864,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5454,9 +4890,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5483,9 +4916,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -5512,9 +4942,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -5541,9 +4968,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5570,9 +4994,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5599,9 +5020,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5628,9 +5046,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5657,9 +5072,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5686,9 +5098,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5715,9 +5124,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5744,9 +5150,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -5773,9 +5176,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -5802,9 +5202,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5831,9 +5228,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5860,9 +5254,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5889,9 +5280,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5918,9 +5306,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5947,9 +5332,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5976,9 +5358,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6005,9 +5384,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6034,9 +5410,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6063,9 +5436,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6092,9 +5462,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6121,9 +5488,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6150,9 +5514,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6179,9 +5540,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6208,9 +5566,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6237,9 +5592,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6266,9 +5618,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6295,9 +5644,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6324,9 +5670,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6353,9 +5696,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6382,9 +5722,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6411,9 +5748,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6440,9 +5774,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6469,9 +5800,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6498,9 +5826,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6527,9 +5852,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6556,9 +5878,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6585,9 +5904,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6614,9 +5930,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6643,9 +5956,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6672,9 +5982,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -6701,9 +6008,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -6730,9 +6034,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) 
-declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32, i32) - define @test_vluxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -6759,9 +6060,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6788,9 +6086,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6817,9 +6112,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6846,9 +6138,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6875,9 +6164,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6904,9 +6190,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32, i32) - define @test_vluxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6933,9 +6216,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6962,9 +6242,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6991,9 +6268,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7020,9 +6294,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7049,9 +6320,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7078,9 +6346,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32, i32) - define @test_vluxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7107,9 +6372,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7136,9 +6398,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7165,9 +6424,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32, i32) - define @test_vluxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7194,9 +6450,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7223,9 +6476,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7252,9 +6502,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32, i32) - define @test_vluxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7281,9 +6528,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7310,9 +6554,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7339,9 +6580,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32, i32) - define @test_vluxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7368,9 +6606,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7397,9 +6632,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7426,9 +6658,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32, i32) - define @test_vluxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7455,7 +6684,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7482,7 +6710,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7509,7 +6736,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7536,7 +6762,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7563,7 +6788,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7590,7 +6814,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7617,7 +6840,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -7644,7 +6866,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # 
%bb.0: # %entry @@ -7671,7 +6892,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -7698,7 +6918,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -7725,7 +6944,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -7752,7 +6970,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -7779,7 +6996,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -7806,7 +7022,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -7833,7 +7048,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -7860,7 +7074,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7887,7 +7100,6 @@ entry: ret %1 } 
- define @test_vluxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7914,7 +7126,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7941,7 +7152,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7968,7 +7178,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7995,7 +7204,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8022,7 +7230,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -8049,7 +7256,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -8076,7 +7282,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -8103,7 +7308,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(ptr 
%base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -8130,7 +7334,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -8157,7 +7360,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -8184,7 +7386,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8211,7 +7412,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8238,7 +7438,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8265,7 +7464,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8292,7 +7490,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8319,7 +7516,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8346,7 +7542,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -8373,7 +7568,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -8400,7 +7594,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -8427,7 +7620,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -8454,7 +7646,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -8481,7 +7672,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -8508,7 +7698,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8535,7 +7724,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # 
%bb.0: # %entry @@ -8562,7 +7750,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8589,7 +7776,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8616,7 +7802,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8643,7 +7828,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8670,7 +7854,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -8697,7 +7880,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -8724,7 +7906,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -8751,7 +7932,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8778,7 +7958,6 @@ entry: ret %1 } - define 
@test_vluxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8805,7 +7984,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8832,7 +8010,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8859,7 +8036,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8886,7 +8062,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8913,7 +8088,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -8940,7 +8114,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -8967,7 +8140,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -8994,7 +8166,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(ptr %base, 
%index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9021,7 +8192,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9048,7 +8218,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9075,7 +8244,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9102,7 +8270,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9129,7 +8296,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9156,7 +8322,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -9183,7 +8348,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -9210,7 +8374,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -9237,7 +8400,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9264,7 +8426,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9291,7 +8452,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9318,7 +8478,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9345,7 +8504,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9372,7 +8530,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9399,7 +8556,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -9426,7 +8582,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # 
%bb.0: # %entry @@ -9453,7 +8608,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -9480,7 +8634,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9507,7 +8660,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9534,7 +8686,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9561,7 +8712,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9588,7 +8738,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9615,7 +8764,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9642,7 +8790,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -9669,7 +8816,6 @@ entry: ret %1 } - define 
@test_vluxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -9696,7 +8842,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -9723,7 +8868,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -9750,7 +8894,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -9777,7 +8920,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -9804,7 +8946,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9831,7 +8972,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9858,7 +8998,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9885,7 +9024,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(ptr 
%base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9912,7 +9050,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9939,7 +9076,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9966,7 +9102,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -9993,7 +9128,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -10020,7 +9154,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -10047,7 +9180,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10074,7 +9206,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10101,7 +9232,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10128,7 +9258,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10155,7 +9284,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10182,7 +9310,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10209,7 +9336,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -10236,7 +9362,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -10263,7 +9388,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -10290,7 +9414,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10317,7 +9440,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; 
CHECK: # %bb.0: # %entry @@ -10344,7 +9466,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10371,7 +9492,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10398,7 +9518,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10425,7 +9544,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10452,7 +9570,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10479,7 +9596,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10506,7 +9622,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10533,7 +9648,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10560,7 +9674,6 @@ entry: ret %1 } - 
define @test_vluxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10587,7 +9700,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10614,7 +9726,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10641,7 +9752,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10668,7 +9778,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10695,7 +9804,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10722,7 +9830,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10749,7 +9856,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10776,7 +9882,6 @@ entry: ret %1 } - define 
@test_vluxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10803,7 +9908,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10830,7 +9934,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10857,7 +9960,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10884,7 +9986,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10911,7 +10012,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10938,7 +10038,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10965,7 +10064,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10992,7 +10090,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(ptr 
%base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11019,7 +10116,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11046,7 +10142,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11073,7 +10168,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11100,7 +10194,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -11127,7 +10220,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -11154,7 +10246,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -11181,7 +10272,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11208,7 +10298,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11235,7 +10324,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11262,7 +10350,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11289,7 +10376,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11316,7 +10402,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11343,7 +10428,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11370,7 +10454,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11397,7 +10480,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11424,7 +10506,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11451,7 +10532,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11478,7 +10558,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11505,7 +10584,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11532,7 +10610,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11559,7 +10636,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11586,7 +10662,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11613,7 +10688,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11640,7 +10714,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11667,7 +10740,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11694,7 +10766,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11721,7 +10792,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11748,7 +10818,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11775,7 +10844,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11802,7 +10870,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11829,7 +10896,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11856,7 +10922,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11883,7 +10948,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11910,7 +10974,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11937,7 +11000,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11964,7 +11026,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11991,7 +11052,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -12018,7 +11078,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -12045,7 +11104,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -12072,7 +11130,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -12099,7 +11156,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -12126,7 +11182,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -12153,7 +11208,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -12180,7 +11234,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -12207,7 +11260,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -12234,7 +11286,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -12261,7 +11312,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -12288,7 +11338,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -12315,7 +11364,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -12342,7 +11390,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -12369,7 +11416,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -12396,7 +11442,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -12423,7 +11468,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -12450,7 +11494,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -12477,7 +11520,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -12504,7 +11546,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -12531,7 +11572,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -12558,7 +11598,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -12585,7 +11624,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -12612,7 +11650,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -12639,7 +11676,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -12666,7 +11702,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -12693,7 +11728,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -12720,7 +11754,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -12747,7 +11780,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -12774,7 +11806,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -12801,7 +11832,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -12828,7 +11858,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -12855,7 +11884,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -12882,7 +11910,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -12909,7 +11936,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -12936,7 +11962,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -12963,7 +11988,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -12990,7 +12014,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -13017,7 +12040,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -13044,7 +12066,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -13071,7 +12092,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -13098,7 +12118,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -13125,7 +12144,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13152,7 +12170,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13179,7 +12196,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13206,7 +12222,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -13233,7 +12248,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -13260,7 +12274,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -13287,7 +12300,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -13314,7 +12326,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -13341,7 +12352,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -13368,7 +12378,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13395,7 +12404,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13422,7 +12430,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13449,7 +12456,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -13476,7 +12482,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -13503,7 +12508,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -13530,7 +12534,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -13557,7 +12560,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -13584,7 +12586,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -13611,7 +12612,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13638,7 +12638,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13665,7 +12664,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13692,7 +12690,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -13719,7 +12716,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -13746,7 +12742,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -13773,7 +12768,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -13800,7 +12794,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -13827,7 +12820,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll index cfe5ab2b07e64..0e43923294137 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll @@ -2,9 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zve64d,+f,+d,+zvfh,+zvfbfmin \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -44,9 +41,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -73,9 +67,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -102,9 +93,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -131,9 +119,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -160,9 +145,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -189,9 +171,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -218,9 +197,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -247,9 +223,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -276,9 +249,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -305,9 +275,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -334,9 +301,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -363,9 +327,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -392,9 +353,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -421,9 +379,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -450,9 +405,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -479,9 +431,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -508,9 +457,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -537,9 +483,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -566,9 +509,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8(ptr %base, %index, i64 %vl) { ; 
CHECK-LABEL: test_vluxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -595,9 +535,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -624,9 +561,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -666,9 +600,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -695,9 +626,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -724,9 +652,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -753,9 +678,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -782,9 +704,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -811,9 +730,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -840,9 +756,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -869,9 +782,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -898,9 +808,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -927,9 +834,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -956,9 +860,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -985,9 +886,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1014,9 +912,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1043,9 +938,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1072,9 +964,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1101,9 +990,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1130,9 +1016,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1159,9 +1042,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1188,9 +1068,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8(ptr %base, %index, i64 %vl) { ; 
CHECK-LABEL: test_vluxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1230,9 +1107,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1259,9 +1133,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1288,9 +1159,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1317,9 +1185,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1346,9 +1211,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1375,9 +1237,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1404,9 +1263,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1433,9 +1289,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1462,9 +1315,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1491,9 +1341,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1520,9 +1367,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1549,9 +1393,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1578,9 +1419,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1607,9 +1445,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1636,9 +1471,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1665,9 +1497,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1694,9 +1523,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1723,9 +1549,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) 
-declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1752,9 +1575,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1794,9 +1614,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1823,9 +1640,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1852,9 +1666,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1881,9 +1692,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1910,9 +1718,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1939,9 +1744,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1968,9 +1770,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1997,9 +1796,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -2026,9 +1822,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2055,9 +1848,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -2084,9 +1874,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -2113,9 +1900,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -2142,9 +1926,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2171,9 +1952,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -2200,9 +1978,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -2229,9 +2004,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2271,9 +2043,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2300,9 +2069,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2329,9 +2095,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -2358,9 +2121,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -2387,9 +2147,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2416,9 +2173,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -2445,9 +2199,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -2474,9 +2225,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -2503,9 +2251,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2532,9 +2277,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -2561,9 +2303,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -2590,9 +2329,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -2619,9 +2355,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2648,9 +2381,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -2677,9 +2407,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -2706,9 +2433,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2748,9 +2472,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2777,9 +2498,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2806,9 +2524,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -2835,9 +2550,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -2864,9 +2576,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2893,9 +2602,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -2922,9 +2628,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -2951,9 +2654,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -2980,9 +2680,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3009,9 +2706,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -3038,9 +2732,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -3067,9 +2758,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3096,9 +2784,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3125,9 +2810,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -3154,9 +2836,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -3183,9 +2862,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3225,9 +2901,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3254,9 +2927,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -3283,9 +2953,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -3312,9 +2979,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3341,9 +3005,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3370,9 +3031,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -3399,9 +3057,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -3428,9 +3083,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3457,9 +3109,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3486,9 +3135,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -3515,9 +3161,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -3544,9 +3187,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3573,9 +3213,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3602,9 +3239,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -3631,9 +3265,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -3660,9 +3291,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3689,9 +3317,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3718,9 +3343,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -3747,9 +3369,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -3776,9 +3395,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3805,9 +3421,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3834,9 +3447,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -3863,9 +3473,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -3892,9 +3499,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3921,9 +3525,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3950,9 +3551,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -3979,9 +3577,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -4008,9 +3603,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4037,9 +3629,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -4066,9 +3655,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -4095,9 +3681,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -4124,9 +3707,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4153,9 +3733,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -4182,9 +3759,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -4211,9 +3785,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), 
ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4240,9 +3811,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4269,9 +3837,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4298,9 +3863,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(ptr %base, %index, i64 %vl) { ; 
CHECK-LABEL: test_vluxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -4327,9 +3889,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4356,9 +3915,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4385,9 +3941,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4414,9 +3967,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) 
-declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -4443,9 +3993,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4472,9 +4019,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4501,9 +4045,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4530,9 +4071,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -4559,9 +4097,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4588,9 +4123,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -4617,9 +4149,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -4646,9 +4175,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -4675,9 +4201,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4704,9 +4227,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4733,9 +4253,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4762,9 +4279,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -4791,9 +4305,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4820,9 +4331,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4849,9 +4357,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4878,9 +4383,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -4907,9 +4409,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4936,9 +4435,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4965,9 +4461,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4994,9 +4487,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -5023,9 +4513,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -5052,9 +4539,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -5081,9 +4565,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -5110,9 +4591,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -5139,9 +4617,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5168,9 +4643,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5197,9 +4669,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5226,9 +4695,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -5255,9 +4721,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5284,9 +4747,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5313,9 +4773,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5342,9 +4799,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -5371,9 +4825,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5400,9 +4851,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -5429,9 +4877,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -5458,9 +4903,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -5487,9 +4929,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5516,9 +4955,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5545,9 +4981,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5574,9 +5007,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -5603,9 +5033,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5632,9 +5059,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5661,9 +5085,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5690,9 +5111,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -5719,9 +5137,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5748,9 +5163,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -5777,9 +5189,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -5806,9 +5215,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -5835,9 +5241,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5864,9 +5267,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5893,9 +5293,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5922,9 +5319,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -5951,9 +5345,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5980,9 +5371,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6009,9 +5397,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6038,9 +5423,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -6067,9 +5449,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -6096,9 +5475,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -6125,9 +5501,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -6154,9 +5527,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -6183,9 +5553,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6212,9 +5579,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6241,9 +5605,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6270,9 +5631,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -6299,9 +5657,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6328,9 +5683,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6357,9 +5709,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6386,9 +5735,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -6415,9 +5761,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -6444,9 +5787,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -6473,9 +5813,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -6502,9 +5839,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -6531,9 +5865,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6560,9 +5891,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6589,9 +5917,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6618,9 +5943,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -6647,9 +5969,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6676,9 +5995,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6705,9 +6021,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6734,9 +6047,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -6763,9 +6073,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -6792,9 +6099,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -6821,9 +6125,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -6850,9 +6151,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) 
-declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -6879,9 +6177,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -6908,9 +6203,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -6937,9 +6229,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(ptr %base, %index, i64 %vl) { ; 
CHECK-LABEL: test_vluxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -6966,9 +6255,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -6995,9 +6281,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7024,9 +6307,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7053,9 +6333,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) 
-declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7082,9 +6359,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -7111,9 +6385,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7140,9 +6411,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7169,9 +6437,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7198,9 +6463,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -7227,9 +6489,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -7256,9 +6515,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -7285,9 +6541,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -7314,9 +6567,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -7343,9 +6593,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7372,9 +6619,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7401,9 +6645,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7430,9 +6671,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -7459,9 +6697,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7488,9 +6723,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7517,9 +6749,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7546,9 +6775,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -7575,9 +6801,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -7604,9 +6827,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -7633,9 +6853,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -7662,9 +6879,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) 
-declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -7691,9 +6905,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7720,9 +6931,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7749,9 +6957,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7778,9 +6983,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -7807,9 +7009,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7836,9 +7035,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7865,9 +7061,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7894,9 +7087,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -7923,9 +7113,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7952,9 +7139,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7981,9 +7165,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8010,9 +7191,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -8039,9 +7217,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8068,9 +7243,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8097,9 +7269,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8126,9 +7295,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -8155,9 +7321,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8184,9 +7347,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8213,9 +7373,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8242,9 +7399,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -8271,9 +7425,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8300,9 +7451,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8329,9 +7477,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8358,9 +7503,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -8387,9 +7529,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8416,9 +7555,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8445,9 +7581,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8474,9 +7607,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -8503,9 +7633,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8532,9 +7659,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8561,9 +7685,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8590,9 +7711,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -8619,9 +7737,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8648,9 +7763,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8677,9 +7789,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8706,9 +7815,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -8735,9 +7841,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8764,9 +7867,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8793,9 +7893,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8822,9 +7919,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -8851,9 +7945,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -8880,9 +7971,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) 
-declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -8909,9 +7997,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -8938,9 +8023,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -8967,9 +8049,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8996,9 +8075,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9025,9 +8101,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9054,9 +8127,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -9083,9 +8153,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9112,9 +8179,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9141,9 +8205,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9170,9 +8231,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) - define @test_vluxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -9199,9 +8257,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9228,9 +8283,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9257,9 +8309,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9286,9 +8335,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -9315,9 +8361,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9344,9 +8387,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9373,9 +8413,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9402,9 +8439,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) - define @test_vluxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -9431,9 +8465,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9460,9 +8491,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9489,9 +8517,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9518,9 +8543,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) - define @test_vluxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -9547,9 +8569,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9576,9 +8595,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9605,9 +8621,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9634,9 +8647,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) - define @test_vluxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -9663,9 +8673,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9692,9 +8699,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare 
target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9721,9 +8725,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9750,9 +8751,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) - define @test_vluxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -9779,9 +8777,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9808,9 +8803,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9837,9 +8829,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9866,9 +8855,6 @@ entry: ret %1 } -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) - define @test_vluxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -9895,7 +8881,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9922,7 +8907,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9949,7 +8933,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9976,7 +8959,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -10003,7 +8985,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10030,7 +9011,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10057,7 +9037,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10084,7 +9063,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -10111,7 +9089,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # 
%bb.0: # %entry @@ -10138,7 +9115,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -10165,7 +9141,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -10192,7 +9167,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -10219,7 +9193,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -10246,7 +9219,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -10273,7 +9245,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -10300,7 +9271,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -10327,7 +9297,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -10354,7 +9323,6 @@ entry: ret %1 
} - define @test_vluxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -10381,7 +9349,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -10408,7 +9375,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10435,7 +9401,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10462,7 +9427,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10489,7 +9453,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -10516,7 +9479,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10543,7 +9505,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10570,7 +9531,6 @@ entry: ret %1 } - define 
@test_vluxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10597,7 +9557,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -10624,7 +9583,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -10651,7 +9609,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -10678,7 +9635,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -10705,7 +9661,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -10732,7 +9687,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -10759,7 +9713,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -10786,7 +9739,6 @@ entry: ret %1 } - define 
@test_vluxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -10813,7 +9765,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -10840,7 +9791,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10867,7 +9817,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10894,7 +9843,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10921,7 +9869,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -10948,7 +9895,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10975,7 +9921,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11002,7 +9947,6 @@ entry: ret %1 } - define 
@test_vluxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11029,7 +9973,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -11056,7 +9999,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -11083,7 +10025,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -11110,7 +10051,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -11137,7 +10077,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -11164,7 +10103,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -11191,7 +10129,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -11218,7 +10155,6 @@ entry: ret %1 } - define 
@test_vluxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -11245,7 +10181,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -11272,7 +10207,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11299,7 +10233,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11326,7 +10259,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11353,7 +10285,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -11380,7 +10311,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11407,7 +10337,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11434,7 +10363,6 @@ entry: ret %1 } - define 
@test_vluxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11461,7 +10389,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -11488,7 +10415,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -11515,7 +10441,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -11542,7 +10467,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -11569,7 +10493,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -11596,7 +10519,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11623,7 +10545,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11650,7 +10571,6 @@ entry: ret %1 } - define 
@test_vluxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11677,7 +10597,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -11704,7 +10623,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11731,7 +10649,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11758,7 +10675,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11785,7 +10701,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -11812,7 +10727,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -11839,7 +10753,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -11866,7 +10779,6 @@ entry: ret %1 } - define 
@test_vluxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -11893,7 +10805,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -11920,7 +10831,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11947,7 +10857,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11974,7 +10883,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -12001,7 +10909,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -12028,7 +10935,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -12055,7 +10961,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -12082,7 +10987,6 @@ entry: ret %1 } - define 
@test_vluxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -12109,7 +11013,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -12136,7 +11039,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -12163,7 +11065,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -12190,7 +11091,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -12217,7 +11117,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -12244,7 +11143,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -12271,7 +11169,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -12298,7 +11195,6 @@ entry: ret %1 } - define 
@test_vluxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -12325,7 +11221,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -12352,7 +11247,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -12379,7 +11273,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -12406,7 +11299,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -12433,7 +11325,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -12460,7 +11351,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -12487,7 +11377,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -12514,7 +11403,6 @@ entry: ret %1 } - define 
@test_vluxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -12541,7 +11429,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -12568,7 +11455,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -12595,7 +11481,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -12622,7 +11507,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -12649,7 +11533,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -12676,7 +11559,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -12703,7 +11585,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -12730,7 +11611,6 @@ entry: ret %1 } - define 
@test_vluxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -12757,7 +11637,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -12784,7 +11663,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -12811,7 +11689,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -12838,7 +11715,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -12865,7 +11741,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -12892,7 +11767,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -12919,7 +11793,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -12946,7 +11819,6 @@ entry: ret %1 } - define 
@test_vluxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -12973,7 +11845,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -13000,7 +11871,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13027,7 +11897,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13054,7 +11923,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13081,7 +11949,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -13108,7 +11975,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -13135,7 +12001,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -13162,7 +12027,6 @@ entry: ret %1 } - define 
@test_vluxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -13189,7 +12053,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -13216,7 +12079,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -13243,7 +12105,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -13270,7 +12131,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -13297,7 +12157,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -13324,7 +12183,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13351,7 +12209,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13378,7 +12235,6 @@ entry: ret %1 } - define 
@test_vluxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13405,7 +12261,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -13432,7 +12287,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -13459,7 +12313,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -13486,7 +12339,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -13513,7 +12365,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -13540,7 +12391,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -13567,7 +12417,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -13594,7 +12443,6 @@ entry: ret %1 } - define 
@test_vluxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -13621,7 +12469,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -13648,7 +12495,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13675,7 +12521,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13702,7 +12547,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13729,7 +12573,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -13756,7 +12599,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -13783,7 +12625,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -13810,7 +12651,6 @@ entry: ret %1 } - define 
@test_vluxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -13837,7 +12677,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -13864,7 +12703,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13891,7 +12729,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13918,7 +12755,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13945,7 +12781,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -13972,7 +12807,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -13999,7 +12833,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -14026,7 +12859,6 @@ entry: ret %1 } - define 
@test_vluxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -14053,7 +12885,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -14080,7 +12911,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -14107,7 +12937,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -14134,7 +12963,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -14161,7 +12989,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -14188,7 +13015,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -14215,7 +13041,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -14242,7 +13067,6 @@ entry: ret %1 } - define 
@test_vluxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -14269,7 +13093,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -14296,7 +13119,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -14323,7 +13145,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -14350,7 +13171,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -14377,7 +13197,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -14404,7 +13223,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -14431,7 +13249,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -14458,7 +13275,6 @@ entry: ret %1 } - define 
@test_vluxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -14485,7 +13301,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -14512,7 +13327,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -14539,7 +13353,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -14566,7 +13379,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -14593,7 +13405,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -14620,7 +13431,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -14647,7 +13457,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -14674,7 +13483,6 @@ entry: ret %1 } - define 
@test_vluxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -14701,7 +13509,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -14728,7 +13535,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -14755,7 +13561,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -14782,7 +13587,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -14809,7 +13613,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -14836,7 +13639,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -14863,7 +13665,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -14890,7 +13691,6 @@ entry: ret %1 } - define 
@test_vluxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -14917,7 +13717,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -14944,7 +13743,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -14971,7 +13769,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -14998,7 +13795,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -15025,7 +13821,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -15052,7 +13847,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -15079,7 +13873,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -15106,7 +13899,6 @@ entry: ret %1 } - define 
@test_vluxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -15133,7 +13925,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -15160,7 +13951,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -15187,7 +13977,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -15214,7 +14003,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -15241,7 +14029,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -15268,7 +14055,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -15295,7 +14081,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -15322,7 +14107,6 @@ entry: ret %1 } - define 
@test_vluxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -15349,7 +14133,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -15376,7 +14159,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -15403,7 +14185,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -15430,7 +14211,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -15457,7 +14237,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -15484,7 +14263,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -15511,7 +14289,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -15538,7 +14315,6 @@ entry: ret %1 } - define 
@test_vluxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -15565,7 +14341,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -15592,7 +14367,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -15619,7 +14393,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -15646,7 +14419,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -15673,7 +14445,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -15700,7 +14471,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -15727,7 +14497,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -15754,7 +14523,6 @@ entry: ret %1 } - define 
@test_vluxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -15781,7 +14549,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -15808,7 +14575,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -15835,7 +14601,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -15862,7 +14627,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -15889,7 +14653,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -15916,7 +14679,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -15943,7 +14705,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -15970,7 +14731,6 @@ entry: ret %1 } - define 
@test_vluxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -15997,7 +14757,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -16024,7 +14783,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -16051,7 +14809,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -16078,7 +14835,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -16105,7 +14861,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -16132,7 +14887,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -16159,7 +14913,6 @@ entry: ret %1 } - define @test_vluxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -16186,7 +14939,6 @@ entry: ret %1 } - define 
@test_vluxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -16213,7 +14965,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -16240,7 +14991,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -16267,7 +15017,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -16294,7 +15043,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -16321,7 +15069,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -16348,7 +15095,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -16375,7 +15121,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -16402,7 +15147,6 @@ entry: ret %1 } - define 
@test_vluxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -16429,7 +15173,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -16456,7 +15199,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -16483,7 +15225,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -16510,7 +15251,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -16537,7 +15277,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -16564,7 +15303,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -16591,7 +15329,6 @@ entry: ret %1 } - define @test_vluxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -16618,7 +15355,6 @@ entry: ret %1 } - define 
@test_vluxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -16645,7 +15381,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -16672,7 +15407,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -16699,7 +15433,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -16726,7 +15459,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -16753,7 +15485,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -16780,7 +15511,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -16807,7 +15537,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -16834,7 +15563,6 @@ entry: ret %1 } - define 
@test_vluxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -16861,7 +15589,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -16888,7 +15615,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -16915,7 +15641,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -16942,7 +15667,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -16969,7 +15693,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -16996,7 +15719,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -17023,7 +15745,6 @@ entry: ret %1 } - define @test_vluxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -17050,7 +15771,6 @@ entry: ret %1 } - define 
@test_vluxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -17077,7 +15797,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -17104,7 +15823,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -17131,7 +15849,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -17158,7 +15875,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -17185,7 +15901,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -17212,7 +15927,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -17239,7 +15953,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -17266,7 +15979,6 @@ entry: ret %1 } - define 
@test_vluxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -17293,7 +16005,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -17320,7 +16031,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -17347,7 +16057,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -17374,7 +16083,6 @@ entry: ret %1 } - define @test_vluxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -17401,7 +16109,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -17428,7 +16135,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -17455,7 +16161,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -17482,7 +16187,6 @@ entry: ret %1 } - define 
@test_vluxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -17509,7 +16213,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -17536,7 +16239,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -17563,7 +16265,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -17590,7 +16291,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -17617,7 +16317,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -17644,7 +16343,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -17671,7 +16369,6 @@ entry: ret %1 } - define @test_vluxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -17698,7 +16395,6 @@ entry: ret %1 } - define 
@test_vluxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -17725,7 +16421,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -17752,7 +16447,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -17779,7 +16473,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -17806,7 +16499,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -17833,7 +16525,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -17860,7 +16551,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -17887,7 +16577,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -17914,7 +16603,6 @@ entry: ret %1 } - define 
@test_vluxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -17941,7 +16629,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -17968,7 +16655,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -17995,7 +16681,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -18022,7 +16707,6 @@ entry: ret %1 } - define @test_vluxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -18049,7 +16733,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -18076,7 +16759,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -18103,7 +16785,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -18130,7 +16811,6 @@ entry: ret %1 } - define 
@test_vluxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -18157,7 +16837,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -18184,7 +16863,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -18211,7 +16889,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -18238,7 +16915,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -18265,7 +16941,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -18292,7 +16967,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -18319,7 +16993,6 @@ entry: ret %1 } - define @test_vluxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -18346,7 +17019,6 @@ entry: ret %1 } - define 
@test_vluxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmacc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmacc-vp.ll index c334e70f1f358..2ad7ac9390515 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmacc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmacc-vp.ll @@ -4,11 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.mul.nxv1i8(, , , i32) -declare @llvm.vp.add.nxv1i8(, , , i32) -declare @llvm.vp.merge.nxv1i8(, , , i32) -declare @llvm.vp.select.nxv1i8(, , , i32) - define @vmacc_vv_nxv1i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv1i8: ; CHECK: # %bb.0: @@ -93,11 +88,6 @@ define @vmacc_vx_nxv1i8_ta( %a, i8 %b, %u } -declare @llvm.vp.mul.nxv2i8(, , , i32) -declare @llvm.vp.add.nxv2i8(, , , i32) -declare @llvm.vp.merge.nxv2i8(, , , i32) -declare @llvm.vp.select.nxv2i8(, , , i32) - define @vmacc_vv_nxv2i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv2i8: ; CHECK: # %bb.0: @@ -182,11 +172,6 @@ define @vmacc_vx_nxv2i8_ta( %a, i8 %b, %u } -declare @llvm.vp.mul.nxv4i8(, , , i32) -declare @llvm.vp.add.nxv4i8(, , , i32) -declare @llvm.vp.merge.nxv4i8(, , , i32) -declare @llvm.vp.select.nxv4i8(, , , i32) - define @vmacc_vv_nxv4i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv4i8: ; CHECK: # %bb.0: @@ -271,11 +256,6 @@ define @vmacc_vx_nxv4i8_ta( %a, i8 %b, %u } -declare @llvm.vp.mul.nxv8i8(, , , i32) -declare @llvm.vp.add.nxv8i8(, , , i32) -declare @llvm.vp.merge.nxv8i8(, , , i32) -declare @llvm.vp.select.nxv8i8(, , , i32) - define @vmacc_vv_nxv8i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv8i8: ; CHECK: # %bb.0: @@ -360,11 +340,6 @@ define @vmacc_vx_nxv8i8_ta( %a, i8 %b, %u } -declare @llvm.vp.mul.nxv16i8(, , , i32) -declare 
@llvm.vp.add.nxv16i8(, , , i32) -declare @llvm.vp.merge.nxv16i8(, , , i32) -declare @llvm.vp.select.nxv16i8(, , , i32) - define @vmacc_vv_nxv16i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv16i8: ; CHECK: # %bb.0: @@ -449,11 +424,6 @@ define @vmacc_vx_nxv16i8_ta( %a, i8 %b, %u } -declare @llvm.vp.mul.nxv32i8(, , , i32) -declare @llvm.vp.add.nxv32i8(, , , i32) -declare @llvm.vp.merge.nxv32i8(, , , i32) -declare @llvm.vp.select.nxv32i8(, , , i32) - define @vmacc_vv_nxv32i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv32i8: ; CHECK: # %bb.0: @@ -538,11 +508,6 @@ define @vmacc_vx_nxv32i8_ta( %a, i8 %b, %u } -declare @llvm.vp.mul.nxv64i8(, , , i32) -declare @llvm.vp.add.nxv64i8(, , , i32) -declare @llvm.vp.merge.nxv64i8(, , , i32) -declare @llvm.vp.select.nxv64i8(, , , i32) - define @vmacc_vv_nxv64i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv64i8: ; CHECK: # %bb.0: @@ -630,11 +595,6 @@ define @vmacc_vx_nxv64i8_ta( %a, i8 %b, %u } -declare @llvm.vp.mul.nxv1i16(, , , i32) -declare @llvm.vp.add.nxv1i16(, , , i32) -declare @llvm.vp.merge.nxv1i16(, , , i32) -declare @llvm.vp.select.nxv1i16(, , , i32) - define @vmacc_vv_nxv1i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv1i16: ; CHECK: # %bb.0: @@ -719,11 +679,6 @@ define @vmacc_vx_nxv1i16_ta( %a, i16 %b, %u } -declare @llvm.vp.mul.nxv2i16(, , , i32) -declare @llvm.vp.add.nxv2i16(, , , i32) -declare @llvm.vp.merge.nxv2i16(, , , i32) -declare @llvm.vp.select.nxv2i16(, , , i32) - define @vmacc_vv_nxv2i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv2i16: ; CHECK: # %bb.0: @@ -808,11 +763,6 @@ define @vmacc_vx_nxv2i16_ta( %a, i16 %b, %u } -declare @llvm.vp.mul.nxv4i16(, , , i32) -declare @llvm.vp.add.nxv4i16(, , , i32) -declare @llvm.vp.merge.nxv4i16(, , , i32) -declare @llvm.vp.select.nxv4i16(, , , i32) - define @vmacc_vv_nxv4i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv4i16: ; CHECK: # %bb.0: @@ -897,11 
+847,6 @@ define @vmacc_vx_nxv4i16_ta( %a, i16 %b, %u } -declare @llvm.vp.mul.nxv8i16(, , , i32) -declare @llvm.vp.add.nxv8i16(, , , i32) -declare @llvm.vp.merge.nxv8i16(, , , i32) -declare @llvm.vp.select.nxv8i16(, , , i32) - define @vmacc_vv_nxv8i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv8i16: ; CHECK: # %bb.0: @@ -986,11 +931,6 @@ define @vmacc_vx_nxv8i16_ta( %a, i16 %b, %u } -declare @llvm.vp.mul.nxv16i16(, , , i32) -declare @llvm.vp.add.nxv16i16(, , , i32) -declare @llvm.vp.merge.nxv16i16(, , , i32) -declare @llvm.vp.select.nxv16i16(, , , i32) - define @vmacc_vv_nxv16i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv16i16: ; CHECK: # %bb.0: @@ -1075,11 +1015,6 @@ define @vmacc_vx_nxv16i16_ta( %a, i16 %b, ret %u } -declare @llvm.vp.mul.nxv32i16(, , , i32) -declare @llvm.vp.add.nxv32i16(, , , i32) -declare @llvm.vp.merge.nxv32i16(, , , i32) -declare @llvm.vp.select.nxv32i16(, , , i32) - define @vmacc_vv_nxv32i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv32i16: ; CHECK: # %bb.0: @@ -1167,11 +1102,6 @@ define @vmacc_vx_nxv32i16_ta( %a, i16 %b, ret %u } -declare @llvm.vp.mul.nxv1i32(, , , i32) -declare @llvm.vp.add.nxv1i32(, , , i32) -declare @llvm.vp.merge.nxv1i32(, , , i32) -declare @llvm.vp.select.nxv1i32(, , , i32) - define @vmacc_vv_nxv1i32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv1i32: ; CHECK: # %bb.0: @@ -1256,11 +1186,6 @@ define @vmacc_vx_nxv1i32_ta( %a, i32 %b, %u } -declare @llvm.vp.mul.nxv2i32(, , , i32) -declare @llvm.vp.add.nxv2i32(, , , i32) -declare @llvm.vp.merge.nxv2i32(, , , i32) -declare @llvm.vp.select.nxv2i32(, , , i32) - define @vmacc_vv_nxv2i32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv2i32: ; CHECK: # %bb.0: @@ -1345,11 +1270,6 @@ define @vmacc_vx_nxv2i32_ta( %a, i32 %b, %u } -declare @llvm.vp.mul.nxv4i32(, , , i32) -declare @llvm.vp.add.nxv4i32(, , , i32) -declare @llvm.vp.merge.nxv4i32(, , , i32) -declare 
@llvm.vp.select.nxv4i32(, , , i32) - define @vmacc_vv_nxv4i32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv4i32: ; CHECK: # %bb.0: @@ -1434,11 +1354,6 @@ define @vmacc_vx_nxv4i32_ta( %a, i32 %b, %u } -declare @llvm.vp.mul.nxv8i32(, , , i32) -declare @llvm.vp.add.nxv8i32(, , , i32) -declare @llvm.vp.merge.nxv8i32(, , , i32) -declare @llvm.vp.select.nxv8i32(, , , i32) - define @vmacc_vv_nxv8i32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv8i32: ; CHECK: # %bb.0: @@ -1523,11 +1438,6 @@ define @vmacc_vx_nxv8i32_ta( %a, i32 %b, %u } -declare @llvm.vp.mul.nxv16i32(, , , i32) -declare @llvm.vp.add.nxv16i32(, , , i32) -declare @llvm.vp.merge.nxv16i32(, , , i32) -declare @llvm.vp.select.nxv16i32(, , , i32) - define @vmacc_vv_nxv16i32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv16i32: ; CHECK: # %bb.0: @@ -1615,11 +1525,6 @@ define @vmacc_vx_nxv16i32_ta( %a, i32 %b, ret %u } -declare @llvm.vp.mul.nxv1i64(, , , i32) -declare @llvm.vp.add.nxv1i64(, , , i32) -declare @llvm.vp.merge.nxv1i64(, , , i32) -declare @llvm.vp.select.nxv1i64(, , , i32) - define @vmacc_vv_nxv1i64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv1i64: ; CHECK: # %bb.0: @@ -1751,11 +1656,6 @@ define @vmacc_vx_nxv1i64_ta( %a, i64 %b, %u } -declare @llvm.vp.mul.nxv2i64(, , , i32) -declare @llvm.vp.add.nxv2i64(, , , i32) -declare @llvm.vp.merge.nxv2i64(, , , i32) -declare @llvm.vp.select.nxv2i64(, , , i32) - define @vmacc_vv_nxv2i64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1887,11 +1787,6 @@ define @vmacc_vx_nxv2i64_ta( %a, i64 %b, %u } -declare @llvm.vp.mul.nxv4i64(, , , i32) -declare @llvm.vp.add.nxv4i64(, , , i32) -declare @llvm.vp.merge.nxv4i64(, , , i32) -declare @llvm.vp.select.nxv4i64(, , , i32) - define @vmacc_vv_nxv4i64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv4i64: ; CHECK: # %bb.0: @@ -2023,11 +1918,6 @@ define @vmacc_vx_nxv4i64_ta( %a, i64 %b, %u } 
-declare @llvm.vp.mul.nxv8i64(, , , i32) -declare @llvm.vp.add.nxv8i64(, , , i32) -declare @llvm.vp.merge.nxv8i64(, , , i32) -declare @llvm.vp.select.nxv8i64(, , , i32) - define @vmacc_vv_nxv8i64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmacc_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmacc.ll b/llvm/test/CodeGen/RISCV/rvv/vmacc.ll index b8b4baf53b677..7aade205167b5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmacc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmacc.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vmacc.nxv1i8.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmacc_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,13 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv2i8.nxv2i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmacc_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -74,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,13 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv4i8.nxv4i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmacc_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vmacc_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -121,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,13 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv8i8.nxv8i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmacc_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -168,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv16i8.nxv16i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmacc_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -215,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,13 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv32i8.nxv32i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmacc_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -262,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, 
%1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,13 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv1i16.nxv1i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmacc_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -309,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -333,13 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv2i16.nxv2i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmacc_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -356,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -380,13 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv4i16.nxv4i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmacc_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -403,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -427,13 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv8i16.nxv8i16( - , - , - , - iXLen, 
- iXLen); - define @intrinsic_vmacc_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -450,13 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -474,13 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv16i16.nxv16i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmacc_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -497,13 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -521,13 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv1i32.nxv1i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmacc_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -544,13 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -568,13 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv2i32.nxv2i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmacc_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -591,13 +416,6 @@ entry: 
ret %a } -declare @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -615,13 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv4i32.nxv4i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmacc_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -638,13 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -662,13 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv8i32.nxv8i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmacc_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -685,13 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -709,13 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv1i64.nxv1i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmacc_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -732,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmacc_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -756,13 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv2i64.nxv2i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmacc_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -779,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -803,13 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv4i64.nxv4i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmacc_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -826,13 +581,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -850,13 +598,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv1i8.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmacc_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i8_i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -873,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv1i8.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i8_i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -897,13 +631,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv2i8.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmacc_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, iXLen %3) 
nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i8_i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -920,13 +647,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv2i8.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i8_i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -944,13 +664,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv4i8.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmacc_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i8_i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -967,13 +680,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv4i8.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i8_i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -991,13 +697,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv8i8.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmacc_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i8_i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1014,13 +713,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv8i8.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i8_i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1038,13 +730,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv16i8.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmacc_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv16i8_i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1061,13 +746,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv16i8.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv16i8_i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1085,13 +763,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv32i8.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmacc_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv32i8_i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1108,13 +779,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv32i8.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv32i8_i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1132,13 +796,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv1i16.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmacc_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i16_i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1155,13 +812,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv1i16.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i16_i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1179,13 +829,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv2i16.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmacc_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i16_i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1202,13 +845,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv2i16.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i16_i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1226,13 +862,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv4i16.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmacc_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, 
%2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i16_i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1249,13 +878,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv4i16.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i16_i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1273,13 +895,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv8i16.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmacc_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i16_i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1296,13 +911,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv8i16.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i16_i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1320,13 +928,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv16i16.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmacc_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv16i16_i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1343,13 +944,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv16i16.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv16i16_i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1367,13 +961,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv1i32.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vmacc_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i32_i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1390,13 +977,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv1i32.i32( - , - i32, - , - , - iXLen, iXLen); - define 
@intrinsic_vmacc_mask_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i32_i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1414,13 +994,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv2i32.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vmacc_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i32_i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1437,13 +1010,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv2i32.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i32_i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1461,13 +1027,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv4i32.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vmacc_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i32_i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1484,13 +1043,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv4i32.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i32_i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1508,13 +1060,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv8i32.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vmacc_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i32_i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1531,13 +1076,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv8i32.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i32_i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1555,13 +1093,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmacc.nxv1i64.i64( - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64: ; RV32: # %bb.0: # %entry @@ -1591,13 +1122,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv1i64.i64( - , - i64, - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmacc_mask_vx_nxv1i64_i64_nxv1i64: ; RV32: # %bb.0: # %entry @@ -1628,13 +1152,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv2i64.i64( - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vmacc_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vmacc_vx_nxv2i64_i64_nxv2i64: ; RV32: # %bb.0: # %entry @@ -1664,13 +1181,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv2i64.i64( - , - i64, - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmacc_mask_vx_nxv2i64_i64_nxv2i64: ; RV32: # %bb.0: # %entry @@ -1701,13 +1211,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.nxv4i64.i64( - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vmacc_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vmacc_vx_nxv4i64_i64_nxv4i64: ; RV32: # %bb.0: # %entry @@ -1737,13 +1240,6 @@ entry: ret %a } -declare @llvm.riscv.vmacc.mask.nxv4i64.i64( - , - i64, - , - , - iXLen, iXLen); - define @intrinsic_vmacc_mask_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmacc_mask_vx_nxv4i64_i64_nxv4i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in.ll index 31c12db79a946..33a49cefd54d9 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | 
llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vmadc.carry.in.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,12 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i8_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -50,12 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i8_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -73,12 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -96,12 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i8_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -119,12 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i8_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -142,12 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv64i8.nxv64i8( - , - , - , - iXLen); - define 
@intrinsic_vmadc.carry.in_vvm_nxv64i1_nxv64i8_nxv64i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv64i1_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -165,12 +123,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -188,12 +140,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i16_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -211,12 +157,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -234,12 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -257,12 +191,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -280,12 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv32i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i16_nxv32i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i16_nxv32i16: ; CHECK: 
# %bb.0: # %entry @@ -303,12 +225,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -326,12 +242,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -349,12 +259,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -372,12 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -395,12 +293,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv16i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -418,12 +310,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -441,12 +327,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv2i64.nxv2i64( - , - , - , - iXLen); - define 
@intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -464,12 +344,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv4i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -487,12 +361,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv8i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i64_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -510,12 +378,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv1i8.i8( - , - i8, - , - iXLen); - define @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -533,12 +395,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv2i8.i8( - , - i8, - , - iXLen); - define @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -556,12 +412,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv4i8.i8( - , - i8, - , - iXLen); - define @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -579,12 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv8i8.i8( - , - i8, - , - iXLen); - define @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -602,12 +446,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmadc.carry.in.nxv16i8.i8( - , - i8, - , - iXLen); - define @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -625,12 +463,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv32i8.i8( - , - i8, - , - iXLen); - define @intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -648,12 +480,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv64i8.i8( - , - i8, - , - iXLen); - define @intrinsic_vmadc.carry.in_vxm_nxv64i1_nxv64i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv64i1_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -671,12 +497,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv1i16.i16( - , - i16, - , - iXLen); - define @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -694,12 +514,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv2i16.i16( - , - i16, - , - iXLen); - define @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -717,12 +531,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv4i16.i16( - , - i16, - , - iXLen); - define @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -740,12 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv8i16.i16( - , - i16, - , - iXLen); - define @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -763,12 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv16i16.i16( - , - i16, - , - iXLen); - define @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -786,12 +582,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv32i16.i16( - , - i16, - , - iXLen); - define @intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -809,12 +599,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv1i32.i32( - , - i32, - , - iXLen); - define @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -832,12 +616,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv2i32.i32( - , - i32, - , - iXLen); - define @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -855,12 +633,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv4i32.i32( - , - i32, - , - iXLen); - define @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -878,12 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv8i32.i32( - , - i32, - , - iXLen); - define @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -901,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv16i32.i32( - , - i32, - , - iXLen); - define 
@intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -924,12 +684,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv1i64.i64( - , - i64, - , - iXLen); - define @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -960,12 +714,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv2i64.i64( - , - i64, - , - iXLen); - define @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -996,12 +744,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv4i64.i64( - , - i64, - , - iXLen); - define @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -1032,12 +774,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.carry.in.nxv8i64.i64( - , - i64, - , - iXLen); - define @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc.ll index 4777903558e4c..503c0fd4c232c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmadc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmadc.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vmadc.nxv1i8.nxv1i8( - , - , - iXLen); - define @intrinsic_vmadc_vv_nxv1i1_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -24,11 +19,6 @@ entry: ret %a } 
-declare @llvm.riscv.vmadc.nxv2i8.nxv2i8( - , - , - iXLen); - define @intrinsic_vmadc_vv_nxv2i1_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -44,11 +34,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv4i8.nxv4i8( - , - , - iXLen); - define @intrinsic_vmadc_vv_nxv4i1_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -64,11 +49,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv8i8.nxv8i8( - , - , - iXLen); - define @intrinsic_vmadc_vv_nxv8i1_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -84,11 +64,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv16i8.nxv16i8( - , - , - iXLen); - define @intrinsic_vmadc_vv_nxv16i1_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -104,11 +79,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv32i8.nxv32i8( - , - , - iXLen); - define @intrinsic_vmadc_vv_nxv32i1_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv32i1_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -124,11 +94,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv64i8.nxv64i8( - , - , - iXLen); - define @intrinsic_vmadc_vv_nxv64i1_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv64i1_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -144,11 +109,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv1i16.nxv1i16( - , - , - iXLen); - define @intrinsic_vmadc_vv_nxv1i1_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -164,11 +124,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv2i16.nxv2i16( - , - , - iXLen); - define @intrinsic_vmadc_vv_nxv2i1_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -184,11 +139,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv4i16.nxv4i16( - , - , - iXLen); - define @intrinsic_vmadc_vv_nxv4i1_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -204,11 +154,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv8i16.nxv8i16( - , - , - iXLen); - define @intrinsic_vmadc_vv_nxv8i1_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -224,11 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv16i16.nxv16i16( - , - , - iXLen); - define @intrinsic_vmadc_vv_nxv16i1_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -244,11 +184,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv32i16.nxv32i16( - , - , - iXLen); - define @intrinsic_vmadc_vv_nxv32i1_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv32i1_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -264,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv1i32.nxv1i32( - , - , - iXLen); - define @intrinsic_vmadc_vv_nxv1i1_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -284,11 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv2i32.nxv2i32( - , - , - iXLen); - define @intrinsic_vmadc_vv_nxv2i1_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -304,11 +229,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv4i32.nxv4i32( - , - , - iXLen); - define @intrinsic_vmadc_vv_nxv4i1_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -324,11 +244,6 @@ entry: ret %a } 
-declare @llvm.riscv.vmadc.nxv8i32.nxv8i32( - , - , - iXLen); - define @intrinsic_vmadc_vv_nxv8i1_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -344,11 +259,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv16i32.nxv16i32( - , - , - iXLen); - define @intrinsic_vmadc_vv_nxv16i1_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -364,11 +274,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv1i64.nxv1i64( - , - , - iXLen); - define @intrinsic_vmadc_vv_nxv1i1_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -384,11 +289,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv2i64.nxv2i64( - , - , - iXLen); - define @intrinsic_vmadc_vv_nxv2i1_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -404,11 +304,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv4i64.nxv4i64( - , - , - iXLen); - define @intrinsic_vmadc_vv_nxv4i1_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -424,11 +319,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv8i64.nxv8i64( - , - , - iXLen); - define @intrinsic_vmadc_vv_nxv8i1_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -444,11 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv1i8.i8( - , - i8, - iXLen); - define @intrinsic_vmadc_vx_nxv1i1_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -464,11 +349,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv2i8.i8( - , - i8, - iXLen); - define @intrinsic_vmadc_vx_nxv2i1_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -484,11 +364,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv4i8.i8( - , - i8, - iXLen); - define @intrinsic_vmadc_vx_nxv4i1_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -504,11 +379,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv8i8.i8( - , - i8, - iXLen); - define @intrinsic_vmadc_vx_nxv8i1_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -524,11 +394,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv16i8.i8( - , - i8, - iXLen); - define @intrinsic_vmadc_vx_nxv16i1_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -544,11 +409,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv32i8.i8( - , - i8, - iXLen); - define @intrinsic_vmadc_vx_nxv32i1_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv32i1_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -564,11 +424,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv64i8.i8( - , - i8, - iXLen); - define @intrinsic_vmadc_vx_nxv64i1_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv64i1_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -584,11 +439,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv1i16.i16( - , - i16, - iXLen); - define @intrinsic_vmadc_vx_nxv1i1_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -604,11 +454,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv2i16.i16( - , - i16, - iXLen); - define @intrinsic_vmadc_vx_nxv2i1_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -624,11 +469,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv4i16.i16( - , - i16, - iXLen); - define 
@intrinsic_vmadc_vx_nxv4i1_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -644,11 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv8i16.i16( - , - i16, - iXLen); - define @intrinsic_vmadc_vx_nxv8i1_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -664,11 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv16i16.i16( - , - i16, - iXLen); - define @intrinsic_vmadc_vx_nxv16i1_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -684,11 +514,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv32i16.i16( - , - i16, - iXLen); - define @intrinsic_vmadc_vx_nxv32i1_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv32i1_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -704,11 +529,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv1i32.i32( - , - i32, - iXLen); - define @intrinsic_vmadc_vx_nxv1i1_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -724,11 +544,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv2i32.i32( - , - i32, - iXLen); - define @intrinsic_vmadc_vx_nxv2i1_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -744,11 +559,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv4i32.i32( - , - i32, - iXLen); - define @intrinsic_vmadc_vx_nxv4i1_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -764,11 +574,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv8i32.i32( - , - i32, - iXLen); - define @intrinsic_vmadc_vx_nxv8i1_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ 
-784,11 +589,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv16i32.i32( - , - i32, - iXLen); - define @intrinsic_vmadc_vx_nxv16i1_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -804,11 +604,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv1i64.i64( - , - i64, - iXLen); - define @intrinsic_vmadc_vx_nxv1i1_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -836,11 +631,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv2i64.i64( - , - i64, - iXLen); - define @intrinsic_vmadc_vx_nxv2i1_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -868,11 +658,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv4i64.i64( - , - i64, - iXLen); - define @intrinsic_vmadc_vx_nxv4i1_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -900,11 +685,6 @@ entry: ret %a } -declare @llvm.riscv.vmadc.nxv8i64.i64( - , - i64, - iXLen); - define @intrinsic_vmadc_vx_nxv8i1_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmadd-vp.ll index f55c7c0b90b3f..fe5b8b9bf6d52 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmadd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmadd-vp.ll @@ -4,11 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.mul.nxv1i8(, , , i32) -declare @llvm.vp.add.nxv1i8(, , , i32) -declare @llvm.vp.merge.nxv1i8(, , , i32) -declare @llvm.vp.select.nxv1i8(, , , i32) - define @vmadd_vv_nxv1i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmadd_vv_nxv1i8: ; CHECK: # %bb.0: @@ -87,11 +82,6 @@ define 
@vmadd_vx_nxv1i8_ta( %a, i8 %b, %u } -declare @llvm.vp.mul.nxv2i8(, , , i32) -declare @llvm.vp.add.nxv2i8(, , , i32) -declare @llvm.vp.merge.nxv2i8(, , , i32) -declare @llvm.vp.select.nxv2i8(, , , i32) - define @vmadd_vv_nxv2i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmadd_vv_nxv2i8: ; CHECK: # %bb.0: @@ -170,11 +160,6 @@ define @vmadd_vx_nxv2i8_ta( %a, i8 %b, %u } -declare @llvm.vp.mul.nxv4i8(, , , i32) -declare @llvm.vp.add.nxv4i8(, , , i32) -declare @llvm.vp.merge.nxv4i8(, , , i32) -declare @llvm.vp.select.nxv4i8(, , , i32) - define @vmadd_vv_nxv4i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmadd_vv_nxv4i8: ; CHECK: # %bb.0: @@ -253,11 +238,6 @@ define @vmadd_vx_nxv4i8_ta( %a, i8 %b, %u } -declare @llvm.vp.mul.nxv8i8(, , , i32) -declare @llvm.vp.add.nxv8i8(, , , i32) -declare @llvm.vp.merge.nxv8i8(, , , i32) -declare @llvm.vp.select.nxv8i8(, , , i32) - define @vmadd_vv_nxv8i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmadd_vv_nxv8i8: ; CHECK: # %bb.0: @@ -336,11 +316,6 @@ define @vmadd_vx_nxv8i8_ta( %a, i8 %b, %u } -declare @llvm.vp.mul.nxv16i8(, , , i32) -declare @llvm.vp.add.nxv16i8(, , , i32) -declare @llvm.vp.merge.nxv16i8(, , , i32) -declare @llvm.vp.select.nxv16i8(, , , i32) - define @vmadd_vv_nxv16i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmadd_vv_nxv16i8: ; CHECK: # %bb.0: @@ -419,11 +394,6 @@ define @vmadd_vx_nxv16i8_ta( %a, i8 %b, %u } -declare @llvm.vp.mul.nxv32i8(, , , i32) -declare @llvm.vp.add.nxv32i8(, , , i32) -declare @llvm.vp.merge.nxv32i8(, , , i32) -declare @llvm.vp.select.nxv32i8(, , , i32) - define @vmadd_vv_nxv32i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmadd_vv_nxv32i8: ; CHECK: # %bb.0: @@ -502,11 +472,6 @@ define @vmadd_vx_nxv32i8_ta( %a, i8 %b, %u } -declare @llvm.vp.mul.nxv64i8(, , , i32) -declare @llvm.vp.add.nxv64i8(, , , i32) -declare @llvm.vp.merge.nxv64i8(, , , i32) -declare @llvm.vp.select.nxv64i8(, , , i32) - define @vmadd_vv_nxv64i8( %a, %b, %c, %m, i32 zeroext %evl) { 
; CHECK-LABEL: vmadd_vv_nxv64i8: ; CHECK: # %bb.0: @@ -588,11 +553,6 @@ define @vmadd_vx_nxv64i8_ta( %a, i8 %b, %u } -declare @llvm.vp.mul.nxv1i16(, , , i32) -declare @llvm.vp.add.nxv1i16(, , , i32) -declare @llvm.vp.merge.nxv1i16(, , , i32) -declare @llvm.vp.select.nxv1i16(, , , i32) - define @vmadd_vv_nxv1i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmadd_vv_nxv1i16: ; CHECK: # %bb.0: @@ -671,11 +631,6 @@ define @vmadd_vx_nxv1i16_ta( %a, i16 %b, %u } -declare @llvm.vp.mul.nxv2i16(, , , i32) -declare @llvm.vp.add.nxv2i16(, , , i32) -declare @llvm.vp.merge.nxv2i16(, , , i32) -declare @llvm.vp.select.nxv2i16(, , , i32) - define @vmadd_vv_nxv2i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmadd_vv_nxv2i16: ; CHECK: # %bb.0: @@ -754,11 +709,6 @@ define @vmadd_vx_nxv2i16_ta( %a, i16 %b, %u } -declare @llvm.vp.mul.nxv4i16(, , , i32) -declare @llvm.vp.add.nxv4i16(, , , i32) -declare @llvm.vp.merge.nxv4i16(, , , i32) -declare @llvm.vp.select.nxv4i16(, , , i32) - define @vmadd_vv_nxv4i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmadd_vv_nxv4i16: ; CHECK: # %bb.0: @@ -837,11 +787,6 @@ define @vmadd_vx_nxv4i16_ta( %a, i16 %b, %u } -declare @llvm.vp.mul.nxv8i16(, , , i32) -declare @llvm.vp.add.nxv8i16(, , , i32) -declare @llvm.vp.merge.nxv8i16(, , , i32) -declare @llvm.vp.select.nxv8i16(, , , i32) - define @vmadd_vv_nxv8i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmadd_vv_nxv8i16: ; CHECK: # %bb.0: @@ -920,11 +865,6 @@ define @vmadd_vx_nxv8i16_ta( %a, i16 %b, %u } -declare @llvm.vp.mul.nxv16i16(, , , i32) -declare @llvm.vp.add.nxv16i16(, , , i32) -declare @llvm.vp.merge.nxv16i16(, , , i32) -declare @llvm.vp.select.nxv16i16(, , , i32) - define @vmadd_vv_nxv16i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmadd_vv_nxv16i16: ; CHECK: # %bb.0: @@ -1003,11 +943,6 @@ define @vmadd_vx_nxv16i16_ta( %a, i16 %b, ret %u } -declare @llvm.vp.mul.nxv32i16(, , , i32) -declare @llvm.vp.add.nxv32i16(, , , i32) -declare 
@llvm.vp.merge.nxv32i16(, , , i32) -declare @llvm.vp.select.nxv32i16(, , , i32) - define @vmadd_vv_nxv32i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmadd_vv_nxv32i16: ; CHECK: # %bb.0: @@ -1089,11 +1024,6 @@ define @vmadd_vx_nxv32i16_ta( %a, i16 %b, ret %u } -declare @llvm.vp.mul.nxv1i32(, , , i32) -declare @llvm.vp.add.nxv1i32(, , , i32) -declare @llvm.vp.merge.nxv1i32(, , , i32) -declare @llvm.vp.select.nxv1i32(, , , i32) - define @vmadd_vv_nxv1i32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmadd_vv_nxv1i32: ; CHECK: # %bb.0: @@ -1172,11 +1102,6 @@ define @vmadd_vx_nxv1i32_ta( %a, i32 %b, %u } -declare @llvm.vp.mul.nxv2i32(, , , i32) -declare @llvm.vp.add.nxv2i32(, , , i32) -declare @llvm.vp.merge.nxv2i32(, , , i32) -declare @llvm.vp.select.nxv2i32(, , , i32) - define @vmadd_vv_nxv2i32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmadd_vv_nxv2i32: ; CHECK: # %bb.0: @@ -1255,11 +1180,6 @@ define @vmadd_vx_nxv2i32_ta( %a, i32 %b, %u } -declare @llvm.vp.mul.nxv4i32(, , , i32) -declare @llvm.vp.add.nxv4i32(, , , i32) -declare @llvm.vp.merge.nxv4i32(, , , i32) -declare @llvm.vp.select.nxv4i32(, , , i32) - define @vmadd_vv_nxv4i32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmadd_vv_nxv4i32: ; CHECK: # %bb.0: @@ -1338,11 +1258,6 @@ define @vmadd_vx_nxv4i32_ta( %a, i32 %b, %u } -declare @llvm.vp.mul.nxv8i32(, , , i32) -declare @llvm.vp.add.nxv8i32(, , , i32) -declare @llvm.vp.merge.nxv8i32(, , , i32) -declare @llvm.vp.select.nxv8i32(, , , i32) - define @vmadd_vv_nxv8i32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmadd_vv_nxv8i32: ; CHECK: # %bb.0: @@ -1421,11 +1336,6 @@ define @vmadd_vx_nxv8i32_ta( %a, i32 %b, %u } -declare @llvm.vp.mul.nxv16i32(, , , i32) -declare @llvm.vp.add.nxv16i32(, , , i32) -declare @llvm.vp.merge.nxv16i32(, , , i32) -declare @llvm.vp.select.nxv16i32(, , , i32) - define @vmadd_vv_nxv16i32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmadd_vv_nxv16i32: ; CHECK: # %bb.0: @@ -1507,11 +1417,6 @@ 
define @vmadd_vx_nxv16i32_ta( %a, i32 %b, ret %u } -declare @llvm.vp.mul.nxv1i64(, , , i32) -declare @llvm.vp.add.nxv1i64(, , , i32) -declare @llvm.vp.merge.nxv1i64(, , , i32) -declare @llvm.vp.select.nxv1i64(, , , i32) - define @vmadd_vv_nxv1i64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmadd_vv_nxv1i64: ; CHECK: # %bb.0: @@ -1634,11 +1539,6 @@ define @vmadd_vx_nxv1i64_ta( %a, i64 %b, %u } -declare @llvm.vp.mul.nxv2i64(, , , i32) -declare @llvm.vp.add.nxv2i64(, , , i32) -declare @llvm.vp.merge.nxv2i64(, , , i32) -declare @llvm.vp.select.nxv2i64(, , , i32) - define @vmadd_vv_nxv2i64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmadd_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1761,11 +1661,6 @@ define @vmadd_vx_nxv2i64_ta( %a, i64 %b, %u } -declare @llvm.vp.mul.nxv4i64(, , , i32) -declare @llvm.vp.add.nxv4i64(, , , i32) -declare @llvm.vp.merge.nxv4i64(, , , i32) -declare @llvm.vp.select.nxv4i64(, , , i32) - define @vmadd_vv_nxv4i64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmadd_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1888,11 +1783,6 @@ define @vmadd_vx_nxv4i64_ta( %a, i64 %b, %u } -declare @llvm.vp.mul.nxv8i64(, , , i32) -declare @llvm.vp.add.nxv8i64(, , , i32) -declare @llvm.vp.merge.nxv8i64(, , , i32) -declare @llvm.vp.select.nxv8i64(, , , i32) - define @vmadd_vv_nxv8i64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmadd_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadd.ll b/llvm/test/CodeGen/RISCV/rvv/vmadd.ll index 829d082ab7a4f..482642591c91a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmadd.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmadd.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vmadd.nxv1i8.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # 
%entry @@ -27,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,13 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv2i8.nxv2i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmadd_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -74,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,13 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv4i8.nxv4i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmadd_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -121,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,13 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv8i8.nxv8i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmadd_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -168,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; 
CHECK: # %bb.0: # %entry @@ -192,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv16i8.nxv16i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmadd_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -215,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,13 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv32i8.nxv32i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmadd_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -262,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,13 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv1i16.nxv1i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmadd_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -309,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -333,13 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv2i16.nxv2i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmadd_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; 
CHECK-LABEL: intrinsic_vmadd_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -356,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -380,13 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv4i16.nxv4i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmadd_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -403,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -427,13 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv8i16.nxv8i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmadd_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -450,13 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -474,13 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv16i16.nxv16i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmadd_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -497,13 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, iXLen); - define 
@intrinsic_vmadd_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -521,13 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv1i32.nxv1i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmadd_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -544,13 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -568,13 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv2i32.nxv2i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmadd_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -591,13 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -615,13 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv4i32.nxv4i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmadd_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -638,13 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -662,13 +466,6 @@ entry: ret %a } 
-declare @llvm.riscv.vmadd.nxv8i32.nxv8i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmadd_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -685,13 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -709,13 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv1i64.nxv1i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmadd_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -732,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -756,13 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv2i64.nxv2i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmadd_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -779,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -803,13 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv4i64.nxv4i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vmadd_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i64_nxv4i64_nxv4i64: ; 
CHECK: # %bb.0: # %entry @@ -826,13 +581,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -850,13 +598,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv1i8.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmadd_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i8_i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -873,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv1i8.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i8_i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -897,13 +631,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv2i8.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmadd_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i8_i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -920,13 +647,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv2i8.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i8_i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -944,13 +664,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv4i8.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmadd_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i8_i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -967,13 +680,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv4i8.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i8_i8_nxv4i8: ; CHECK: # 
%bb.0: # %entry @@ -991,13 +697,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv8i8.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmadd_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i8_i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1014,13 +713,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv8i8.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i8_i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1038,13 +730,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv16i8.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmadd_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv16i8_i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1061,13 +746,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv16i8.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv16i8_i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1085,13 +763,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv32i8.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmadd_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv32i8_i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1108,13 +779,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv32i8.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv32i8_i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1132,13 +796,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv1i16.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmadd_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i16_i16_nxv1i16: ; CHECK: # %bb.0: # 
%entry @@ -1155,13 +812,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv1i16.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i16_i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1179,13 +829,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv2i16.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmadd_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i16_i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1202,13 +845,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv2i16.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i16_i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1226,13 +862,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv4i16.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmadd_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i16_i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1249,13 +878,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv4i16.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i16_i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1273,13 +895,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv8i16.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmadd_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i16_i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1296,13 +911,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv8i16.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmadd_mask_vx_nxv8i16_i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1320,13 +928,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv16i16.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmadd_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv16i16_i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1343,13 +944,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv16i16.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv16i16_i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1367,13 +961,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv1i32.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vmadd_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i32_i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1390,13 +977,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv1i32.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i32_i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1414,13 +994,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv2i32.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vmadd_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i32_i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1437,13 +1010,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv2i32.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i32_i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1461,13 +1027,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv4i32.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vmadd_vx_nxv4i32_i32_nxv4i32( %0, 
i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i32_i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1484,13 +1043,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv4i32.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i32_i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1508,13 +1060,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv8i32.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vmadd_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i32_i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1531,13 +1076,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv8i32.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i32_i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1555,13 +1093,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv1i64.i64( - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64: ; RV32: # %bb.0: # %entry @@ -1591,13 +1122,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv1i64.i64( - , - i64, - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmadd_mask_vx_nxv1i64_i64_nxv1i64: ; RV32: # %bb.0: # %entry @@ -1628,13 +1152,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv2i64.i64( - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vmadd_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vmadd_vx_nxv2i64_i64_nxv2i64: ; RV32: # %bb.0: # %entry @@ -1664,13 +1181,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv2i64.i64( - , - i64, - , - , - iXLen, iXLen); - define 
@intrinsic_vmadd_mask_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmadd_mask_vx_nxv2i64_i64_nxv2i64: ; RV32: # %bb.0: # %entry @@ -1701,13 +1211,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.nxv4i64.i64( - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vmadd_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vmadd_vx_nxv4i64_i64_nxv4i64: ; RV32: # %bb.0: # %entry @@ -1737,13 +1240,6 @@ entry: ret %a } -declare @llvm.riscv.vmadd.mask.nxv4i64.i64( - , - i64, - , - , - iXLen, iXLen); - define @intrinsic_vmadd_mask_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmadd_mask_vx_nxv4i64_i64_nxv4i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmand.ll b/llvm/test/CodeGen/RISCV/rvv/vmand.ll index 67c89799779f0..c51a7463ac031 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmand.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmand.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vmand.nxv1i1( - , - , - iXLen); - define @intrinsic_vmand_mm_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmand_mm_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -24,11 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmand.nxv2i1( - , - , - iXLen); - define @intrinsic_vmand_mm_nxv2i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmand_mm_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -44,11 +34,6 @@ entry: ret %a } -declare @llvm.riscv.vmand.nxv4i1( - , - , - iXLen); - define @intrinsic_vmand_mm_nxv4i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmand_mm_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -64,11 +49,6 @@ entry: ret %a } -declare @llvm.riscv.vmand.nxv8i1( - , - , - iXLen); - define @intrinsic_vmand_mm_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmand_mm_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -84,11 +64,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmand.nxv16i1( - , - , - iXLen); - define @intrinsic_vmand_mm_nxv16i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmand_mm_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -104,11 +79,6 @@ entry: ret %a } -declare @llvm.riscv.vmand.nxv32i1( - , - , - iXLen); - define @intrinsic_vmand_mm_nxv32i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmand_mm_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -124,11 +94,6 @@ entry: ret %a } -declare @llvm.riscv.vmand.nxv64i1( - , - , - iXLen); - define @intrinsic_vmand_mm_nxv64i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmand_mm_nxv64i1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmandn.ll b/llvm/test/CodeGen/RISCV/rvv/vmandn.ll index 38d71d12660b5..1361f9d67b522 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmandn.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmandn.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vmandn.nxv1i1( - , - , - iXLen); - define @intrinsic_vmandn_mm_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmandn_mm_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -24,11 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmandn.nxv2i1( - , - , - iXLen); - define @intrinsic_vmandn_mm_nxv2i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmandn_mm_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -44,11 +34,6 @@ entry: ret %a } -declare @llvm.riscv.vmandn.nxv4i1( - , - , - iXLen); - define @intrinsic_vmandn_mm_nxv4i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmandn_mm_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -64,11 +49,6 @@ entry: ret %a } -declare @llvm.riscv.vmandn.nxv8i1( - , - , - iXLen); - define @intrinsic_vmandn_mm_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmandn_mm_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -84,11 +64,6 @@ entry: ret %a } -declare @llvm.riscv.vmandn.nxv16i1( - , - , - iXLen); - define @intrinsic_vmandn_mm_nxv16i1( %0, %1, iXLen %2) 
nounwind { ; CHECK-LABEL: intrinsic_vmandn_mm_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -104,11 +79,6 @@ entry: ret %a } -declare @llvm.riscv.vmandn.nxv32i1( - , - , - iXLen); - define @intrinsic_vmandn_mm_nxv32i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmandn_mm_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -124,11 +94,6 @@ entry: ret %a } -declare @llvm.riscv.vmandn.nxv64i1( - , - , - iXLen); - define @intrinsic_vmandn_mm_nxv64i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmandn_mm_nxv64i1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll index d81936354f6f3..3cf464247250a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.smax.nxv8i7(, , , i32) - define @vmax_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vx_nxv8i7: ; CHECK: # %bb.0: @@ -23,8 +21,6 @@ define @vmax_vx_nxv8i7( %a, i7 signext %b, %v } -declare @llvm.vp.smax.nxv1i8(, , , i32) - define @vmax_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_nxv1i8: ; CHECK: # %bb.0: @@ -81,8 +77,6 @@ define @vmax_vx_nxv1i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.smax.nxv2i8(, , , i32) - define @vmax_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_nxv2i8: ; CHECK: # %bb.0: @@ -127,8 +121,6 @@ define @vmax_vx_nxv2i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.smax.nxv3i8(, , , i32) - define @vmax_vv_nxv3i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_nxv3i8: ; CHECK: # %bb.0: @@ -173,8 +165,6 @@ define @vmax_vx_nxv3i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.smax.nxv4i8(, , , i32) - define @vmax_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_nxv4i8: ; CHECK: # %bb.0: @@ -219,8 +209,6 @@ define @vmax_vx_nxv4i8_unmasked( 
%va, i8 %b, ret %v } -declare @llvm.vp.smax.nxv8i8(, , , i32) - define @vmax_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_nxv8i8: ; CHECK: # %bb.0: @@ -265,8 +253,6 @@ define @vmax_vx_nxv8i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.smax.nxv16i8(, , , i32) - define @vmax_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_nxv16i8: ; CHECK: # %bb.0: @@ -311,8 +297,6 @@ define @vmax_vx_nxv16i8_unmasked( %va, i8 % ret %v } -declare @llvm.vp.smax.nxv32i8(, , , i32) - define @vmax_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_nxv32i8: ; CHECK: # %bb.0: @@ -357,8 +341,6 @@ define @vmax_vx_nxv32i8_unmasked( %va, i8 % ret %v } -declare @llvm.vp.smax.nxv64i8(, , , i32) - define @vmax_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_nxv64i8: ; CHECK: # %bb.0: @@ -405,8 +387,6 @@ define @vmax_vx_nxv64i8_unmasked( %va, i8 % ; Test that split-legalization works when the mask itself needs splitting. -declare @llvm.vp.smax.nxv128i8(, , , i32) - define @vmax_vx_nxv128i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vx_nxv128i8: ; CHECK: # %bb.0: @@ -459,8 +439,6 @@ define @vmax_vx_nxv128i8_unmasked( %va, i ret %v } -declare @llvm.vp.smax.nxv1i16(, , , i32) - define @vmax_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_nxv1i16: ; CHECK: # %bb.0: @@ -505,8 +483,6 @@ define @vmax_vx_nxv1i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.smax.nxv2i16(, , , i32) - define @vmax_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_nxv2i16: ; CHECK: # %bb.0: @@ -551,8 +527,6 @@ define @vmax_vx_nxv2i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.smax.nxv4i16(, , , i32) - define @vmax_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_nxv4i16: ; CHECK: # %bb.0: @@ -597,8 +571,6 @@ define @vmax_vx_nxv4i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.smax.nxv8i16(, , , i32) - define @vmax_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) { ; 
CHECK-LABEL: vmax_vv_nxv8i16: ; CHECK: # %bb.0: @@ -643,8 +615,6 @@ define @vmax_vx_nxv8i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.smax.nxv16i16(, , , i32) - define @vmax_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_nxv16i16: ; CHECK: # %bb.0: @@ -689,8 +659,6 @@ define @vmax_vx_nxv16i16_unmasked( %va, i ret %v } -declare @llvm.vp.smax.nxv32i16(, , , i32) - define @vmax_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_nxv32i16: ; CHECK: # %bb.0: @@ -735,8 +703,6 @@ define @vmax_vx_nxv32i16_unmasked( %va, i ret %v } -declare @llvm.vp.smax.nxv1i32(, , , i32) - define @vmax_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_nxv1i32: ; CHECK: # %bb.0: @@ -781,8 +747,6 @@ define @vmax_vx_nxv1i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.smax.nxv2i32(, , , i32) - define @vmax_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_nxv2i32: ; CHECK: # %bb.0: @@ -827,8 +791,6 @@ define @vmax_vx_nxv2i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.smax.nxv4i32(, , , i32) - define @vmax_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_nxv4i32: ; CHECK: # %bb.0: @@ -873,8 +835,6 @@ define @vmax_vx_nxv4i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.smax.nxv8i32(, , , i32) - define @vmax_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_nxv8i32: ; CHECK: # %bb.0: @@ -919,8 +879,6 @@ define @vmax_vx_nxv8i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.smax.nxv16i32(, , , i32) - define @vmax_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_nxv16i32: ; CHECK: # %bb.0: @@ -967,8 +925,6 @@ define @vmax_vx_nxv16i32_unmasked( %va, i ; Test that split-legalization works then the mask needs manual splitting. 
-declare @llvm.vp.smax.nxv32i32(, , , i32) - define @vmax_vx_nxv32i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vx_nxv32i32: ; CHECK: # %bb.0: @@ -1024,8 +980,6 @@ define @vmax_vx_nxv32i32_unmasked( %va, i ; Test splitting when the %evl is a constant (albeit an unknown one). -declare i32 @llvm.vscale.i32() - define @vmax_vx_nxv32i32_evl_nx8( %va, i32 %b, %m) { ; RV32-LABEL: vmax_vx_nxv32i32_evl_nx8: ; RV32: # %bb.0: @@ -1091,8 +1045,6 @@ define @vmax_vx_nxv32i32_evl_nx16( %va, i ret %v } -declare @llvm.vp.smax.nxv1i64(, , , i32) - define @vmax_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_nxv1i64: ; CHECK: # %bb.0: @@ -1165,8 +1117,6 @@ define @vmax_vx_nxv1i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.smax.nxv2i64(, , , i32) - define @vmax_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1239,8 +1189,6 @@ define @vmax_vx_nxv2i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.smax.nxv4i64(, , , i32) - define @vmax_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1313,8 +1261,6 @@ define @vmax_vx_nxv4i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.smax.nxv8i64(, , , i32) - define @vmax_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmax_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax.ll b/llvm/test/CodeGen/RISCV/rvv/vmax.ll index 7b22649e26425..e90ccc7f21291 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmax.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmax.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vmax.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmax.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vmax_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vmax_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vmax_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv16i8.nxv16i8( - 
, - , - , - iXLen); - define @intrinsic_vmax_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vmax_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv64i8.nxv64i8( - , - , - , - iXLen); - define @intrinsic_vmax_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vmax_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv1i16.nxv1i16( - , - 
, - , - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vmax_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vmax_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vmax_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmax.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vmax_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -544,14 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -569,12 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv32i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vmax_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -591,14 +417,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vmax_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vmax_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ 
entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -711,12 +501,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vmax_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -733,14 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -758,12 +534,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vmax_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -780,14 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -805,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv16i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vmax_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -827,14 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; 
CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vmax_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv2i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vmax_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv4i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vmax_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv8i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vmax_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # 
%bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1042,12 +734,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv1i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmax_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1064,14 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1089,12 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmax_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1111,14 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1136,12 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmax_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1158,14 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1183,12 +833,6 @@ entry: ret %a } 
-declare @llvm.riscv.vmax.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmax_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1205,14 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1230,12 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmax_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1252,14 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1277,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmax_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1299,14 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1324,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv64i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmax_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1346,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, - iXLen); - 
define @intrinsic_vmax_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1371,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmax_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1393,14 +981,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1418,12 +998,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmax_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1440,14 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1465,12 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmax_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1487,14 +1047,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1512,12 +1064,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv8i16.i16( - , - , - i16, - iXLen); - define 
@intrinsic_vmax_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1534,14 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1559,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmax_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1581,14 +1113,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1606,12 +1130,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv32i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmax_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1628,14 +1146,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1653,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmax_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1675,14 +1179,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen); 
- define @intrinsic_vmax_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1700,12 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmax_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1722,14 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1747,12 +1229,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmax_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1769,14 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1794,12 +1262,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmax_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1816,14 +1278,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1841,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv16i32.i32( - , - , - i32, - iXLen); - define 
@intrinsic_vmax_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1863,14 +1311,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1888,12 +1328,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv1i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmax_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmax_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1922,14 +1356,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmax_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1959,12 +1385,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv2i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmax_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmax_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1993,14 +1413,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmax_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2030,12 +1442,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv4i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmax_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmax_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2064,14 +1470,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define 
@intrinsic_vmax_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmax_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2101,12 +1499,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.nxv8i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmax_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmax_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2135,14 +1527,6 @@ entry: ret %a } -declare @llvm.riscv.vmax.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vmax_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmax_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll index 7603bcef1973e..e755d099df4a8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.umax.nxv8i7(, , , i32) - define @vmaxu_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vx_nxv8i7: ; CHECK: # %bb.0: @@ -22,8 +20,6 @@ define @vmaxu_vx_nxv8i7( %a, i7 signext %b, < ret %v } -declare @llvm.vp.umax.nxv1i8(, , , i32) - define @vmaxu_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_nxv1i8: ; CHECK: # %bb.0: @@ -80,8 +76,6 @@ define @vmaxu_vx_nxv1i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.umax.nxv2i8(, , , i32) - define @vmaxu_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_nxv2i8: ; CHECK: # %bb.0: @@ -126,8 +120,6 @@ define @vmaxu_vx_nxv2i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.umax.nxv3i8(, , , i32) - define @vmaxu_vv_nxv3i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_nxv3i8: ; CHECK: # %bb.0: @@ -172,8 +164,6 @@ define @vmaxu_vx_nxv3i8_unmasked( %va, i8 %b, 
ret %v } -declare @llvm.vp.umax.nxv4i8(, , , i32) - define @vmaxu_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_nxv4i8: ; CHECK: # %bb.0: @@ -218,8 +208,6 @@ define @vmaxu_vx_nxv4i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.umax.nxv8i8(, , , i32) - define @vmaxu_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_nxv8i8: ; CHECK: # %bb.0: @@ -264,8 +252,6 @@ define @vmaxu_vx_nxv8i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.umax.nxv16i8(, , , i32) - define @vmaxu_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_nxv16i8: ; CHECK: # %bb.0: @@ -310,8 +296,6 @@ define @vmaxu_vx_nxv16i8_unmasked( %va, i8 ret %v } -declare @llvm.vp.umax.nxv32i8(, , , i32) - define @vmaxu_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_nxv32i8: ; CHECK: # %bb.0: @@ -356,8 +340,6 @@ define @vmaxu_vx_nxv32i8_unmasked( %va, i8 ret %v } -declare @llvm.vp.umax.nxv64i8(, , , i32) - define @vmaxu_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_nxv64i8: ; CHECK: # %bb.0: @@ -404,8 +386,6 @@ define @vmaxu_vx_nxv64i8_unmasked( %va, i8 ; Test that split-legalization works when the mask itself needs splitting. 
-declare @llvm.vp.umax.nxv128i8(, , , i32) - define @vmaxu_vx_nxv128i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vx_nxv128i8: ; CHECK: # %bb.0: @@ -458,8 +438,6 @@ define @vmaxu_vx_nxv128i8_unmasked( %va, ret %v } -declare @llvm.vp.umax.nxv1i16(, , , i32) - define @vmaxu_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_nxv1i16: ; CHECK: # %bb.0: @@ -504,8 +482,6 @@ define @vmaxu_vx_nxv1i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.umax.nxv2i16(, , , i32) - define @vmaxu_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_nxv2i16: ; CHECK: # %bb.0: @@ -550,8 +526,6 @@ define @vmaxu_vx_nxv2i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.umax.nxv4i16(, , , i32) - define @vmaxu_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_nxv4i16: ; CHECK: # %bb.0: @@ -596,8 +570,6 @@ define @vmaxu_vx_nxv4i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.umax.nxv8i16(, , , i32) - define @vmaxu_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_nxv8i16: ; CHECK: # %bb.0: @@ -642,8 +614,6 @@ define @vmaxu_vx_nxv8i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.umax.nxv16i16(, , , i32) - define @vmaxu_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_nxv16i16: ; CHECK: # %bb.0: @@ -688,8 +658,6 @@ define @vmaxu_vx_nxv16i16_unmasked( %va, ret %v } -declare @llvm.vp.umax.nxv32i16(, , , i32) - define @vmaxu_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_nxv32i16: ; CHECK: # %bb.0: @@ -734,8 +702,6 @@ define @vmaxu_vx_nxv32i16_unmasked( %va, ret %v } -declare @llvm.vp.umax.nxv1i32(, , , i32) - define @vmaxu_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_nxv1i32: ; CHECK: # %bb.0: @@ -780,8 +746,6 @@ define @vmaxu_vx_nxv1i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.umax.nxv2i32(, , , i32) - define @vmaxu_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_nxv2i32: ; CHECK: # %bb.0: @@ -826,8 +790,6 @@ define 
@vmaxu_vx_nxv2i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.umax.nxv4i32(, , , i32) - define @vmaxu_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_nxv4i32: ; CHECK: # %bb.0: @@ -872,8 +834,6 @@ define @vmaxu_vx_nxv4i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.umax.nxv8i32(, , , i32) - define @vmaxu_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_nxv8i32: ; CHECK: # %bb.0: @@ -918,8 +878,6 @@ define @vmaxu_vx_nxv8i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.umax.nxv16i32(, , , i32) - define @vmaxu_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_nxv16i32: ; CHECK: # %bb.0: @@ -966,8 +924,6 @@ define @vmaxu_vx_nxv16i32_unmasked( %va, ; Test that split-legalization works then the mask needs manual splitting. -declare @llvm.vp.umax.nxv32i32(, , , i32) - define @vmaxu_vx_nxv32i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vx_nxv32i32: ; CHECK: # %bb.0: @@ -1023,8 +979,6 @@ define @vmaxu_vx_nxv32i32_unmasked( %va, ; Test splitting when the %evl is a constant (albeit an unknown one). 
-declare i32 @llvm.vscale.i32() - define @vmaxu_vx_nxv32i32_evl_nx8( %va, i32 %b, %m) { ; RV32-LABEL: vmaxu_vx_nxv32i32_evl_nx8: ; RV32: # %bb.0: @@ -1090,8 +1044,6 @@ define @vmaxu_vx_nxv32i32_evl_nx16( %va, ret %v } -declare @llvm.vp.umax.nxv1i64(, , , i32) - define @vmaxu_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_nxv1i64: ; CHECK: # %bb.0: @@ -1164,8 +1116,6 @@ define @vmaxu_vx_nxv1i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.umax.nxv2i64(, , , i32) - define @vmaxu_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1238,8 +1188,6 @@ define @vmaxu_vx_nxv2i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.umax.nxv4i64(, , , i32) - define @vmaxu_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1312,8 +1260,6 @@ define @vmaxu_vx_nxv4i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.umax.nxv8i64(, , , i32) - define @vmaxu_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmaxu_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu.ll index 377c182cab21c..96e7e3df34955 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmaxu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vmaxu.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmaxu.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vmaxu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vmaxu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vmaxu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vmaxu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv16i8.nxv16i8( 
- , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vmaxu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv64i8.nxv64i8( - , - , - , - iXLen); - define @intrinsic_vmaxu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vmaxu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } 
-declare @llvm.riscv.vmaxu.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vmaxu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vmaxu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vmaxu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vmaxu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -544,14 
+384,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -569,12 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv32i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vmaxu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -591,14 +417,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vmaxu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vmaxu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmaxu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -711,12 +501,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vmaxu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -733,14 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -758,12 +534,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vmaxu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -780,14 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -805,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv16i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vmaxu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -827,14 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vmaxu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, 
iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv2i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vmaxu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv4i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vmaxu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv8i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vmaxu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vmaxu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1042,12 +734,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv1i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmaxu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1064,14 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1089,12 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmaxu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1111,14 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1136,12 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmaxu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1158,14 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1183,12 +833,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmaxu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen 
%2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1205,14 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1230,12 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmaxu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1252,14 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1277,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmaxu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1299,14 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1324,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv64i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmaxu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1346,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1371,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmaxu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1393,14 +981,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1418,12 +998,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmaxu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1440,14 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1465,12 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmaxu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1487,14 +1047,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1512,12 +1064,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmaxu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vmaxu_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1534,14 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1559,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmaxu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1581,14 +1113,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1606,12 +1130,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv32i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmaxu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1628,14 +1146,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1653,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmaxu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1675,14 +1179,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vx_nxv1i32_nxv1i32_i32( 
%0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1700,12 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmaxu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1722,14 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1747,12 +1229,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmaxu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1769,14 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1794,12 +1262,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmaxu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1816,14 +1278,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1841,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv16i32.i32( - , - , - i32, - iXLen); - define 
@intrinsic_vmaxu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1863,14 +1311,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1888,12 +1328,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv1i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmaxu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmaxu_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1922,14 +1356,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmaxu_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1959,12 +1385,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv2i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmaxu_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmaxu_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1993,14 +1413,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmaxu_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2030,12 +1442,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv4i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmaxu_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmaxu_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2064,14 +1470,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, - iXLen); 
- define @intrinsic_vmaxu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmaxu_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2101,12 +1499,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.nxv8i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmaxu_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmaxu_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2135,14 +1527,6 @@ entry: ret %a } -declare @llvm.riscv.vmaxu.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vmaxu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmaxu_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmclr.ll b/llvm/test/CodeGen/RISCV/rvv/vmclr.ll index c00fc445fc5a3..fff20306d17b7 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmclr.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmclr.ll @@ -4,9 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vmclr.nxv1i1( - iXLen); - define @intrinsic_vmclr_m_pseudo_nxv1i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -20,9 +17,6 @@ entry: ret %a } -declare @llvm.riscv.vmclr.nxv2i1( - iXLen); - define @intrinsic_vmclr_m_pseudo_nxv2i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -36,9 +30,6 @@ entry: ret %a } -declare @llvm.riscv.vmclr.nxv4i1( - iXLen); - define @intrinsic_vmclr_m_pseudo_nxv4i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -52,9 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmclr.nxv8i1( - iXLen); - define @intrinsic_vmclr_m_pseudo_nxv8i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -68,9 +56,6 @@ entry: ret %a } -declare @llvm.riscv.vmclr.nxv16i1( - 
iXLen); - define @intrinsic_vmclr_m_pseudo_nxv16i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -84,9 +69,6 @@ entry: ret %a } -declare @llvm.riscv.vmclr.nxv32i1( - iXLen); - define @intrinsic_vmclr_m_pseudo_nxv32i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -100,9 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmclr.nxv64i1( - iXLen); - define @intrinsic_vmclr_m_pseudo_nxv64i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv64i1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmerge.ll b/llvm/test/CodeGen/RISCV/rvv/vmerge.ll index 3fb5aa02230b4..4a411475e337a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmerge.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmerge.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck -check-prefixes=CHECK,RV64 %s -declare @llvm.riscv.vmerge.nxv1i8.nxv1i8( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -28,13 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv2i8.nxv2i8( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -52,13 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv4i8.nxv4i8( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -76,13 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv8i8.nxv8i8( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vmerge_vvm_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -100,13 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv16i8.nxv16i8( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -124,13 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv32i8.nxv32i8( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -148,13 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv64i8.nxv64i8( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -172,13 +123,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv1i16.nxv1i16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -196,13 +140,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv2i16.nxv2i16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -220,13 +157,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv4i16.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -244,13 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv8i16.nxv8i16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vmerge_vvm_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -268,13 +191,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv16i16.nxv16i16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -292,13 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv32i16.nxv32i16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -316,13 +225,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv1i32.nxv1i32( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -340,13 +242,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv2i32.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -364,13 +259,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv4i32.nxv4i32( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -388,13 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv8i32.nxv8i32( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -412,13 +293,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv16i32.nxv16i32( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, iXLen %3) nounwind 
{ ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -436,13 +310,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv1i64.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -460,13 +327,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv2i64.nxv2i64( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -484,13 +344,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv4i64.nxv4i64( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -508,13 +361,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv8i64.nxv8i64( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -532,13 +378,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv1i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmerge_vxm_nxv1i8_nxv1i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -556,13 +395,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv2i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmerge_vxm_nxv2i8_nxv2i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -580,13 +412,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv4i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmerge_vxm_nxv4i8_nxv4i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vmerge_vxm_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -604,13 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv8i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmerge_vxm_nxv8i8_nxv8i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -628,13 +446,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv16i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmerge_vxm_nxv16i8_nxv16i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -652,13 +463,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv32i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmerge_vxm_nxv32i8_nxv32i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -676,13 +480,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv64i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmerge_vxm_nxv64i8_nxv64i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -700,13 +497,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv1i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmerge_vxm_nxv1i16_nxv1i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -724,13 +514,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv2i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmerge_vxm_nxv2i16_nxv2i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -748,13 +531,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv4i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmerge_vxm_nxv4i16_nxv4i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # 
%entry @@ -772,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv8i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmerge_vxm_nxv8i16_nxv8i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -796,13 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv16i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmerge_vxm_nxv16i16_nxv16i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -820,13 +582,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv32i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmerge_vxm_nxv32i16_nxv32i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -844,13 +599,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv1i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmerge_vxm_nxv1i32_nxv1i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -868,13 +616,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv2i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmerge_vxm_nxv2i32_nxv2i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -892,13 +633,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv4i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmerge_vxm_nxv4i32_nxv4i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -916,13 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv8i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmerge_vxm_nxv8i32_nxv8i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -940,13 +667,6 @@ 
entry: ret %a } -declare @llvm.riscv.vmerge.nxv16i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmerge_vxm_nxv16i32_nxv16i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -964,13 +684,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv1i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmerge_vxm_nxv1i64_nxv1i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vmerge_vxm_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -999,13 +712,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv2i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmerge_vxm_nxv2i64_nxv2i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vmerge_vxm_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1034,13 +740,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv4i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmerge_vxm_nxv4i64_nxv4i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vmerge_vxm_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -1069,13 +768,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv8i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmerge_vxm_nxv8i64_nxv8i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vmerge_vxm_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -1478,13 +1170,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv1f16.nxv1f16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1502,13 +1187,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv2f16.nxv2f16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -1526,13 +1204,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmerge.nxv4f16.nxv4f16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -1550,13 +1221,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv8f16.nxv8f16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -1574,13 +1238,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv16f16.nxv16f16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -1598,13 +1255,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv32f16.nxv32f16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -1622,13 +1272,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv1bf16.nxv1bf16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1bf16_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -1646,13 +1289,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv2bf16.nxv2bf16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2bf16_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -1670,13 +1306,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv4bf16.nxv4bf16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4bf16_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # 
%entry @@ -1694,13 +1323,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv8bf16.nxv8bf16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8bf16_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -1718,13 +1340,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv16bf16.nxv16bf16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv16bf16_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -1742,13 +1357,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv32bf16.nxv32bf16( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv32bf16_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -1766,13 +1374,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv1f32.nxv1f32( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -1790,13 +1391,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv2f32.nxv2f32( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -1814,13 +1408,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv4f32.nxv4f32( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -1838,13 +1425,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv8f32.nxv8f32( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vmerge_vvm_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -1862,13 +1442,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv16f32.nxv16f32( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -1886,13 +1459,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv1f64.nxv1f64( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -1910,13 +1476,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv2f64.nxv2f64( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -1934,13 +1493,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv4f64.nxv4f64( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -1958,13 +1510,6 @@ entry: ret %a } -declare @llvm.riscv.vmerge.nxv8f64.nxv8f64( - , - , - , - , - iXLen); - define @intrinsic_vmerge_vvm_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfeq-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vmfeq-bf.ll index 9bd859b3452f2..668af55427891 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmfeq-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfeq-bf.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vmfeq.nxv1bf16( - , - , - iXLen); - define 
@intrinsic_vmfeq_vv_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vv_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv1bf16( - , - , - , - , - iXLen); - define @intrinsic_vmfeq_mask_vv_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -55,11 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv2bf16( - , - , - iXLen); - define @intrinsic_vmfeq_vv_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vv_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -75,13 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv2bf16( - , - , - , - , - iXLen); - define @intrinsic_vmfeq_mask_vv_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -106,11 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv4bf16( - , - , - iXLen); - define @intrinsic_vmfeq_vv_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vv_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -126,13 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv4bf16( - , - , - , - , - iXLen); - define @intrinsic_vmfeq_mask_vv_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -157,11 +121,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv8bf16( - , - , - iXLen); - define @intrinsic_vmfeq_vv_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vv_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -177,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv8bf16( - , - , - , - , - iXLen); - define @intrinsic_vmfeq_mask_vv_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # 
%entry @@ -208,11 +160,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv16bf16( - , - , - iXLen); - define @intrinsic_vmfeq_vv_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vv_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -228,13 +175,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv16bf16( - , - , - , - , - iXLen); - define @intrinsic_vmfeq_mask_vv_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -259,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv1bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmfeq_vf_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vf_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -279,13 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv1bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmfeq_mask_vf_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -306,11 +234,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv2bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmfeq_vf_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vf_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -326,13 +249,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv2bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmfeq_mask_vf_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -353,11 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv4bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmfeq_vf_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vf_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -373,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv4bf16.bf16( - , - , - 
bfloat, - , - iXLen); - define @intrinsic_vmfeq_mask_vf_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -400,11 +304,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv8bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmfeq_vf_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vf_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -420,13 +319,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv8bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmfeq_mask_vf_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -447,11 +339,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv16bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmfeq_vf_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vf_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -467,13 +354,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv16bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmfeq_mask_vf_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfeq.ll b/llvm/test/CodeGen/RISCV/rvv/vmfeq.ll index babf8de57b7ea..c306ae258cd33 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmfeq.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfeq.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vmfeq.nxv1f16( - , - , - iXLen); - define @intrinsic_vmfeq_vv_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv1f16( - , - , - , - , - iXLen); - define 
@intrinsic_vmfeq_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -55,11 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv2f16( - , - , - iXLen); - define @intrinsic_vmfeq_vv_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -75,13 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv2f16( - , - , - , - , - iXLen); - define @intrinsic_vmfeq_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -106,11 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv4f16( - , - , - iXLen); - define @intrinsic_vmfeq_vv_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -126,13 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv4f16( - , - , - , - , - iXLen); - define @intrinsic_vmfeq_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -157,11 +121,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv8f16( - , - , - iXLen); - define @intrinsic_vmfeq_vv_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -177,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv8f16( - , - , - , - , - iXLen); - define @intrinsic_vmfeq_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -208,11 +160,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv16f16( - , - , - iXLen); - define @intrinsic_vmfeq_vv_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -228,13 +175,6 @@ entry: ret %a } 
-declare @llvm.riscv.vmfeq.mask.nxv16f16( - , - , - , - , - iXLen); - define @intrinsic_vmfeq_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -259,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv1f32( - , - , - iXLen); - define @intrinsic_vmfeq_vv_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -279,13 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv1f32( - , - , - , - , - iXLen); - define @intrinsic_vmfeq_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -310,11 +238,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv2f32( - , - , - iXLen); - define @intrinsic_vmfeq_vv_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -330,13 +253,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv2f32( - , - , - , - , - iXLen); - define @intrinsic_vmfeq_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -361,11 +277,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv4f32( - , - , - iXLen); - define @intrinsic_vmfeq_vv_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -381,13 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv4f32( - , - , - , - , - iXLen); - define @intrinsic_vmfeq_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -412,11 +316,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv8f32( - , - , - iXLen); - define @intrinsic_vmfeq_vv_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmfeq_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -432,13 +331,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv8f32( - , - , - , - , - iXLen); - define @intrinsic_vmfeq_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -463,11 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv1f64( - , - , - iXLen); - define @intrinsic_vmfeq_vv_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -483,13 +370,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv1f64( - , - , - , - , - iXLen); - define @intrinsic_vmfeq_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -514,11 +394,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv2f64( - , - , - iXLen); - define @intrinsic_vmfeq_vv_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -534,13 +409,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv2f64( - , - , - , - , - iXLen); - define @intrinsic_vmfeq_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -565,11 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv4f64( - , - , - iXLen); - define @intrinsic_vmfeq_vv_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -585,13 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv4f64( - , - , - , - , - iXLen); - define @intrinsic_vmfeq_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -616,11 +472,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv1f16.f16( - , - half, - iXLen); - 
define @intrinsic_vmfeq_vf_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -636,13 +487,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv1f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmfeq_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -663,11 +507,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv2f16.f16( - , - half, - iXLen); - define @intrinsic_vmfeq_vf_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -683,13 +522,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv2f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmfeq_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -710,11 +542,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv4f16.f16( - , - half, - iXLen); - define @intrinsic_vmfeq_vf_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -730,13 +557,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv4f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmfeq_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -757,11 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv8f16.f16( - , - half, - iXLen); - define @intrinsic_vmfeq_vf_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -777,13 +592,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv8f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmfeq_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv8f16_f16: ; CHECK: # 
%bb.0: # %entry @@ -804,11 +612,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv16f16.f16( - , - half, - iXLen); - define @intrinsic_vmfeq_vf_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -824,13 +627,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv16f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmfeq_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -851,11 +647,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv1f32.f32( - , - float, - iXLen); - define @intrinsic_vmfeq_vf_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -871,13 +662,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv1f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vmfeq_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -898,11 +682,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv2f32.f32( - , - float, - iXLen); - define @intrinsic_vmfeq_vf_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -918,13 +697,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv2f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vmfeq_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -945,11 +717,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv4f32.f32( - , - float, - iXLen); - define @intrinsic_vmfeq_vf_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -965,13 +732,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv4f32.f32( - , - , - float, - , - iXLen); - define 
@intrinsic_vmfeq_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -992,11 +752,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv8f32.f32( - , - float, - iXLen); - define @intrinsic_vmfeq_vf_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1012,13 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv8f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vmfeq_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1039,11 +787,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv1f64.f64( - , - double, - iXLen); - define @intrinsic_vmfeq_vf_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1059,13 +802,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv1f64.f64( - , - , - double, - , - iXLen); - define @intrinsic_vmfeq_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1086,11 +822,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv2f64.f64( - , - double, - iXLen); - define @intrinsic_vmfeq_vf_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1106,13 +837,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv2f64.f64( - , - , - double, - , - iXLen); - define @intrinsic_vmfeq_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1133,11 +857,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.nxv4f64.f64( - , - double, - iXLen); - define @intrinsic_vmfeq_vf_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vf_nxv4f64_f64: 
; CHECK: # %bb.0: # %entry @@ -1153,13 +872,6 @@ entry: ret %a } -declare @llvm.riscv.vmfeq.mask.nxv4f64.f64( - , - , - double, - , - iXLen); - define @intrinsic_vmfeq_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfge-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vmfge-bf.ll index 73946dc1a744c..d1d53893ef407 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmfge-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfge-bf.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vmfge.nxv1bf16( - , - , - iXLen); - define @intrinsic_vmfge_vv_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vv_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv1bf16( - , - , - , - , - iXLen); - define @intrinsic_vmfge_mask_vv_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -55,11 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv2bf16( - , - , - iXLen); - define @intrinsic_vmfge_vv_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vv_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -75,13 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv2bf16( - , - , - , - , - iXLen); - define @intrinsic_vmfge_mask_vv_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -106,11 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv4bf16( - , - , - iXLen); - define @intrinsic_vmfge_vv_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vv_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -126,13 +97,6 @@ entry: ret 
%a } -declare @llvm.riscv.vmfge.mask.nxv4bf16( - , - , - , - , - iXLen); - define @intrinsic_vmfge_mask_vv_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -157,11 +121,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv8bf16( - , - , - iXLen); - define @intrinsic_vmfge_vv_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vv_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -177,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv8bf16( - , - , - , - , - iXLen); - define @intrinsic_vmfge_mask_vv_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -208,11 +160,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv16bf16( - , - , - iXLen); - define @intrinsic_vmfge_vv_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vv_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -228,13 +175,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv16bf16( - , - , - , - , - iXLen); - define @intrinsic_vmfge_mask_vv_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -259,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv1bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmfge_vf_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vf_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -279,13 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv1bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmfge_mask_vf_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -306,11 +234,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv2bf16.bf16( - , - bfloat, - iXLen); - define 
@intrinsic_vmfge_vf_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vf_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -326,13 +249,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv2bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmfge_mask_vf_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -353,11 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv4bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmfge_vf_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vf_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -373,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv4bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmfge_mask_vf_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -400,11 +304,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv8bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmfge_vf_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vf_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -420,13 +319,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv8bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmfge_mask_vf_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -447,11 +339,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv16bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmfge_vf_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vf_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -467,13 +354,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv16bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmfge_mask_vf_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfge.ll b/llvm/test/CodeGen/RISCV/rvv/vmfge.ll index 4a9dd2f7d769d..bb2ec4da79f89 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmfge.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfge.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vmfge.nxv1f16( - , - , - iXLen); - define @intrinsic_vmfge_vv_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv1f16( - , - , - , - , - iXLen); - define @intrinsic_vmfge_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -55,11 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv2f16( - , - , - iXLen); - define @intrinsic_vmfge_vv_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -75,13 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv2f16( - , - , - , - , - iXLen); - define @intrinsic_vmfge_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -106,11 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv4f16( - , - , - iXLen); - define @intrinsic_vmfge_vv_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -126,13 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv4f16( - , - , - , - , - iXLen); - define @intrinsic_vmfge_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -157,11 +121,6 @@ entry: 
ret %a } -declare @llvm.riscv.vmfge.nxv8f16( - , - , - iXLen); - define @intrinsic_vmfge_vv_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -177,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv8f16( - , - , - , - , - iXLen); - define @intrinsic_vmfge_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -208,11 +160,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv16f16( - , - , - iXLen); - define @intrinsic_vmfge_vv_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -228,13 +175,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv16f16( - , - , - , - , - iXLen); - define @intrinsic_vmfge_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -259,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv1f32( - , - , - iXLen); - define @intrinsic_vmfge_vv_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -279,13 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv1f32( - , - , - , - , - iXLen); - define @intrinsic_vmfge_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -310,11 +238,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv2f32( - , - , - iXLen); - define @intrinsic_vmfge_vv_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -330,13 +253,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv2f32( - , - , - , - , - iXLen); - define @intrinsic_vmfge_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmfge_mask_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -361,11 +277,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv4f32( - , - , - iXLen); - define @intrinsic_vmfge_vv_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -381,13 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv4f32( - , - , - , - , - iXLen); - define @intrinsic_vmfge_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -412,11 +316,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv8f32( - , - , - iXLen); - define @intrinsic_vmfge_vv_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -432,13 +331,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv8f32( - , - , - , - , - iXLen); - define @intrinsic_vmfge_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -463,11 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv1f64( - , - , - iXLen); - define @intrinsic_vmfge_vv_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -483,13 +370,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv1f64( - , - , - , - , - iXLen); - define @intrinsic_vmfge_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -514,11 +394,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv2f64( - , - , - iXLen); - define @intrinsic_vmfge_vv_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -534,13 +409,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv2f64( - , - , - , - , - iXLen); - define 
@intrinsic_vmfge_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -565,11 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv4f64( - , - , - iXLen); - define @intrinsic_vmfge_vv_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -585,13 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv4f64( - , - , - , - , - iXLen); - define @intrinsic_vmfge_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -616,11 +472,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv1f16.f16( - , - half, - iXLen); - define @intrinsic_vmfge_vf_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -636,13 +487,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv1f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmfge_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -663,11 +507,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv2f16.f16( - , - half, - iXLen); - define @intrinsic_vmfge_vf_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -683,13 +522,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv2f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmfge_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -710,11 +542,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv4f16.f16( - , - half, - iXLen); - define @intrinsic_vmfge_vf_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -730,13 
+557,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv4f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmfge_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -757,11 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv8f16.f16( - , - half, - iXLen); - define @intrinsic_vmfge_vf_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -777,13 +592,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv8f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmfge_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -804,11 +612,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv16f16.f16( - , - half, - iXLen); - define @intrinsic_vmfge_vf_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -824,13 +627,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv16f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmfge_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -851,11 +647,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv1f32.f32( - , - float, - iXLen); - define @intrinsic_vmfge_vf_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -871,13 +662,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv1f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vmfge_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -898,11 +682,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv2f32.f32( - , - float, - iXLen); - define 
@intrinsic_vmfge_vf_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -918,13 +697,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv2f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vmfge_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -945,11 +717,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv4f32.f32( - , - float, - iXLen); - define @intrinsic_vmfge_vf_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -965,13 +732,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv4f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vmfge_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -992,11 +752,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv8f32.f32( - , - float, - iXLen); - define @intrinsic_vmfge_vf_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1012,13 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv8f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vmfge_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1039,11 +787,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv1f64.f64( - , - double, - iXLen); - define @intrinsic_vmfge_vf_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1059,13 +802,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv1f64.f64( - , - , - double, - , - iXLen); - define @intrinsic_vmfge_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmfge_mask_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1086,11 +822,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv2f64.f64( - , - double, - iXLen); - define @intrinsic_vmfge_vf_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1106,13 +837,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv2f64.f64( - , - , - double, - , - iXLen); - define @intrinsic_vmfge_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1133,11 +857,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.nxv4f64.f64( - , - double, - iXLen); - define @intrinsic_vmfge_vf_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -1153,13 +872,6 @@ entry: ret %a } -declare @llvm.riscv.vmfge.mask.nxv4f64.f64( - , - , - double, - , - iXLen); - define @intrinsic_vmfge_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfgt-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vmfgt-bf.ll index fac324ca5c125..384087126bee2 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmfgt-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfgt-bf.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vmfgt.nxv1bf16( - , - , - iXLen); - define @intrinsic_vmfgt_vv_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv1bf16( - , - , - , - , - iXLen); - define @intrinsic_vmfgt_mask_vv_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv1bf16_nxv1bf16: 
; CHECK: # %bb.0: # %entry @@ -55,11 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv2bf16( - , - , - iXLen); - define @intrinsic_vmfgt_vv_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -75,13 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv2bf16( - , - , - , - , - iXLen); - define @intrinsic_vmfgt_mask_vv_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -106,11 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv4bf16( - , - , - iXLen); - define @intrinsic_vmfgt_vv_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -126,13 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv4bf16( - , - , - , - , - iXLen); - define @intrinsic_vmfgt_mask_vv_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -157,11 +121,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv8bf16( - , - , - iXLen); - define @intrinsic_vmfgt_vv_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -177,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv8bf16( - , - , - , - , - iXLen); - define @intrinsic_vmfgt_mask_vv_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -208,11 +160,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv16bf16( - , - , - iXLen); - define @intrinsic_vmfgt_vv_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -228,13 +175,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv16bf16( - , - , - , - , - iXLen); - define 
@intrinsic_vmfgt_mask_vv_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -259,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv1bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmfgt_vf_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vf_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -279,13 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv1bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmfgt_mask_vf_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -306,11 +234,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv2bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmfgt_vf_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vf_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -326,13 +249,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv2bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmfgt_mask_vf_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -353,11 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv4bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmfgt_vf_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vf_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -373,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv4bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmfgt_mask_vf_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -400,11 +304,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv8bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmfgt_vf_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { 
; CHECK-LABEL: intrinsic_vmfgt_vf_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -420,13 +319,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv8bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmfgt_mask_vf_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -447,11 +339,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv16bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmfgt_vf_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vf_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -467,13 +354,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv16bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmfgt_mask_vf_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfgt.ll b/llvm/test/CodeGen/RISCV/rvv/vmfgt.ll index c9c5e84937cec..ec05587161e2e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmfgt.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfgt.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vmfgt.nxv1f16( - , - , - iXLen); - define @intrinsic_vmfgt_vv_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv1f16( - , - , - , - , - iXLen); - define @intrinsic_vmfgt_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -55,11 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv2f16( - , - , - iXLen); - define @intrinsic_vmfgt_vv_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv2f16_nxv2f16: ; CHECK: # 
%bb.0: # %entry @@ -75,13 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv2f16( - , - , - , - , - iXLen); - define @intrinsic_vmfgt_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -106,11 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv4f16( - , - , - iXLen); - define @intrinsic_vmfgt_vv_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -126,13 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv4f16( - , - , - , - , - iXLen); - define @intrinsic_vmfgt_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -157,11 +121,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv8f16( - , - , - iXLen); - define @intrinsic_vmfgt_vv_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -177,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv8f16( - , - , - , - , - iXLen); - define @intrinsic_vmfgt_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -208,11 +160,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv16f16( - , - , - iXLen); - define @intrinsic_vmfgt_vv_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -228,13 +175,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv16f16( - , - , - , - , - iXLen); - define @intrinsic_vmfgt_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -259,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv1f32( - , - , - iXLen); - define @intrinsic_vmfgt_vv_nxv1f32_nxv1f32( %0, %1, 
iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -279,13 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv1f32( - , - , - , - , - iXLen); - define @intrinsic_vmfgt_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -310,11 +238,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv2f32( - , - , - iXLen); - define @intrinsic_vmfgt_vv_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -330,13 +253,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv2f32( - , - , - , - , - iXLen); - define @intrinsic_vmfgt_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -361,11 +277,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv4f32( - , - , - iXLen); - define @intrinsic_vmfgt_vv_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -381,13 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv4f32( - , - , - , - , - iXLen); - define @intrinsic_vmfgt_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -412,11 +316,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv8f32( - , - , - iXLen); - define @intrinsic_vmfgt_vv_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -432,13 +331,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv8f32( - , - , - , - , - iXLen); - define @intrinsic_vmfgt_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -463,11 +355,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmfgt.nxv1f64( - , - , - iXLen); - define @intrinsic_vmfgt_vv_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -483,13 +370,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv1f64( - , - , - , - , - iXLen); - define @intrinsic_vmfgt_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -514,11 +394,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv2f64( - , - , - iXLen); - define @intrinsic_vmfgt_vv_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -534,13 +409,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv2f64( - , - , - , - , - iXLen); - define @intrinsic_vmfgt_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -565,11 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv4f64( - , - , - iXLen); - define @intrinsic_vmfgt_vv_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -585,13 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv4f64( - , - , - , - , - iXLen); - define @intrinsic_vmfgt_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -616,11 +472,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv1f16.f16( - , - half, - iXLen); - define @intrinsic_vmfgt_vf_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -636,13 +487,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv1f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmfgt_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmfgt_mask_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -663,11 +507,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv2f16.f16( - , - half, - iXLen); - define @intrinsic_vmfgt_vf_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -683,13 +522,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv2f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmfgt_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -710,11 +542,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv4f16.f16( - , - half, - iXLen); - define @intrinsic_vmfgt_vf_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -730,13 +557,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv4f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmfgt_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -757,11 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv8f16.f16( - , - half, - iXLen); - define @intrinsic_vmfgt_vf_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -777,13 +592,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv8f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmfgt_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -804,11 +612,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv16f16.f16( - , - half, - iXLen); - define @intrinsic_vmfgt_vf_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -824,13 +627,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv16f16.f16( - , - , - 
half, - , - iXLen); - define @intrinsic_vmfgt_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -851,11 +647,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv1f32.f32( - , - float, - iXLen); - define @intrinsic_vmfgt_vf_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -871,13 +662,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv1f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vmfgt_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -898,11 +682,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv2f32.f32( - , - float, - iXLen); - define @intrinsic_vmfgt_vf_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -918,13 +697,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv2f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vmfgt_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -945,11 +717,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv4f32.f32( - , - float, - iXLen); - define @intrinsic_vmfgt_vf_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -965,13 +732,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv4f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vmfgt_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -992,11 +752,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv8f32.f32( - , - float, - iXLen); - define @intrinsic_vmfgt_vf_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmfgt_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1012,13 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv8f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vmfgt_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1039,11 +787,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv1f64.f64( - , - double, - iXLen); - define @intrinsic_vmfgt_vf_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1059,13 +802,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv1f64.f64( - , - , - double, - , - iXLen); - define @intrinsic_vmfgt_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1086,11 +822,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv2f64.f64( - , - double, - iXLen); - define @intrinsic_vmfgt_vf_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1106,13 +837,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv2f64.f64( - , - , - double, - , - iXLen); - define @intrinsic_vmfgt_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1133,11 +857,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.nxv4f64.f64( - , - double, - iXLen); - define @intrinsic_vmfgt_vf_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -1153,13 +872,6 @@ entry: ret %a } -declare @llvm.riscv.vmfgt.mask.nxv4f64.f64( - , - , - double, - , - iXLen); - define @intrinsic_vmfgt_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vmfle-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vmfle-bf.ll index 8356b7bbd3ff7..da7daba71b1ae 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmfle-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfle-bf.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vmfle.nxv1bf16( - , - , - iXLen); - define @intrinsic_vmfle_vv_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vv_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv1bf16( - , - , - , - , - iXLen); - define @intrinsic_vmfle_mask_vv_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -55,11 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv2bf16( - , - , - iXLen); - define @intrinsic_vmfle_vv_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vv_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -75,13 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv2bf16( - , - , - , - , - iXLen); - define @intrinsic_vmfle_mask_vv_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -106,11 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv4bf16( - , - , - iXLen); - define @intrinsic_vmfle_vv_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vv_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -126,13 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv4bf16( - , - , - , - , - iXLen); - define @intrinsic_vmfle_mask_vv_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -157,11 +121,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv8bf16( 
- , - , - iXLen); - define @intrinsic_vmfle_vv_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vv_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -177,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv8bf16( - , - , - , - , - iXLen); - define @intrinsic_vmfle_mask_vv_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -208,11 +160,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv16bf16( - , - , - iXLen); - define @intrinsic_vmfle_vv_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vv_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -228,13 +175,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv16bf16( - , - , - , - , - iXLen); - define @intrinsic_vmfle_mask_vv_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -259,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv1bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmfle_vf_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vf_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -279,13 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv1bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmfle_mask_vf_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -306,11 +234,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv2bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmfle_vf_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vf_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -326,13 +249,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv2bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmfle_mask_vf_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -353,11 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv4bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmfle_vf_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vf_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -373,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv4bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmfle_mask_vf_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -400,11 +304,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv8bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmfle_vf_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vf_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -420,13 +319,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv8bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmfle_mask_vf_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -447,11 +339,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv16bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmfle_vf_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vf_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -467,13 +354,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv16bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmfle_mask_vf_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfle.ll b/llvm/test/CodeGen/RISCV/rvv/vmfle.ll index 77d8dda258961..95980bd18e271 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmfle.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfle.ll @@ -4,11 +4,6 @@ ; RUN: sed 
's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vmfle.nxv1f16( - , - , - iXLen); - define @intrinsic_vmfle_vv_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv1f16( - , - , - , - , - iXLen); - define @intrinsic_vmfle_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -55,11 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv2f16( - , - , - iXLen); - define @intrinsic_vmfle_vv_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -75,13 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv2f16( - , - , - , - , - iXLen); - define @intrinsic_vmfle_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -106,11 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv4f16( - , - , - iXLen); - define @intrinsic_vmfle_vv_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -126,13 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv4f16( - , - , - , - , - iXLen); - define @intrinsic_vmfle_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -157,11 +121,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv8f16( - , - , - iXLen); - define @intrinsic_vmfle_vv_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -177,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv8f16( - , - , - , - , - iXLen); - define 
@intrinsic_vmfle_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -208,11 +160,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv16f16( - , - , - iXLen); - define @intrinsic_vmfle_vv_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -228,13 +175,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv16f16( - , - , - , - , - iXLen); - define @intrinsic_vmfle_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -259,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv1f32( - , - , - iXLen); - define @intrinsic_vmfle_vv_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -279,13 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv1f32( - , - , - , - , - iXLen); - define @intrinsic_vmfle_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -310,11 +238,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv2f32( - , - , - iXLen); - define @intrinsic_vmfle_vv_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -330,13 +253,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv2f32( - , - , - , - , - iXLen); - define @intrinsic_vmfle_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -361,11 +277,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv4f32( - , - , - iXLen); - define @intrinsic_vmfle_vv_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -381,13 +292,6 @@ 
entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv4f32( - , - , - , - , - iXLen); - define @intrinsic_vmfle_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -412,11 +316,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv8f32( - , - , - iXLen); - define @intrinsic_vmfle_vv_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -432,13 +331,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv8f32( - , - , - , - , - iXLen); - define @intrinsic_vmfle_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -463,11 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv1f64( - , - , - iXLen); - define @intrinsic_vmfle_vv_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -483,13 +370,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv1f64( - , - , - , - , - iXLen); - define @intrinsic_vmfle_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -514,11 +394,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv2f64( - , - , - iXLen); - define @intrinsic_vmfle_vv_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -534,13 +409,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv2f64( - , - , - , - , - iXLen); - define @intrinsic_vmfle_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -565,11 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv4f64( - , - , - iXLen); - define @intrinsic_vmfle_vv_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmfle_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -585,13 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv4f64( - , - , - , - , - iXLen); - define @intrinsic_vmfle_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -616,11 +472,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv1f16.f16( - , - half, - iXLen); - define @intrinsic_vmfle_vf_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -636,13 +487,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv1f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmfle_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -663,11 +507,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv2f16.f16( - , - half, - iXLen); - define @intrinsic_vmfle_vf_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -683,13 +522,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv2f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmfle_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -710,11 +542,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv4f16.f16( - , - half, - iXLen); - define @intrinsic_vmfle_vf_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -730,13 +557,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv4f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmfle_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -757,11 +577,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmfle.nxv8f16.f16( - , - half, - iXLen); - define @intrinsic_vmfle_vf_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -777,13 +592,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv8f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmfle_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -804,11 +612,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv16f16.f16( - , - half, - iXLen); - define @intrinsic_vmfle_vf_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -824,13 +627,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv16f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmfle_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -851,11 +647,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv1f32.f32( - , - float, - iXLen); - define @intrinsic_vmfle_vf_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -871,13 +662,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv1f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vmfle_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -898,11 +682,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv2f32.f32( - , - float, - iXLen); - define @intrinsic_vmfle_vf_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -918,13 +697,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv2f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vmfle_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -945,11 +717,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv4f32.f32( - , - float, - iXLen); - define @intrinsic_vmfle_vf_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -965,13 +732,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv4f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vmfle_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -992,11 +752,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv8f32.f32( - , - float, - iXLen); - define @intrinsic_vmfle_vf_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1012,13 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv8f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vmfle_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1039,11 +787,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv1f64.f64( - , - double, - iXLen); - define @intrinsic_vmfle_vf_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1059,13 +802,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv1f64.f64( - , - , - double, - , - iXLen); - define @intrinsic_vmfle_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1086,11 +822,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv2f64.f64( - , - double, - iXLen); - define @intrinsic_vmfle_vf_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1106,13 +837,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmfle.mask.nxv2f64.f64( - , - , - double, - , - iXLen); - define @intrinsic_vmfle_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1133,11 +857,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.nxv4f64.f64( - , - double, - iXLen); - define @intrinsic_vmfle_vf_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -1153,13 +872,6 @@ entry: ret %a } -declare @llvm.riscv.vmfle.mask.nxv4f64.f64( - , - , - double, - , - iXLen); - define @intrinsic_vmfle_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmflt-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vmflt-bf.ll index 2e1bcc5e87bfc..1e992edcfd45a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmflt-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmflt-bf.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vmflt.nxv1bf16( - , - , - iXLen); - define @intrinsic_vmflt_vv_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vv_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv1bf16( - , - , - , - , - iXLen); - define @intrinsic_vmflt_mask_vv_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -55,11 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv2bf16( - , - , - iXLen); - define @intrinsic_vmflt_vv_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vv_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -75,13 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv2bf16( - , - , - , - 
, - iXLen); - define @intrinsic_vmflt_mask_vv_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -106,11 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv4bf16( - , - , - iXLen); - define @intrinsic_vmflt_vv_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vv_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -126,13 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv4bf16( - , - , - , - , - iXLen); - define @intrinsic_vmflt_mask_vv_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -157,11 +121,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv8bf16( - , - , - iXLen); - define @intrinsic_vmflt_vv_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vv_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -177,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv8bf16( - , - , - , - , - iXLen); - define @intrinsic_vmflt_mask_vv_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -208,11 +160,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv16bf16( - , - , - iXLen); - define @intrinsic_vmflt_vv_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vv_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -228,13 +175,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv16bf16( - , - , - , - , - iXLen); - define @intrinsic_vmflt_mask_vv_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -259,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv1bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmflt_vf_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmflt_vf_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -279,13 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv1bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmflt_mask_vf_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -306,11 +234,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv2bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmflt_vf_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vf_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -326,13 +249,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv2bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmflt_mask_vf_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -353,11 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv4bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmflt_vf_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vf_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -373,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv4bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmflt_mask_vf_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -400,11 +304,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv8bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmflt_vf_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vf_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -420,13 +319,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv8bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmflt_mask_vf_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry 
@@ -447,11 +339,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv16bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmflt_vf_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vf_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -467,13 +354,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv16bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmflt_mask_vf_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmflt.ll b/llvm/test/CodeGen/RISCV/rvv/vmflt.ll index 0fdae8abe8f6b..24532977116af 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmflt.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmflt.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vmflt.nxv1f16( - , - , - iXLen); - define @intrinsic_vmflt_vv_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv1f16( - , - , - , - , - iXLen); - define @intrinsic_vmflt_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -55,11 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv2f16( - , - , - iXLen); - define @intrinsic_vmflt_vv_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -75,13 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv2f16( - , - , - , - , - iXLen); - define @intrinsic_vmflt_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -106,11 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv4f16( - , - , - 
iXLen); - define @intrinsic_vmflt_vv_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -126,13 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv4f16( - , - , - , - , - iXLen); - define @intrinsic_vmflt_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -157,11 +121,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv8f16( - , - , - iXLen); - define @intrinsic_vmflt_vv_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -177,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv8f16( - , - , - , - , - iXLen); - define @intrinsic_vmflt_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -208,11 +160,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv16f16( - , - , - iXLen); - define @intrinsic_vmflt_vv_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -228,13 +175,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv16f16( - , - , - , - , - iXLen); - define @intrinsic_vmflt_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -259,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv1f32( - , - , - iXLen); - define @intrinsic_vmflt_vv_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -279,13 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv1f32( - , - , - , - , - iXLen); - define @intrinsic_vmflt_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # 
%entry @@ -310,11 +238,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv2f32( - , - , - iXLen); - define @intrinsic_vmflt_vv_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -330,13 +253,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv2f32( - , - , - , - , - iXLen); - define @intrinsic_vmflt_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -361,11 +277,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv4f32( - , - , - iXLen); - define @intrinsic_vmflt_vv_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -381,13 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv4f32( - , - , - , - , - iXLen); - define @intrinsic_vmflt_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -412,11 +316,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv8f32( - , - , - iXLen); - define @intrinsic_vmflt_vv_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -432,13 +331,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv8f32( - , - , - , - , - iXLen); - define @intrinsic_vmflt_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -463,11 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv1f64( - , - , - iXLen); - define @intrinsic_vmflt_vv_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -483,13 +370,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv1f64( - , - , - , - , - iXLen); - define @intrinsic_vmflt_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { 
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -514,11 +394,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv2f64( - , - , - iXLen); - define @intrinsic_vmflt_vv_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -534,13 +409,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv2f64( - , - , - , - , - iXLen); - define @intrinsic_vmflt_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -565,11 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv4f64( - , - , - iXLen); - define @intrinsic_vmflt_vv_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -585,13 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv4f64( - , - , - , - , - iXLen); - define @intrinsic_vmflt_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -616,11 +472,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv1f16.f16( - , - half, - iXLen); - define @intrinsic_vmflt_vf_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -636,13 +487,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv1f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmflt_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -663,11 +507,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv2f16.f16( - , - half, - iXLen); - define @intrinsic_vmflt_vf_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -683,13 +522,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv2f16.f16( - , - , - half, - 
, - iXLen); - define @intrinsic_vmflt_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -710,11 +542,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv4f16.f16( - , - half, - iXLen); - define @intrinsic_vmflt_vf_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -730,13 +557,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv4f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmflt_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -757,11 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv8f16.f16( - , - half, - iXLen); - define @intrinsic_vmflt_vf_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -777,13 +592,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv8f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmflt_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -804,11 +612,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv16f16.f16( - , - half, - iXLen); - define @intrinsic_vmflt_vf_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -824,13 +627,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv16f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmflt_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -851,11 +647,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv1f32.f32( - , - float, - iXLen); - define @intrinsic_vmflt_vf_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vf_nxv1f32_f32: ; 
CHECK: # %bb.0: # %entry @@ -871,13 +662,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv1f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vmflt_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -898,11 +682,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv2f32.f32( - , - float, - iXLen); - define @intrinsic_vmflt_vf_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -918,13 +697,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv2f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vmflt_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -945,11 +717,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv4f32.f32( - , - float, - iXLen); - define @intrinsic_vmflt_vf_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -965,13 +732,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv4f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vmflt_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -992,11 +752,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv8f32.f32( - , - float, - iXLen); - define @intrinsic_vmflt_vf_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1012,13 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv8f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vmflt_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1039,11 +787,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv1f64.f64( - , - double, 
- iXLen); - define @intrinsic_vmflt_vf_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1059,13 +802,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv1f64.f64( - , - , - double, - , - iXLen); - define @intrinsic_vmflt_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1086,11 +822,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv2f64.f64( - , - double, - iXLen); - define @intrinsic_vmflt_vf_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1106,13 +837,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv2f64.f64( - , - , - double, - , - iXLen); - define @intrinsic_vmflt_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1133,11 +857,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.nxv4f64.f64( - , - double, - iXLen); - define @intrinsic_vmflt_vf_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry @@ -1153,13 +872,6 @@ entry: ret %a } -declare @llvm.riscv.vmflt.mask.nxv4f64.f64( - , - , - double, - , - iXLen); - define @intrinsic_vmflt_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfne-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vmfne-bf.ll index 283ffc500fdde..90707b4b57b5e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmfne-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfne-bf.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vmfne.nxv1bf16( - , - , - iXLen); - define 
@intrinsic_vmfne_vv_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vv_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv1bf16( - , - , - , - , - iXLen); - define @intrinsic_vmfne_mask_vv_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -55,11 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv2bf16( - , - , - iXLen); - define @intrinsic_vmfne_vv_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vv_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -75,13 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv2bf16( - , - , - , - , - iXLen); - define @intrinsic_vmfne_mask_vv_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -106,11 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv4bf16( - , - , - iXLen); - define @intrinsic_vmfne_vv_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vv_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -126,13 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv4bf16( - , - , - , - , - iXLen); - define @intrinsic_vmfne_mask_vv_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -157,11 +121,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv8bf16( - , - , - iXLen); - define @intrinsic_vmfne_vv_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vv_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -177,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv8bf16( - , - , - , - , - iXLen); - define @intrinsic_vmfne_mask_vv_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # 
%entry @@ -208,11 +160,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv16bf16( - , - , - iXLen); - define @intrinsic_vmfne_vv_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vv_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -228,13 +175,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv16bf16( - , - , - , - , - iXLen); - define @intrinsic_vmfne_mask_vv_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -259,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv1bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmfne_vf_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vf_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -279,13 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv1bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmfne_mask_vf_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -306,11 +234,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv2bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmfne_vf_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vf_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -326,13 +249,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv2bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmfne_mask_vf_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -353,11 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv4bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmfne_vf_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vf_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -373,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv4bf16.bf16( - , - , - 
bfloat, - , - iXLen); - define @intrinsic_vmfne_mask_vf_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -400,11 +304,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv8bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmfne_vf_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vf_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -420,13 +319,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv8bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmfne_mask_vf_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv8bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -447,11 +339,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv16bf16.bf16( - , - bfloat, - iXLen); - define @intrinsic_vmfne_vf_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vf_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry @@ -467,13 +354,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv16bf16.bf16( - , - , - bfloat, - , - iXLen); - define @intrinsic_vmfne_mask_vf_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv16bf16_bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfne.ll b/llvm/test/CodeGen/RISCV/rvv/vmfne.ll index 1d0227f793728..aa09ca123bd6b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmfne.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfne.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare @llvm.riscv.vmfne.nxv1f16( - , - , - iXLen); - define @intrinsic_vmfne_vv_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv1f16( - , - , - , - , - iXLen); - define 
@intrinsic_vmfne_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -55,11 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv2f16( - , - , - iXLen); - define @intrinsic_vmfne_vv_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -75,13 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv2f16( - , - , - , - , - iXLen); - define @intrinsic_vmfne_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -106,11 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv4f16( - , - , - iXLen); - define @intrinsic_vmfne_vv_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -126,13 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv4f16( - , - , - , - , - iXLen); - define @intrinsic_vmfne_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -157,11 +121,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv8f16( - , - , - iXLen); - define @intrinsic_vmfne_vv_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -177,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv8f16( - , - , - , - , - iXLen); - define @intrinsic_vmfne_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -208,11 +160,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv16f16( - , - , - iXLen); - define @intrinsic_vmfne_vv_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -228,13 +175,6 @@ entry: ret %a } 
-declare @llvm.riscv.vmfne.mask.nxv16f16( - , - , - , - , - iXLen); - define @intrinsic_vmfne_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -259,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv1f32( - , - , - iXLen); - define @intrinsic_vmfne_vv_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -279,13 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv1f32( - , - , - , - , - iXLen); - define @intrinsic_vmfne_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -310,11 +238,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv2f32( - , - , - iXLen); - define @intrinsic_vmfne_vv_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -330,13 +253,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv2f32( - , - , - , - , - iXLen); - define @intrinsic_vmfne_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -361,11 +277,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv4f32( - , - , - iXLen); - define @intrinsic_vmfne_vv_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -381,13 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv4f32( - , - , - , - , - iXLen); - define @intrinsic_vmfne_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -412,11 +316,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv8f32( - , - , - iXLen); - define @intrinsic_vmfne_vv_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmfne_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -432,13 +331,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv8f32( - , - , - , - , - iXLen); - define @intrinsic_vmfne_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -463,11 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv1f64( - , - , - iXLen); - define @intrinsic_vmfne_vv_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -483,13 +370,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv1f64( - , - , - , - , - iXLen); - define @intrinsic_vmfne_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -514,11 +394,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv2f64( - , - , - iXLen); - define @intrinsic_vmfne_vv_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -534,13 +409,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv2f64( - , - , - , - , - iXLen); - define @intrinsic_vmfne_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -565,11 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv4f64( - , - , - iXLen); - define @intrinsic_vmfne_vv_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -585,13 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv4f64( - , - , - , - , - iXLen); - define @intrinsic_vmfne_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -616,11 +472,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv1f16.f16( - , - half, - iXLen); - 
define @intrinsic_vmfne_vf_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -636,13 +487,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv1f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmfne_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry @@ -663,11 +507,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv2f16.f16( - , - half, - iXLen); - define @intrinsic_vmfne_vf_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -683,13 +522,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv2f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmfne_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry @@ -710,11 +542,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv4f16.f16( - , - half, - iXLen); - define @intrinsic_vmfne_vf_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -730,13 +557,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv4f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmfne_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry @@ -757,11 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv8f16.f16( - , - half, - iXLen); - define @intrinsic_vmfne_vf_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry @@ -777,13 +592,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv8f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmfne_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv8f16_f16: ; CHECK: # 
%bb.0: # %entry @@ -804,11 +612,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv16f16.f16( - , - half, - iXLen); - define @intrinsic_vmfne_vf_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -824,13 +627,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv16f16.f16( - , - , - half, - , - iXLen); - define @intrinsic_vmfne_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry @@ -851,11 +647,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv1f32.f32( - , - float, - iXLen); - define @intrinsic_vmfne_vf_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -871,13 +662,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv1f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vmfne_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry @@ -898,11 +682,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv2f32.f32( - , - float, - iXLen); - define @intrinsic_vmfne_vf_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -918,13 +697,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv2f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vmfne_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry @@ -945,11 +717,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv4f32.f32( - , - float, - iXLen); - define @intrinsic_vmfne_vf_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -965,13 +732,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv4f32.f32( - , - , - float, - , - iXLen); - define 
@intrinsic_vmfne_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry @@ -992,11 +752,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv8f32.f32( - , - float, - iXLen); - define @intrinsic_vmfne_vf_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1012,13 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv8f32.f32( - , - , - float, - , - iXLen); - define @intrinsic_vmfne_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry @@ -1039,11 +787,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv1f64.f64( - , - double, - iXLen); - define @intrinsic_vmfne_vf_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1059,13 +802,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv1f64.f64( - , - , - double, - , - iXLen); - define @intrinsic_vmfne_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry @@ -1086,11 +822,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv2f64.f64( - , - double, - iXLen); - define @intrinsic_vmfne_vf_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1106,13 +837,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv2f64.f64( - , - , - double, - , - iXLen); - define @intrinsic_vmfne_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry @@ -1133,11 +857,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.nxv4f64.f64( - , - double, - iXLen); - define @intrinsic_vmfne_vf_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vf_nxv4f64_f64: 
; CHECK: # %bb.0: # %entry @@ -1153,13 +872,6 @@ entry: ret %a } -declare @llvm.riscv.vmfne.mask.nxv4f64.f64( - , - , - double, - , - iXLen); - define @intrinsic_vmfne_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll index 3922b09f1f02d..961f63cbfbc95 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.smin.nxv8i7(, , , i32) - define @vmin_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vx_nxv8i7: ; CHECK: # %bb.0: @@ -23,8 +21,6 @@ define @vmin_vx_nxv8i7( %a, i7 signext %b, %v } -declare @llvm.vp.smin.nxv1i8(, , , i32) - define @vmin_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_nxv1i8: ; CHECK: # %bb.0: @@ -81,8 +77,6 @@ define @vmin_vx_nxv1i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.smin.nxv2i8(, , , i32) - define @vmin_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_nxv2i8: ; CHECK: # %bb.0: @@ -127,8 +121,6 @@ define @vmin_vx_nxv2i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.smin.nxv3i8(, , , i32) - define @vmin_vv_nxv3i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_nxv3i8: ; CHECK: # %bb.0: @@ -173,8 +165,6 @@ define @vmin_vx_nxv3i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.smin.nxv4i8(, , , i32) - define @vmin_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_nxv4i8: ; CHECK: # %bb.0: @@ -219,8 +209,6 @@ define @vmin_vx_nxv4i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.smin.nxv8i8(, , , i32) - define @vmin_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_nxv8i8: ; CHECK: # %bb.0: @@ -265,8 +253,6 @@ define @vmin_vx_nxv8i8_unmasked( %va, i8 
%b, ret %v } -declare @llvm.vp.smin.nxv16i8(, , , i32) - define @vmin_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_nxv16i8: ; CHECK: # %bb.0: @@ -311,8 +297,6 @@ define @vmin_vx_nxv16i8_unmasked( %va, i8 % ret %v } -declare @llvm.vp.smin.nxv32i8(, , , i32) - define @vmin_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_nxv32i8: ; CHECK: # %bb.0: @@ -357,8 +341,6 @@ define @vmin_vx_nxv32i8_unmasked( %va, i8 % ret %v } -declare @llvm.vp.smin.nxv64i8(, , , i32) - define @vmin_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_nxv64i8: ; CHECK: # %bb.0: @@ -405,8 +387,6 @@ define @vmin_vx_nxv64i8_unmasked( %va, i8 % ; Test that split-legalization works when the mask itself needs splitting. -declare @llvm.vp.smin.nxv128i8(, , , i32) - define @vmin_vx_nxv128i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vx_nxv128i8: ; CHECK: # %bb.0: @@ -459,8 +439,6 @@ define @vmin_vx_nxv128i8_unmasked( %va, i ret %v } -declare @llvm.vp.smin.nxv1i16(, , , i32) - define @vmin_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_nxv1i16: ; CHECK: # %bb.0: @@ -505,8 +483,6 @@ define @vmin_vx_nxv1i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.smin.nxv2i16(, , , i32) - define @vmin_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_nxv2i16: ; CHECK: # %bb.0: @@ -551,8 +527,6 @@ define @vmin_vx_nxv2i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.smin.nxv4i16(, , , i32) - define @vmin_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_nxv4i16: ; CHECK: # %bb.0: @@ -597,8 +571,6 @@ define @vmin_vx_nxv4i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.smin.nxv8i16(, , , i32) - define @vmin_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_nxv8i16: ; CHECK: # %bb.0: @@ -643,8 +615,6 @@ define @vmin_vx_nxv8i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.smin.nxv16i16(, , , i32) - define @vmin_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vmin_vv_nxv16i16: ; CHECK: # %bb.0: @@ -689,8 +659,6 @@ define @vmin_vx_nxv16i16_unmasked( %va, i ret %v } -declare @llvm.vp.smin.nxv32i16(, , , i32) - define @vmin_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_nxv32i16: ; CHECK: # %bb.0: @@ -735,8 +703,6 @@ define @vmin_vx_nxv32i16_unmasked( %va, i ret %v } -declare @llvm.vp.smin.nxv1i32(, , , i32) - define @vmin_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_nxv1i32: ; CHECK: # %bb.0: @@ -781,8 +747,6 @@ define @vmin_vx_nxv1i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.smin.nxv2i32(, , , i32) - define @vmin_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_nxv2i32: ; CHECK: # %bb.0: @@ -827,8 +791,6 @@ define @vmin_vx_nxv2i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.smin.nxv4i32(, , , i32) - define @vmin_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_nxv4i32: ; CHECK: # %bb.0: @@ -873,8 +835,6 @@ define @vmin_vx_nxv4i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.smin.nxv8i32(, , , i32) - define @vmin_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_nxv8i32: ; CHECK: # %bb.0: @@ -919,8 +879,6 @@ define @vmin_vx_nxv8i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.smin.nxv16i32(, , , i32) - define @vmin_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_nxv16i32: ; CHECK: # %bb.0: @@ -967,8 +925,6 @@ define @vmin_vx_nxv16i32_unmasked( %va, i ; Test that split-legalization works then the mask needs manual splitting. -declare @llvm.vp.smin.nxv32i32(, , , i32) - define @vmin_vx_nxv32i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vx_nxv32i32: ; CHECK: # %bb.0: @@ -1024,8 +980,6 @@ define @vmin_vx_nxv32i32_unmasked( %va, i ; Test splitting when the %evl is a constant (albeit an unknown one). 
-declare i32 @llvm.vscale.i32() - define @vmin_vx_nxv32i32_evl_nx8( %va, i32 %b, %m) { ; RV32-LABEL: vmin_vx_nxv32i32_evl_nx8: ; RV32: # %bb.0: @@ -1091,8 +1045,6 @@ define @vmin_vx_nxv32i32_evl_nx16( %va, i ret %v } -declare @llvm.vp.smin.nxv1i64(, , , i32) - define @vmin_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_nxv1i64: ; CHECK: # %bb.0: @@ -1165,8 +1117,6 @@ define @vmin_vx_nxv1i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.smin.nxv2i64(, , , i32) - define @vmin_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1239,8 +1189,6 @@ define @vmin_vx_nxv2i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.smin.nxv4i64(, , , i32) - define @vmin_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1313,8 +1261,6 @@ define @vmin_vx_nxv4i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.smin.nxv8i64(, , , i32) - define @vmin_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmin_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin.ll b/llvm/test/CodeGen/RISCV/rvv/vmin.ll index 17ad2442bb695..edd643f08ee43 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmin.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmin.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vmin.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmin.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vmin_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vmin_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vmin_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vmin_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv16i8.nxv16i8( - , - , - , - , - 
iXLen, - iXLen); - define @intrinsic_vmin_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vmin_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv64i8.nxv64i8( - , - , - , - iXLen); - define @intrinsic_vmin_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vmin_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmin.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vmin_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vmin_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vmin_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vmin_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -544,14 +384,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmin.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -569,12 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv32i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vmin_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -591,14 +417,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vmin_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vmin_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # 
%entry @@ -711,12 +501,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vmin_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -733,14 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -758,12 +534,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vmin_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -780,14 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -805,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv16i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vmin_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -827,14 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vmin_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # 
%bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv2i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vmin_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv4i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vmin_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv8i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vmin_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmin_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1042,12 +734,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv1i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmin_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1064,14 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1089,12 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmin_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1111,14 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1136,12 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmin_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1158,14 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1183,12 +833,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmin_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1205,14 +849,6 @@ entry: 
ret %a } -declare @llvm.riscv.vmin.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1230,12 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmin_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1252,14 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1277,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmin_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1299,14 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1324,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv64i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmin_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1346,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1371,12 +965,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmin.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmin_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1393,14 +981,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1418,12 +998,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmin_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1440,14 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1465,12 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmin_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1487,14 +1047,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1512,12 +1064,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmin_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1534,14 +1080,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmin.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1559,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmin_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1581,14 +1113,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1606,12 +1130,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv32i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmin_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1628,14 +1146,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1653,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmin_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1675,14 +1179,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1700,12 +1196,6 
@@ entry: ret %a } -declare @llvm.riscv.vmin.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmin_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1722,14 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1747,12 +1229,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmin_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1769,14 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1794,12 +1262,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmin_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1816,14 +1278,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1841,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv16i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmin_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1863,14 +1311,6 @@ entry: ret %a } 
-declare @llvm.riscv.vmin.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1888,12 +1328,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv1i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmin_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmin_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1922,14 +1356,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmin_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1959,12 +1385,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv2i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmin_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmin_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1993,14 +1413,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmin_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2030,12 +1442,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.nxv4i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmin_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmin_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2064,14 +1470,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmin_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2101,12 +1499,6 @@ entry: ret %a } 
-declare @llvm.riscv.vmin.nxv8i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmin_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmin_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2135,14 +1527,6 @@ entry: ret %a } -declare @llvm.riscv.vmin.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vmin_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmin_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll index 59af953fd52d3..631799d24e14c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.umin.nxv8i7(, , , i32) - define @vminu_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vx_nxv8i7: ; CHECK: # %bb.0: @@ -22,8 +20,6 @@ define @vminu_vx_nxv8i7( %a, i7 signext %b, < ret %v } -declare @llvm.vp.umin.nxv1i8(, , , i32) - define @vminu_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_nxv1i8: ; CHECK: # %bb.0: @@ -80,8 +76,6 @@ define @vminu_vx_nxv1i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.umin.nxv2i8(, , , i32) - define @vminu_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_nxv2i8: ; CHECK: # %bb.0: @@ -126,8 +120,6 @@ define @vminu_vx_nxv2i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.umin.nxv3i8(, , , i32) - define @vminu_vv_nxv3i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_nxv3i8: ; CHECK: # %bb.0: @@ -172,8 +164,6 @@ define @vminu_vx_nxv3i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.umin.nxv4i8(, , , i32) - define @vminu_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_nxv4i8: ; CHECK: # %bb.0: @@ -218,8 +208,6 @@ define 
@vminu_vx_nxv4i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.umin.nxv8i8(, , , i32) - define @vminu_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_nxv8i8: ; CHECK: # %bb.0: @@ -264,8 +252,6 @@ define @vminu_vx_nxv8i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.umin.nxv16i8(, , , i32) - define @vminu_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_nxv16i8: ; CHECK: # %bb.0: @@ -310,8 +296,6 @@ define @vminu_vx_nxv16i8_unmasked( %va, i8 ret %v } -declare @llvm.vp.umin.nxv32i8(, , , i32) - define @vminu_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_nxv32i8: ; CHECK: # %bb.0: @@ -356,8 +340,6 @@ define @vminu_vx_nxv32i8_unmasked( %va, i8 ret %v } -declare @llvm.vp.umin.nxv64i8(, , , i32) - define @vminu_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_nxv64i8: ; CHECK: # %bb.0: @@ -404,8 +386,6 @@ define @vminu_vx_nxv64i8_unmasked( %va, i8 ; Test that split-legalization works when the mask itself needs splitting. 
-declare @llvm.vp.umin.nxv128i8(, , , i32) - define @vminu_vx_nxv128i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vx_nxv128i8: ; CHECK: # %bb.0: @@ -458,8 +438,6 @@ define @vminu_vx_nxv128i8_unmasked( %va, ret %v } -declare @llvm.vp.umin.nxv1i16(, , , i32) - define @vminu_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_nxv1i16: ; CHECK: # %bb.0: @@ -504,8 +482,6 @@ define @vminu_vx_nxv1i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.umin.nxv2i16(, , , i32) - define @vminu_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_nxv2i16: ; CHECK: # %bb.0: @@ -550,8 +526,6 @@ define @vminu_vx_nxv2i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.umin.nxv4i16(, , , i32) - define @vminu_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_nxv4i16: ; CHECK: # %bb.0: @@ -596,8 +570,6 @@ define @vminu_vx_nxv4i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.umin.nxv8i16(, , , i32) - define @vminu_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_nxv8i16: ; CHECK: # %bb.0: @@ -642,8 +614,6 @@ define @vminu_vx_nxv8i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.umin.nxv16i16(, , , i32) - define @vminu_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_nxv16i16: ; CHECK: # %bb.0: @@ -688,8 +658,6 @@ define @vminu_vx_nxv16i16_unmasked( %va, ret %v } -declare @llvm.vp.umin.nxv32i16(, , , i32) - define @vminu_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_nxv32i16: ; CHECK: # %bb.0: @@ -734,8 +702,6 @@ define @vminu_vx_nxv32i16_unmasked( %va, ret %v } -declare @llvm.vp.umin.nxv1i32(, , , i32) - define @vminu_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_nxv1i32: ; CHECK: # %bb.0: @@ -780,8 +746,6 @@ define @vminu_vx_nxv1i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.umin.nxv2i32(, , , i32) - define @vminu_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_nxv2i32: ; CHECK: # %bb.0: @@ -826,8 +790,6 @@ define 
@vminu_vx_nxv2i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.umin.nxv4i32(, , , i32) - define @vminu_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_nxv4i32: ; CHECK: # %bb.0: @@ -872,8 +834,6 @@ define @vminu_vx_nxv4i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.umin.nxv8i32(, , , i32) - define @vminu_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_nxv8i32: ; CHECK: # %bb.0: @@ -918,8 +878,6 @@ define @vminu_vx_nxv8i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.umin.nxv16i32(, , , i32) - define @vminu_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_nxv16i32: ; CHECK: # %bb.0: @@ -966,8 +924,6 @@ define @vminu_vx_nxv16i32_unmasked( %va, ; Test that split-legalization works then the mask needs manual splitting. -declare @llvm.vp.umin.nxv32i32(, , , i32) - define @vminu_vx_nxv32i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vx_nxv32i32: ; CHECK: # %bb.0: @@ -1023,8 +979,6 @@ define @vminu_vx_nxv32i32_unmasked( %va, ; Test splitting when the %evl is a constant (albeit an unknown one). 
-declare i32 @llvm.vscale.i32() - define @vminu_vx_nxv32i32_evl_nx8( %va, i32 %b, %m) { ; RV32-LABEL: vminu_vx_nxv32i32_evl_nx8: ; RV32: # %bb.0: @@ -1090,8 +1044,6 @@ define @vminu_vx_nxv32i32_evl_nx16( %va, ret %v } -declare @llvm.vp.umin.nxv1i64(, , , i32) - define @vminu_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_nxv1i64: ; CHECK: # %bb.0: @@ -1164,8 +1116,6 @@ define @vminu_vx_nxv1i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.umin.nxv2i64(, , , i32) - define @vminu_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1238,8 +1188,6 @@ define @vminu_vx_nxv2i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.umin.nxv4i64(, , , i32) - define @vminu_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1312,8 +1260,6 @@ define @vminu_vx_nxv4i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.umin.nxv8i64(, , , i32) - define @vminu_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vminu_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu.ll b/llvm/test/CodeGen/RISCV/rvv/vminu.ll index ba86de4adb0bb..251f833d75faa 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vminu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vminu.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vminu.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare 
@llvm.riscv.vminu.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vminu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vminu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vminu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vminu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv16i8.nxv16i8( 
- , - , - , - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vminu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv64i8.nxv64i8( - , - , - , - iXLen); - define @intrinsic_vminu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vminu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } 
-declare @llvm.riscv.vminu.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vminu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vminu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vminu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vminu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -544,14 
+384,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -569,12 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv32i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vminu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -591,14 +417,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vminu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vminu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vminu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -711,12 +501,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vminu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -733,14 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -758,12 +534,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vminu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -780,14 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -805,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv16i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vminu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -827,14 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vminu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, 
iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv2i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vminu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv4i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vminu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv8i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vminu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vminu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1042,12 +734,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv1i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vminu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1064,14 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1089,12 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vminu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1111,14 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1136,12 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vminu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1158,14 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1183,12 +833,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vminu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen 
%2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1205,14 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1230,12 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vminu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1252,14 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1277,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vminu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1299,14 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1324,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv64i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vminu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1346,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vminu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1371,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vminu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1393,14 +981,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1418,12 +998,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vminu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1440,14 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1465,12 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vminu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1487,14 +1047,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1512,12 +1064,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vminu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vminu_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1534,14 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1559,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vminu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1581,14 +1113,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1606,12 +1130,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv32i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vminu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1628,14 +1146,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1653,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vminu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1675,14 +1179,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vx_nxv1i32_nxv1i32_i32( 
%0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1700,12 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vminu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1722,14 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1747,12 +1229,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vminu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1769,14 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1794,12 +1262,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vminu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1816,14 +1278,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1841,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv16i32.i32( - , - , - i32, - iXLen); - define 
@intrinsic_vminu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1863,14 +1311,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1888,12 +1328,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv1i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vminu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vminu_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1922,14 +1356,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vminu_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1959,12 +1385,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv2i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vminu_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vminu_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1993,14 +1413,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vminu_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2030,12 +1442,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv4i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vminu_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vminu_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2064,14 +1470,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, - iXLen); 
- define @intrinsic_vminu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vminu_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2101,12 +1499,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.nxv8i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vminu_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vminu_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2135,14 +1527,6 @@ entry: ret %a } -declare @llvm.riscv.vminu.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vminu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vminu_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmnand.ll b/llvm/test/CodeGen/RISCV/rvv/vmnand.ll index 3406aebc4f8a8..4545f7009413b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmnand.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmnand.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vmnand.nxv1i1( - , - , - iXLen); - define @intrinsic_vmnand_mm_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnand_mm_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -24,11 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmnand.nxv2i1( - , - , - iXLen); - define @intrinsic_vmnand_mm_nxv2i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnand_mm_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -44,11 +34,6 @@ entry: ret %a } -declare @llvm.riscv.vmnand.nxv4i1( - , - , - iXLen); - define @intrinsic_vmnand_mm_nxv4i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnand_mm_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -64,11 +49,6 @@ entry: ret %a } -declare @llvm.riscv.vmnand.nxv8i1( - , - , - iXLen); - define @intrinsic_vmnand_mm_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnand_mm_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -84,11 +64,6 @@ entry: ret %a 
} -declare @llvm.riscv.vmnand.nxv16i1( - , - , - iXLen); - define @intrinsic_vmnand_mm_nxv16i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnand_mm_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -104,11 +79,6 @@ entry: ret %a } -declare @llvm.riscv.vmnand.nxv32i1( - , - , - iXLen); - define @intrinsic_vmnand_mm_nxv32i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnand_mm_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -124,11 +94,6 @@ entry: ret %a } -declare @llvm.riscv.vmnand.nxv64i1( - , - , - iXLen); - define @intrinsic_vmnand_mm_nxv64i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnand_mm_nxv64i1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmnor.ll b/llvm/test/CodeGen/RISCV/rvv/vmnor.ll index afd85767004df..1d19c7398220f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmnor.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmnor.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vmnor.nxv1i1( - , - , - iXLen); - define @intrinsic_vmnor_mm_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnor_mm_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -24,11 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmnor.nxv2i1( - , - , - iXLen); - define @intrinsic_vmnor_mm_nxv2i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnor_mm_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -44,11 +34,6 @@ entry: ret %a } -declare @llvm.riscv.vmnor.nxv4i1( - , - , - iXLen); - define @intrinsic_vmnor_mm_nxv4i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnor_mm_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -64,11 +49,6 @@ entry: ret %a } -declare @llvm.riscv.vmnor.nxv8i1( - , - , - iXLen); - define @intrinsic_vmnor_mm_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnor_mm_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -84,11 +64,6 @@ entry: ret %a } -declare @llvm.riscv.vmnor.nxv16i1( - , - , - iXLen); - define @intrinsic_vmnor_mm_nxv16i1( %0, %1, iXLen 
%2) nounwind { ; CHECK-LABEL: intrinsic_vmnor_mm_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -104,11 +79,6 @@ entry: ret %a } -declare @llvm.riscv.vmnor.nxv32i1( - , - , - iXLen); - define @intrinsic_vmnor_mm_nxv32i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnor_mm_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -124,11 +94,6 @@ entry: ret %a } -declare @llvm.riscv.vmnor.nxv64i1( - , - , - iXLen); - define @intrinsic_vmnor_mm_nxv64i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnor_mm_nxv64i1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmor.ll b/llvm/test/CodeGen/RISCV/rvv/vmor.ll index bfd873186e83f..d42c46f785961 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmor.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmor.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vmor.nxv1i1( - , - , - iXLen); - define @intrinsic_vmor_mm_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmor_mm_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -24,11 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmor.nxv2i1( - , - , - iXLen); - define @intrinsic_vmor_mm_nxv2i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmor_mm_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -44,11 +34,6 @@ entry: ret %a } -declare @llvm.riscv.vmor.nxv4i1( - , - , - iXLen); - define @intrinsic_vmor_mm_nxv4i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmor_mm_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -64,11 +49,6 @@ entry: ret %a } -declare @llvm.riscv.vmor.nxv8i1( - , - , - iXLen); - define @intrinsic_vmor_mm_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmor_mm_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -84,11 +64,6 @@ entry: ret %a } -declare @llvm.riscv.vmor.nxv16i1( - , - , - iXLen); - define @intrinsic_vmor_mm_nxv16i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmor_mm_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -104,11 +79,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmor.nxv32i1( - , - , - iXLen); - define @intrinsic_vmor_mm_nxv32i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmor_mm_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -124,11 +94,6 @@ entry: ret %a } -declare @llvm.riscv.vmor.nxv64i1( - , - , - iXLen); - define @intrinsic_vmor_mm_nxv64i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmor_mm_nxv64i1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmorn.ll b/llvm/test/CodeGen/RISCV/rvv/vmorn.ll index ebc5c3a23c35a..8018cfeab9239 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmorn.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmorn.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vmorn.nxv1i1( - , - , - iXLen); - define @intrinsic_vmorn_mm_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmorn_mm_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -24,11 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmorn.nxv2i1( - , - , - iXLen); - define @intrinsic_vmorn_mm_nxv2i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmorn_mm_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -44,11 +34,6 @@ entry: ret %a } -declare @llvm.riscv.vmorn.nxv4i1( - , - , - iXLen); - define @intrinsic_vmorn_mm_nxv4i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmorn_mm_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -64,11 +49,6 @@ entry: ret %a } -declare @llvm.riscv.vmorn.nxv8i1( - , - , - iXLen); - define @intrinsic_vmorn_mm_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmorn_mm_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -84,11 +64,6 @@ entry: ret %a } -declare @llvm.riscv.vmorn.nxv16i1( - , - , - iXLen); - define @intrinsic_vmorn_mm_nxv16i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmorn_mm_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -104,11 +79,6 @@ entry: ret %a } -declare @llvm.riscv.vmorn.nxv32i1( - , - , - iXLen); - define @intrinsic_vmorn_mm_nxv32i1( %0, %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vmorn_mm_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -124,11 +94,6 @@ entry: ret %a } -declare @llvm.riscv.vmorn.nxv64i1( - , - , - iXLen); - define @intrinsic_vmorn_mm_nxv64i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmorn_mm_nxv64i1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in.ll index 9ce7d68ba4012..b052d6725822b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,12 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i8_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -50,12 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i8_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -73,12 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -96,12 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv16i8.nxv16i8( - , - , - , - iXLen); - define 
@intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i8_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -119,12 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i8_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -142,12 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv64i8.nxv64i8( - , - , - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vvm_nxv64i1_nxv64i8_nxv64i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv64i1_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -165,12 +123,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -188,12 +140,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i16_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -211,12 +157,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -234,12 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i16_nxv8i16: ; 
CHECK: # %bb.0: # %entry @@ -257,12 +191,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -280,12 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv32i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i16_nxv32i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -303,12 +225,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -326,12 +242,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -349,12 +259,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -372,12 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -395,12 +293,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv16i32.nxv16i32( - , - , - , - iXLen); - 
define @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -418,12 +310,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -441,12 +327,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv2i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -464,12 +344,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv4i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -487,12 +361,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv8i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i64_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -510,12 +378,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv1i8.i8( - , - i8, - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -533,12 +395,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv2i8.i8( - , - i8, - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i8_i8: ; CHECK: # %bb.0: # 
%entry @@ -556,12 +412,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv4i8.i8( - , - i8, - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -579,12 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8( - , - i8, - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -602,12 +446,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv16i8.i8( - , - i8, - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -625,12 +463,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv32i8.i8( - , - i8, - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -648,12 +480,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv64i8.i8( - , - i8, - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vxm_nxv64i1_nxv64i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv64i1_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -671,12 +497,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv1i16.i16( - , - i16, - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -694,12 +514,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv2i16.i16( - , - i16, - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; 
CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -717,12 +531,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16( - , - i16, - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -740,12 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv8i16.i16( - , - i16, - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -763,12 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv16i16.i16( - , - i16, - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -786,12 +582,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv32i16.i16( - , - i16, - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -809,12 +599,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv1i32.i32( - , - i32, - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -832,12 +616,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv2i32.i32( - , - i32, - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -855,12 +633,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv4i32.i32( - , - 
i32, - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -878,12 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv8i32.i32( - , - i32, - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -901,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv16i32.i32( - , - i32, - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -924,12 +684,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv1i64.i64( - , - i64, - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -960,12 +714,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv2i64.i64( - , - i64, - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -996,12 +744,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv4i64.i64( - , - i64, - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -1032,12 +774,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.borrow.in.nxv8i64.i64( - , - i64, - , - iXLen); - define @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i64_i64: ; RV32: # 
%bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbc.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbc.ll index 8c870a9332646..465639d366a93 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsbc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsbc.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vmsbc.nxv1i8.nxv1i8( - , - , - iXLen); - define @intrinsic_vmsbc_vv_nxv1i1_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -24,11 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv2i8.nxv2i8( - , - , - iXLen); - define @intrinsic_vmsbc_vv_nxv2i1_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -44,11 +34,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv4i8.nxv4i8( - , - , - iXLen); - define @intrinsic_vmsbc_vv_nxv4i1_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -64,11 +49,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv8i8.nxv8i8( - , - , - iXLen); - define @intrinsic_vmsbc_vv_nxv8i1_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -84,11 +64,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv16i8.nxv16i8( - , - , - iXLen); - define @intrinsic_vmsbc_vv_nxv16i1_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -104,11 +79,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv32i8.nxv32i8( - , - , - iXLen); - define @intrinsic_vmsbc_vv_nxv32i1_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv32i1_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -124,11 +94,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv64i8.nxv64i8( - 
, - , - iXLen); - define @intrinsic_vmsbc_vv_nxv64i1_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv64i1_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -144,11 +109,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv1i16.nxv1i16( - , - , - iXLen); - define @intrinsic_vmsbc_vv_nxv1i1_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -164,11 +124,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv2i16.nxv2i16( - , - , - iXLen); - define @intrinsic_vmsbc_vv_nxv2i1_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -184,11 +139,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv4i16.nxv4i16( - , - , - iXLen); - define @intrinsic_vmsbc_vv_nxv4i1_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -204,11 +154,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv8i16.nxv8i16( - , - , - iXLen); - define @intrinsic_vmsbc_vv_nxv8i1_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -224,11 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv16i16.nxv16i16( - , - , - iXLen); - define @intrinsic_vmsbc_vv_nxv16i1_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -244,11 +184,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv32i16.nxv32i16( - , - , - iXLen); - define @intrinsic_vmsbc_vv_nxv32i1_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv32i1_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -264,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv1i32.nxv1i32( - , - , - iXLen); - define @intrinsic_vmsbc_vv_nxv1i1_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmsbc_vv_nxv1i1_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -284,11 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv2i32.nxv2i32( - , - , - iXLen); - define @intrinsic_vmsbc_vv_nxv2i1_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -304,11 +229,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv4i32.nxv4i32( - , - , - iXLen); - define @intrinsic_vmsbc_vv_nxv4i1_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -324,11 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv8i32.nxv8i32( - , - , - iXLen); - define @intrinsic_vmsbc_vv_nxv8i1_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -344,11 +259,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv16i32.nxv16i32( - , - , - iXLen); - define @intrinsic_vmsbc_vv_nxv16i1_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -364,11 +274,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv1i64.nxv1i64( - , - , - iXLen); - define @intrinsic_vmsbc_vv_nxv1i1_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -384,11 +289,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv2i64.nxv2i64( - , - , - iXLen); - define @intrinsic_vmsbc_vv_nxv2i1_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -404,11 +304,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv4i64.nxv4i64( - , - , - iXLen); - define @intrinsic_vmsbc_vv_nxv4i1_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -424,11 +319,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmsbc.nxv8i64.nxv8i64( - , - , - iXLen); - define @intrinsic_vmsbc_vv_nxv8i1_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -444,11 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv1i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsbc_vx_nxv1i1_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -464,11 +349,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv2i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsbc_vx_nxv2i1_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -484,11 +364,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv4i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsbc_vx_nxv4i1_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -504,11 +379,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv8i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsbc_vx_nxv8i1_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -524,11 +394,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv16i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsbc_vx_nxv16i1_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -544,11 +409,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv32i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsbc_vx_nxv32i1_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv32i1_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -564,11 +424,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv64i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsbc_vx_nxv64i1_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv64i1_nxv64i8_i8: ; CHECK: # %bb.0: # 
%entry @@ -584,11 +439,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv1i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsbc_vx_nxv1i1_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -604,11 +454,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv2i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsbc_vx_nxv2i1_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -624,11 +469,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv4i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsbc_vx_nxv4i1_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -644,11 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv8i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsbc_vx_nxv8i1_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -664,11 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv16i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsbc_vx_nxv16i1_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -684,11 +514,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv32i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsbc_vx_nxv32i1_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv32i1_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -704,11 +529,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv1i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsbc_vx_nxv1i1_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -724,11 +544,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv2i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsbc_vx_nxv2i1_nxv2i32_i32( %0, i32 
%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -744,11 +559,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv4i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsbc_vx_nxv4i1_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -764,11 +574,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv8i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsbc_vx_nxv8i1_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -784,11 +589,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv16i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsbc_vx_nxv16i1_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -804,11 +604,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv1i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsbc_vx_nxv1i1_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -836,11 +631,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv2i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsbc_vx_nxv2i1_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -868,11 +658,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv4i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsbc_vx_nxv4i1_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -900,11 +685,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbc.nxv8i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsbc_vx_nxv8i1_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbf.ll 
b/llvm/test/CodeGen/RISCV/rvv/vmsbf.ll index 80e74faa8cd91..1f6c8ef61416c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsbf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsbf.ll @@ -4,10 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vmsbf.nxv1i1( - , - iXLen); - define @intrinsic_vmsbf_m_nxv1i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsbf_m_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -22,12 +18,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbf.mask.nxv1i1( - , - , - , - iXLen); - define @intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -46,10 +36,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbf.nxv2i1( - , - iXLen); - define @intrinsic_vmsbf_m_nxv2i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsbf_m_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -64,12 +50,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbf.mask.nxv2i1( - , - , - , - iXLen); - define @intrinsic_vmsbf_mask_m_nxv2i1_nxv2i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv2i1_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -88,10 +68,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbf.nxv4i1( - , - iXLen); - define @intrinsic_vmsbf_m_nxv4i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsbf_m_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -106,12 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbf.mask.nxv4i1( - , - , - , - iXLen); - define @intrinsic_vmsbf_mask_m_nxv4i1_nxv4i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv4i1_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -130,10 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbf.nxv8i1( - , - iXLen); - define @intrinsic_vmsbf_m_nxv8i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsbf_m_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -148,12 +114,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbf.mask.nxv8i1( - , - , - , - iXLen); - define 
@intrinsic_vmsbf_mask_m_nxv8i1_nxv8i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv8i1_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -172,10 +132,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbf.nxv16i1( - , - iXLen); - define @intrinsic_vmsbf_m_nxv16i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsbf_m_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -190,12 +146,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbf.mask.nxv16i1( - , - , - , - iXLen); - define @intrinsic_vmsbf_mask_m_nxv16i1_nxv16i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv16i1_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -214,10 +164,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbf.nxv32i1( - , - iXLen); - define @intrinsic_vmsbf_m_nxv32i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsbf_m_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -232,12 +178,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbf.mask.nxv32i1( - , - , - , - iXLen); - define @intrinsic_vmsbf_mask_m_nxv32i1_nxv32i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv32i1_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -256,10 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbf.nxv64i1( - , - iXLen); - define @intrinsic_vmsbf_m_nxv64i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsbf_m_nxv64i1: ; CHECK: # %bb.0: # %entry @@ -274,12 +210,6 @@ entry: ret %a } -declare @llvm.riscv.vmsbf.mask.nxv64i1( - , - , - , - iXLen); - define @intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmseq.ll b/llvm/test/CodeGen/RISCV/rvv/vmseq.ll index 6407f39a65e8b..c87010b144696 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmseq.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmseq.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare 
@llvm.riscv.vmseq.nxv1i8( - , - , - iXLen); - define @intrinsic_vmseq_vv_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv1i8( - , - , - , - , - iXLen); - define @intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -55,11 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv2i8( - , - , - iXLen); - define @intrinsic_vmseq_vv_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,13 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv2i8( - , - , - , - , - iXLen); - define @intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -106,11 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv4i8( - , - , - iXLen); - define @intrinsic_vmseq_vv_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -126,13 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv4i8( - , - , - , - , - iXLen); - define @intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -157,11 +121,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv8i8( - , - , - iXLen); - define @intrinsic_vmseq_vv_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -177,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv8i8( - , - , - , - , - iXLen); - define @intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -208,11 +160,6 
@@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv16i8( - , - , - iXLen); - define @intrinsic_vmseq_vv_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -228,13 +175,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv16i8( - , - , - , - , - iXLen); - define @intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -259,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv32i8( - , - , - iXLen); - define @intrinsic_vmseq_vv_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -279,13 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv32i8( - , - , - , - , - iXLen); - define @intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -310,11 +238,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv1i16( - , - , - iXLen); - define @intrinsic_vmseq_vv_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -330,13 +253,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv1i16( - , - , - , - , - iXLen); - define @intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -361,11 +277,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv2i16( - , - , - iXLen); - define @intrinsic_vmseq_vv_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -381,13 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv2i16( - , - , - , - , - iXLen); - define @intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -412,11 +316,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv4i16( - , - , - iXLen); - define @intrinsic_vmseq_vv_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -432,13 +331,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -463,11 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv8i16( - , - , - iXLen); - define @intrinsic_vmseq_vv_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -483,13 +370,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv8i16( - , - , - , - , - iXLen); - define @intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -514,11 +394,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv16i16( - , - , - iXLen); - define @intrinsic_vmseq_vv_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -534,13 +409,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv16i16( - , - , - , - , - iXLen); - define @intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -565,11 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv1i32( - , - , - iXLen); - define @intrinsic_vmseq_vv_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -585,13 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv1i32( - , - , - , - , - iXLen); - define 
@intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -616,11 +472,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv2i32( - , - , - iXLen); - define @intrinsic_vmseq_vv_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -636,13 +487,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -667,11 +511,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv4i32( - , - , - iXLen); - define @intrinsic_vmseq_vv_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -687,13 +526,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv4i32( - , - , - , - , - iXLen); - define @intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -718,11 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv8i32( - , - , - iXLen); - define @intrinsic_vmseq_vv_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -738,13 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv8i32( - , - , - , - , - iXLen); - define @intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -769,11 +589,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv1i64( - , - , - iXLen); - define @intrinsic_vmseq_vv_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -789,13 +604,6 @@ entry: ret %a } 
-declare @llvm.riscv.vmseq.mask.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -820,11 +628,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv2i64( - , - , - iXLen); - define @intrinsic_vmseq_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -840,13 +643,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv2i64( - , - , - , - , - iXLen); - define @intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -871,11 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv4i64( - , - , - iXLen); - define @intrinsic_vmseq_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -891,13 +682,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv4i64( - , - , - , - , - iXLen); - define @intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -922,11 +706,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv1i8.i8( - , - i8, - iXLen); - define @intrinsic_vmseq_vx_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -942,13 +721,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv1i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmseq_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -969,11 +741,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv2i8.i8( - , - i8, - iXLen); - define @intrinsic_vmseq_vx_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv2i8_i8: ; 
CHECK: # %bb.0: # %entry @@ -989,13 +756,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv2i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmseq_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1016,11 +776,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv4i8.i8( - , - i8, - iXLen); - define @intrinsic_vmseq_vx_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1036,13 +791,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv4i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmseq_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1063,11 +811,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv8i8.i8( - , - i8, - iXLen); - define @intrinsic_vmseq_vx_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1083,13 +826,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv8i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmseq_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1110,11 +846,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv16i8.i8( - , - i8, - iXLen); - define @intrinsic_vmseq_vx_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1130,13 +861,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv16i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmseq_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1157,11 +881,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv32i8.i8( - , - i8, - iXLen); - define @intrinsic_vmseq_vx_nxv32i8_i8( %0, i8 %1, iXLen %2) 
nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1177,13 +896,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv32i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmseq_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1204,11 +916,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv1i16.i16( - , - i16, - iXLen); - define @intrinsic_vmseq_vx_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1224,13 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv1i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmseq_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1251,11 +951,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv2i16.i16( - , - i16, - iXLen); - define @intrinsic_vmseq_vx_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1271,13 +966,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv2i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmseq_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1298,11 +986,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv4i16.i16( - , - i16, - iXLen); - define @intrinsic_vmseq_vx_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1318,13 +1001,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv4i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmseq_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1345,11 +1021,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmseq.nxv8i16.i16( - , - i16, - iXLen); - define @intrinsic_vmseq_vx_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1365,13 +1036,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv8i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmseq_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1392,11 +1056,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv16i16.i16( - , - i16, - iXLen); - define @intrinsic_vmseq_vx_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1412,13 +1071,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv16i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmseq_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1439,11 +1091,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv1i32.i32( - , - i32, - iXLen); - define @intrinsic_vmseq_vx_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1459,13 +1106,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv1i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmseq_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1486,11 +1126,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv2i32.i32( - , - i32, - iXLen); - define @intrinsic_vmseq_vx_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1506,13 +1141,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv2i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmseq_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1533,11 +1161,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv4i32.i32( - , - i32, - iXLen); - define @intrinsic_vmseq_vx_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1553,13 +1176,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv4i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmseq_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1580,11 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv8i32.i32( - , - i32, - iXLen); - define @intrinsic_vmseq_vx_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1600,13 +1211,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv8i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmseq_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1627,11 +1231,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv1i64.i64( - , - i64, - iXLen); - define @intrinsic_vmseq_vx_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmseq_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1659,13 +1258,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv1i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmseq_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmseq_mask_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1701,11 +1293,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv2i64.i64( - , - i64, - iXLen); - define @intrinsic_vmseq_vx_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmseq_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1733,13 +1320,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv2i64.i64( - , - , 
- i64, - , - iXLen); - define @intrinsic_vmseq_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmseq_mask_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1775,11 +1355,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.nxv4i64.i64( - , - i64, - iXLen); - define @intrinsic_vmseq_vx_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmseq_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -1807,13 +1382,6 @@ entry: ret %a } -declare @llvm.riscv.vmseq.mask.nxv4i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmseq_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmseq_mask_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmset.ll b/llvm/test/CodeGen/RISCV/rvv/vmset.ll index 0c63d7a852143..e2853310e213c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmset.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmset.ll @@ -4,9 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vmset.nxv1i1( - iXLen); - define @intrinsic_vmset_m_pseudo_nxv1i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -20,9 +17,6 @@ entry: ret %a } -declare @llvm.riscv.vmset.nxv2i1( - iXLen); - define @intrinsic_vmset_m_pseudo_nxv2i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -36,9 +30,6 @@ entry: ret %a } -declare @llvm.riscv.vmset.nxv4i1( - iXLen); - define @intrinsic_vmset_m_pseudo_nxv4i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -52,9 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmset.nxv8i1( - iXLen); - define @intrinsic_vmset_m_pseudo_nxv8i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -68,9 +56,6 @@ entry: ret %a } -declare @llvm.riscv.vmset.nxv16i1( - iXLen); - define 
@intrinsic_vmset_m_pseudo_nxv16i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -84,9 +69,6 @@ entry: ret %a } -declare @llvm.riscv.vmset.nxv32i1( - iXLen); - define @intrinsic_vmset_m_pseudo_nxv32i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -100,9 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmset.nxv64i1( - iXLen); - define @intrinsic_vmset_m_pseudo_nxv64i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv64i1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsge.ll b/llvm/test/CodeGen/RISCV/rvv/vmsge.ll index 45e3840f7e673..21a4143f323ee 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsge.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsge.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vmsge.nxv1i8( - , - , - iXLen); - define @intrinsic_vmsge_vv_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv1i8( - , - , - , - , - iXLen); - define @intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -55,11 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv2i8( - , - , - iXLen); - define @intrinsic_vmsge_vv_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,13 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv2i8( - , - , - , - , - iXLen); - define @intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -106,11 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv4i8( - , - , - 
iXLen); - define @intrinsic_vmsge_vv_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -126,13 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv4i8( - , - , - , - , - iXLen); - define @intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -157,11 +121,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv8i8( - , - , - iXLen); - define @intrinsic_vmsge_vv_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -177,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv8i8( - , - , - , - , - iXLen); - define @intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -208,11 +160,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv16i8( - , - , - iXLen); - define @intrinsic_vmsge_vv_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -228,13 +175,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv16i8( - , - , - , - , - iXLen); - define @intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -259,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv32i8( - , - , - iXLen); - define @intrinsic_vmsge_vv_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -279,13 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv32i8( - , - , - , - , - iXLen); - define @intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -310,11 +238,6 @@ 
entry: ret %a } -declare @llvm.riscv.vmsge.nxv1i16( - , - , - iXLen); - define @intrinsic_vmsge_vv_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -330,13 +253,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv1i16( - , - , - , - , - iXLen); - define @intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -361,11 +277,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv2i16( - , - , - iXLen); - define @intrinsic_vmsge_vv_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -381,13 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv2i16( - , - , - , - , - iXLen); - define @intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -412,11 +316,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv4i16( - , - , - iXLen); - define @intrinsic_vmsge_vv_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -432,13 +331,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -463,11 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv8i16( - , - , - iXLen); - define @intrinsic_vmsge_vv_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -483,13 +370,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv8i16( - , - , - , - , - iXLen); - define @intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -514,11 +394,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv16i16( - , - , - iXLen); - define @intrinsic_vmsge_vv_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -534,13 +409,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv16i16( - , - , - , - , - iXLen); - define @intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -565,11 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv1i32( - , - , - iXLen); - define @intrinsic_vmsge_vv_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -585,13 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv1i32( - , - , - , - , - iXLen); - define @intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -616,11 +472,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv2i32( - , - , - iXLen); - define @intrinsic_vmsge_vv_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -636,13 +487,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -667,11 +511,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv4i32( - , - , - iXLen); - define @intrinsic_vmsge_vv_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -687,13 +526,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv4i32( - , - , - , - , - iXLen); - define 
@intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -718,11 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv8i32( - , - , - iXLen); - define @intrinsic_vmsge_vv_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -738,13 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv8i32( - , - , - , - , - iXLen); - define @intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -769,11 +589,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv1i64( - , - , - iXLen); - define @intrinsic_vmsge_vv_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -789,13 +604,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -820,11 +628,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv2i64( - , - , - iXLen); - define @intrinsic_vmsge_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -840,13 +643,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv2i64( - , - , - , - , - iXLen); - define @intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -871,11 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv4i64( - , - , - iXLen); - define @intrinsic_vmsge_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -891,13 +682,6 @@ entry: ret %a } 
-declare @llvm.riscv.vmsge.mask.nxv4i64( - , - , - , - , - iXLen); - define @intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -922,11 +706,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv1i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsge_vx_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -943,13 +722,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv1i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsge_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -970,11 +742,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv2i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsge_vx_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -991,13 +758,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv2i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsge_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1018,11 +778,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv4i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsge_vx_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1039,13 +794,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv4i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsge_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1104,12 +852,6 @@ entry: ret %a } - -declare @llvm.riscv.vmsge.nxv8i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsge_vx_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i8_i8: ; CHECK: # %bb.0: 
# %entry @@ -1126,13 +868,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv8i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsge_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1153,11 +888,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv16i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsge_vx_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1174,13 +904,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv16i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsge_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1201,11 +924,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv32i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsge_vx_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1222,13 +940,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv32i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsge_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1249,11 +960,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv1i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsge_vx_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1270,13 +976,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv1i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsge_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1297,11 +996,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv2i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsge_vx_nxv2i16_i16( %0, i16 %1, iXLen 
%2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1318,13 +1012,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv2i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsge_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1345,11 +1032,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv4i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsge_vx_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1366,13 +1048,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv4i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsge_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1393,11 +1068,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv8i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsge_vx_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1414,13 +1084,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv8i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsge_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1441,11 +1104,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv16i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsge_vx_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1462,13 +1120,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv16i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsge_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1489,11 +1140,6 @@ entry: ret %a 
} -declare @llvm.riscv.vmsge.nxv1i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsge_vx_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1510,13 +1156,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv1i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsge_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1537,11 +1176,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv2i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsge_vx_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1558,13 +1192,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv2i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsge_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1585,11 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv4i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsge_vx_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1606,13 +1228,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv4i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsge_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1633,11 +1248,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv8i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsge_vx_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1654,13 +1264,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv8i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsge_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { 
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1681,11 +1284,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv1i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsge_vx_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsge_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1714,13 +1312,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv1i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmsge_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmsge_mask_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1756,11 +1347,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv2i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsge_vx_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsge_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1789,13 +1375,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv2i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmsge_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmsge_mask_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1831,11 +1410,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.nxv4i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsge_vx_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsge_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -1864,13 +1438,6 @@ entry: ret %a } -declare @llvm.riscv.vmsge.mask.nxv4i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmsge_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmsge_mask_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll index d3f57d58c7ab7..4795e86983089 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck 
%s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vmsgeu.nxv1i8( - , - , - iXLen); - define @intrinsic_vmsgeu_vv_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv1i8( - , - , - , - , - iXLen); - define @intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -55,11 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv2i8( - , - , - iXLen); - define @intrinsic_vmsgeu_vv_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,13 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv2i8( - , - , - , - , - iXLen); - define @intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -106,11 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv4i8( - , - , - iXLen); - define @intrinsic_vmsgeu_vv_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -126,13 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv4i8( - , - , - , - , - iXLen); - define @intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -157,11 +121,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv8i8( - , - , - iXLen); - define @intrinsic_vmsgeu_vv_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -177,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv8i8( - , - , - , - , - iXLen); - define @intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -208,11 +160,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv16i8( - , - , - iXLen); - define @intrinsic_vmsgeu_vv_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -228,13 +175,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv16i8( - , - , - , - , - iXLen); - define @intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -259,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv32i8( - , - , - iXLen); - define @intrinsic_vmsgeu_vv_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -279,13 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv32i8( - , - , - , - , - iXLen); - define @intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -310,11 +238,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv1i16( - , - , - iXLen); - define @intrinsic_vmsgeu_vv_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -330,13 +253,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv1i16( - , - , - , - , - iXLen); - define @intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -361,11 +277,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv2i16( - , - , - iXLen); - define @intrinsic_vmsgeu_vv_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -381,13 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv2i16( - , - , - , - , - iXLen); - 
define @intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -412,11 +316,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv4i16( - , - , - iXLen); - define @intrinsic_vmsgeu_vv_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -432,13 +331,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -463,11 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv8i16( - , - , - iXLen); - define @intrinsic_vmsgeu_vv_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -483,13 +370,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv8i16( - , - , - , - , - iXLen); - define @intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -514,11 +394,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv16i16( - , - , - iXLen); - define @intrinsic_vmsgeu_vv_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -534,13 +409,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv16i16( - , - , - , - , - iXLen); - define @intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -565,11 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv1i32( - , - , - iXLen); - define @intrinsic_vmsgeu_vv_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # 
%entry @@ -585,13 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv1i32( - , - , - , - , - iXLen); - define @intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -616,11 +472,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv2i32( - , - , - iXLen); - define @intrinsic_vmsgeu_vv_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -636,13 +487,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -667,11 +511,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv4i32( - , - , - iXLen); - define @intrinsic_vmsgeu_vv_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -687,13 +526,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv4i32( - , - , - , - , - iXLen); - define @intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -718,11 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv8i32( - , - , - iXLen); - define @intrinsic_vmsgeu_vv_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -738,13 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv8i32( - , - , - , - , - iXLen); - define @intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -769,11 +589,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv1i64( - , - , - iXLen); - define @intrinsic_vmsgeu_vv_nxv1i64_nxv1i64( 
%0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -789,13 +604,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -820,11 +628,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv2i64( - , - , - iXLen); - define @intrinsic_vmsgeu_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -840,13 +643,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv2i64( - , - , - , - , - iXLen); - define @intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -871,11 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv4i64( - , - , - iXLen); - define @intrinsic_vmsgeu_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -891,13 +682,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv4i64( - , - , - , - , - iXLen); - define @intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -922,11 +706,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv1i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsgeu_vx_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -943,13 +722,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv1i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsgeu_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -970,11 +742,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmsgeu.nxv2i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsgeu_vx_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -991,13 +758,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv2i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsgeu_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1018,11 +778,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv4i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsgeu_vx_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1039,13 +794,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv4i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsgeu_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1066,11 +814,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv8i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsgeu_vx_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1087,13 +830,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv8i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsgeu_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1114,11 +850,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv16i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsgeu_vx_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1135,13 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv16i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsgeu_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i8_i8: 
; CHECK: # %bb.0: # %entry @@ -1162,11 +886,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv32i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsgeu_vx_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1183,13 +902,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv32i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsgeu_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1210,11 +922,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv1i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsgeu_vx_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1231,13 +938,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv1i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsgeu_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1258,11 +958,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv2i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsgeu_vx_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1279,13 +974,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv2i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsgeu_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1306,11 +994,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv4i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsgeu_vx_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1327,13 +1010,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv4i16.i16( - , - , - i16, - , - iXLen); - define 
@intrinsic_vmsgeu_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1354,11 +1030,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv8i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsgeu_vx_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1375,13 +1046,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv8i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsgeu_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1402,11 +1066,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv16i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsgeu_vx_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1423,13 +1082,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv16i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsgeu_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1450,11 +1102,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv1i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsgeu_vx_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1471,13 +1118,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv1i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsgeu_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1498,11 +1138,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv2i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsgeu_vx_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i32_i32: ; 
CHECK: # %bb.0: # %entry @@ -1519,13 +1154,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv2i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsgeu_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1546,11 +1174,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv4i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsgeu_vx_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1567,13 +1190,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv4i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsgeu_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1594,11 +1210,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv8i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsgeu_vx_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1615,13 +1226,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv8i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsgeu_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1642,11 +1246,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv1i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsgeu_vx_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsgeu_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1675,13 +1274,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv1i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmsgeu_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1717,11 +1309,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv2i64.i64( - , - i64, 
- iXLen); - define @intrinsic_vmsgeu_vx_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsgeu_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1750,13 +1337,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv2i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmsgeu_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1792,11 +1372,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.nxv4i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsgeu_vx_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsgeu_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -1825,13 +1400,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgeu.mask.nxv4i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmsgeu_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgt.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgt.ll index 62ac44bfdf38c..f7b5cad0c8ed9 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsgt.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsgt.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vmsgt.nxv1i8( - , - , - iXLen); - define @intrinsic_vmsgt_vv_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv1i8( - , - , - , - , - iXLen); - define @intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -55,11 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv2i8( - , - , - iXLen); - define @intrinsic_vmsgt_vv_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmsgt_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,13 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv2i8( - , - , - , - , - iXLen); - define @intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -106,11 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv4i8( - , - , - iXLen); - define @intrinsic_vmsgt_vv_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -126,13 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv4i8( - , - , - , - , - iXLen); - define @intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -157,11 +121,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv8i8( - , - , - iXLen); - define @intrinsic_vmsgt_vv_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -177,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv8i8( - , - , - , - , - iXLen); - define @intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -208,11 +160,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv16i8( - , - , - iXLen); - define @intrinsic_vmsgt_vv_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -228,13 +175,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv16i8( - , - , - , - , - iXLen); - define @intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -259,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv32i8( - , - , - iXLen); - define 
@intrinsic_vmsgt_vv_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -279,13 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv32i8( - , - , - , - , - iXLen); - define @intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -310,11 +238,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv1i16( - , - , - iXLen); - define @intrinsic_vmsgt_vv_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -330,13 +253,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv1i16( - , - , - , - , - iXLen); - define @intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -361,11 +277,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv2i16( - , - , - iXLen); - define @intrinsic_vmsgt_vv_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -381,13 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv2i16( - , - , - , - , - iXLen); - define @intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -412,11 +316,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv4i16( - , - , - iXLen); - define @intrinsic_vmsgt_vv_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -432,13 +331,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -463,11 +355,6 @@ 
entry: ret %a } -declare @llvm.riscv.vmsgt.nxv8i16( - , - , - iXLen); - define @intrinsic_vmsgt_vv_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -483,13 +370,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv8i16( - , - , - , - , - iXLen); - define @intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -514,11 +394,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv16i16( - , - , - iXLen); - define @intrinsic_vmsgt_vv_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -534,13 +409,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv16i16( - , - , - , - , - iXLen); - define @intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -565,11 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv1i32( - , - , - iXLen); - define @intrinsic_vmsgt_vv_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -585,13 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv1i32( - , - , - , - , - iXLen); - define @intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -616,11 +472,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv2i32( - , - , - iXLen); - define @intrinsic_vmsgt_vv_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -636,13 +487,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -667,11 +511,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv4i32( - , - , - iXLen); - define @intrinsic_vmsgt_vv_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -687,13 +526,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv4i32( - , - , - , - , - iXLen); - define @intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -718,11 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv8i32( - , - , - iXLen); - define @intrinsic_vmsgt_vv_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -738,13 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv8i32( - , - , - , - , - iXLen); - define @intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -769,11 +589,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv1i64( - , - , - iXLen); - define @intrinsic_vmsgt_vv_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -789,13 +604,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -820,11 +628,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv2i64( - , - , - iXLen); - define @intrinsic_vmsgt_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -840,13 +643,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv2i64( - , - , - , - , - iXLen); - define 
@intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -871,11 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv4i64( - , - , - iXLen); - define @intrinsic_vmsgt_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -891,13 +682,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv4i64( - , - , - , - , - iXLen); - define @intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -922,11 +706,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv1i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsgt_vx_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -942,13 +721,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv1i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsgt_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -969,11 +741,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv2i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsgt_vx_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -989,13 +756,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv2i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsgt_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1016,11 +776,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv4i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsgt_vx_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1036,13 +791,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmsgt.mask.nxv4i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsgt_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1063,11 +811,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv8i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsgt_vx_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1083,13 +826,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv8i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsgt_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1110,11 +846,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv16i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsgt_vx_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1130,13 +861,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv16i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsgt_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1157,11 +881,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv32i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsgt_vx_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1177,13 +896,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv32i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsgt_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1204,11 +916,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv1i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsgt_vx_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv1i16_i16: ; CHECK: 
# %bb.0: # %entry @@ -1224,13 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv1i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsgt_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1251,11 +951,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv2i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsgt_vx_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1271,13 +966,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv2i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsgt_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1298,11 +986,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv4i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsgt_vx_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1318,13 +1001,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv4i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsgt_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1345,11 +1021,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv8i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsgt_vx_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1365,13 +1036,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv8i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsgt_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1392,11 +1056,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv16i16.i16( - , - i16, - iXLen); - define 
@intrinsic_vmsgt_vx_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1412,13 +1071,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv16i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsgt_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1439,11 +1091,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv1i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsgt_vx_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1459,13 +1106,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv1i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsgt_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1486,11 +1126,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv2i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsgt_vx_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1506,13 +1141,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv2i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsgt_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1533,11 +1161,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv4i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsgt_vx_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1553,13 +1176,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv4i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsgt_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i32_i32: ; CHECK: # 
%bb.0: # %entry @@ -1580,11 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv8i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsgt_vx_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1600,13 +1211,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv8i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsgt_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1627,11 +1231,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv1i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsgt_vx_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsgt_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1659,13 +1258,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv1i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmsgt_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmsgt_mask_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1701,11 +1293,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv2i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsgt_vx_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsgt_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1733,13 +1320,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv2i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmsgt_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmsgt_mask_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1775,11 +1355,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.nxv4i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsgt_vx_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsgt_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -1807,13 +1382,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgt.mask.nxv4i64.i64( - , - , - i64, - , - iXLen); - define 
@intrinsic_vmsgt_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmsgt_mask_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgtu.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgtu.ll index d57b9cd5bae53..2f79b1ca2b93e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsgtu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsgtu.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vmsgtu.nxv1i8( - , - , - iXLen); - define @intrinsic_vmsgtu_vv_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv1i8( - , - , - , - , - iXLen); - define @intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -55,11 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv2i8( - , - , - iXLen); - define @intrinsic_vmsgtu_vv_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,13 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv2i8( - , - , - , - , - iXLen); - define @intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -106,11 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv4i8( - , - , - iXLen); - define @intrinsic_vmsgtu_vv_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -126,13 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv4i8( - , - , - , - , - iXLen); - define @intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -157,11 +121,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv8i8( - , - , - iXLen); - define @intrinsic_vmsgtu_vv_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -177,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv8i8( - , - , - , - , - iXLen); - define @intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -208,11 +160,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv16i8( - , - , - iXLen); - define @intrinsic_vmsgtu_vv_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -228,13 +175,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv16i8( - , - , - , - , - iXLen); - define @intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -259,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv32i8( - , - , - iXLen); - define @intrinsic_vmsgtu_vv_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -279,13 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv32i8( - , - , - , - , - iXLen); - define @intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -310,11 +238,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv1i16( - , - , - iXLen); - define @intrinsic_vmsgtu_vv_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -330,13 +253,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv1i16( - , - , - , - , - iXLen); - define 
@intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -361,11 +277,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv2i16( - , - , - iXLen); - define @intrinsic_vmsgtu_vv_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -381,13 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv2i16( - , - , - , - , - iXLen); - define @intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -412,11 +316,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv4i16( - , - , - iXLen); - define @intrinsic_vmsgtu_vv_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -432,13 +331,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -463,11 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv8i16( - , - , - iXLen); - define @intrinsic_vmsgtu_vv_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -483,13 +370,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv8i16( - , - , - , - , - iXLen); - define @intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -514,11 +394,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv16i16( - , - , - iXLen); - define @intrinsic_vmsgtu_vv_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ 
-534,13 +409,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv16i16( - , - , - , - , - iXLen); - define @intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -565,11 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv1i32( - , - , - iXLen); - define @intrinsic_vmsgtu_vv_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -585,13 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv1i32( - , - , - , - , - iXLen); - define @intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -616,11 +472,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv2i32( - , - , - iXLen); - define @intrinsic_vmsgtu_vv_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -636,13 +487,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -667,11 +511,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv4i32( - , - , - iXLen); - define @intrinsic_vmsgtu_vv_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -687,13 +526,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv4i32( - , - , - , - , - iXLen); - define @intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -718,11 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv8i32( - , - , - iXLen); - define @intrinsic_vmsgtu_vv_nxv8i32_nxv8i32( %0, 
%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -738,13 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv8i32( - , - , - , - , - iXLen); - define @intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -769,11 +589,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv1i64( - , - , - iXLen); - define @intrinsic_vmsgtu_vv_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -789,13 +604,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -820,11 +628,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv2i64( - , - , - iXLen); - define @intrinsic_vmsgtu_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -840,13 +643,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv2i64( - , - , - , - , - iXLen); - define @intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -871,11 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv4i64( - , - , - iXLen); - define @intrinsic_vmsgtu_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -891,13 +682,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv4i64( - , - , - , - , - iXLen); - define @intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -922,11 +706,6 @@ entry: ret %a } 
-declare @llvm.riscv.vmsgtu.nxv1i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsgtu_vx_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -942,13 +721,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv1i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsgtu_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -969,11 +741,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv2i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsgtu_vx_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -989,13 +756,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv2i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsgtu_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1016,11 +776,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv4i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsgtu_vx_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1036,13 +791,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv4i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsgtu_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1063,11 +811,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv8i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsgtu_vx_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1083,13 +826,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv8i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsgtu_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i8_i8: 
; CHECK: # %bb.0: # %entry @@ -1110,11 +846,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv16i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsgtu_vx_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1130,13 +861,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv16i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsgtu_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1157,11 +881,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv32i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsgtu_vx_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1177,13 +896,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv32i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsgtu_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1204,11 +916,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv1i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsgtu_vx_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1224,13 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv1i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsgtu_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1251,11 +951,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv2i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsgtu_vx_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1271,13 +966,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv2i16.i16( - , - , - i16, - , - iXLen); - define 
@intrinsic_vmsgtu_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1298,11 +986,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv4i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsgtu_vx_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1318,13 +1001,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv4i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsgtu_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1345,11 +1021,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv8i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsgtu_vx_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1365,13 +1036,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv8i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsgtu_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1392,11 +1056,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv16i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsgtu_vx_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1412,13 +1071,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv16i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsgtu_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1439,11 +1091,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv1i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsgtu_vx_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv1i32_i32: ; 
CHECK: # %bb.0: # %entry @@ -1459,13 +1106,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv1i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsgtu_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1486,11 +1126,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv2i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsgtu_vx_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1506,13 +1141,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv2i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsgtu_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1533,11 +1161,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv4i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsgtu_vx_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1553,13 +1176,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv4i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsgtu_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1580,11 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv8i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsgtu_vx_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1600,13 +1211,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv8i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsgtu_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1627,11 +1231,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv1i64.i64( - , - 
i64, - iXLen); - define @intrinsic_vmsgtu_vx_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsgtu_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1659,13 +1258,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv1i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmsgtu_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1701,11 +1293,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv2i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsgtu_vx_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsgtu_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1733,13 +1320,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv2i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmsgtu_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1775,11 +1355,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.nxv4i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsgtu_vx_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsgtu_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -1807,13 +1382,6 @@ entry: ret %a } -declare @llvm.riscv.vmsgtu.mask.nxv4i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmsgtu_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsif.ll b/llvm/test/CodeGen/RISCV/rvv/vmsif.ll index 9c70dcab1efde..00a3673b0f415 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsif.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsif.ll @@ -4,10 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vmsif.nxv1i1( - , - iXLen); - define @intrinsic_vmsif_m_nxv1i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: 
intrinsic_vmsif_m_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -22,12 +18,6 @@ entry: ret %a } -declare @llvm.riscv.vmsif.mask.nxv1i1( - , - , - , - iXLen); - define @intrinsic_vmsif_mask_m_nxv1i1_nxv1i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv1i1_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -46,10 +36,6 @@ entry: ret %a } -declare @llvm.riscv.vmsif.nxv2i1( - , - iXLen); - define @intrinsic_vmsif_m_nxv2i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsif_m_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -64,12 +50,6 @@ entry: ret %a } -declare @llvm.riscv.vmsif.mask.nxv2i1( - , - , - , - iXLen); - define @intrinsic_vmsif_mask_m_nxv2i1_nxv2i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv2i1_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -88,10 +68,6 @@ entry: ret %a } -declare @llvm.riscv.vmsif.nxv4i1( - , - iXLen); - define @intrinsic_vmsif_m_nxv4i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsif_m_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -106,12 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmsif.mask.nxv4i1( - , - , - , - iXLen); - define @intrinsic_vmsif_mask_m_nxv4i1_nxv4i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv4i1_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -130,10 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vmsif.nxv8i1( - , - iXLen); - define @intrinsic_vmsif_m_nxv8i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsif_m_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -148,12 +114,6 @@ entry: ret %a } -declare @llvm.riscv.vmsif.mask.nxv8i1( - , - , - , - iXLen); - define @intrinsic_vmsif_mask_m_nxv8i1_nxv8i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv8i1_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -172,10 +132,6 @@ entry: ret %a } -declare @llvm.riscv.vmsif.nxv16i1( - , - iXLen); - define @intrinsic_vmsif_m_nxv16i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsif_m_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -190,12 +146,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmsif.mask.nxv16i1( - , - , - , - iXLen); - define @intrinsic_vmsif_mask_m_nxv16i1_nxv16i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv16i1_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -214,10 +164,6 @@ entry: ret %a } -declare @llvm.riscv.vmsif.nxv32i1( - , - iXLen); - define @intrinsic_vmsif_m_nxv32i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsif_m_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -232,12 +178,6 @@ entry: ret %a } -declare @llvm.riscv.vmsif.mask.nxv32i1( - , - , - , - iXLen); - define @intrinsic_vmsif_mask_m_nxv32i1_nxv32i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv32i1_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -256,10 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vmsif.nxv64i1( - , - iXLen); - define @intrinsic_vmsif_m_nxv64i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsif_m_nxv64i1: ; CHECK: # %bb.0: # %entry @@ -274,12 +210,6 @@ entry: ret %a } -declare @llvm.riscv.vmsif.mask.nxv64i1( - , - , - , - iXLen); - define @intrinsic_vmsif_mask_m_nxv64i1_nxv64i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv64i1_nxv64i1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsle.ll b/llvm/test/CodeGen/RISCV/rvv/vmsle.ll index 9653dfd2518d8..ec60f75bb206d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsle.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsle.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vmsle.nxv1i8( - , - , - iXLen); - define @intrinsic_vmsle_vv_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv1i8( - , - , - , - , - iXLen); - define @intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8: 
; CHECK: # %bb.0: # %entry @@ -55,11 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv2i8( - , - , - iXLen); - define @intrinsic_vmsle_vv_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,13 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv2i8( - , - , - , - , - iXLen); - define @intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -106,11 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv4i8( - , - , - iXLen); - define @intrinsic_vmsle_vv_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -126,13 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv4i8( - , - , - , - , - iXLen); - define @intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -157,11 +121,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv8i8( - , - , - iXLen); - define @intrinsic_vmsle_vv_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -177,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv8i8( - , - , - , - , - iXLen); - define @intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -208,11 +160,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv16i8( - , - , - iXLen); - define @intrinsic_vmsle_vv_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -228,13 +175,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv16i8( - , - , - , - , - iXLen); - define @intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -259,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv32i8( - , - , - iXLen); - define @intrinsic_vmsle_vv_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -279,13 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv32i8( - , - , - , - , - iXLen); - define @intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -310,11 +238,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv1i16( - , - , - iXLen); - define @intrinsic_vmsle_vv_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -330,13 +253,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv1i16( - , - , - , - , - iXLen); - define @intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -361,11 +277,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv2i16( - , - , - iXLen); - define @intrinsic_vmsle_vv_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -381,13 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv2i16( - , - , - , - , - iXLen); - define @intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -412,11 +316,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv4i16( - , - , - iXLen); - define @intrinsic_vmsle_vv_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -432,13 +331,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv4i16( - , - , - , - , - iXLen); - define 
@intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -463,11 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv8i16( - , - , - iXLen); - define @intrinsic_vmsle_vv_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -483,13 +370,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv8i16( - , - , - , - , - iXLen); - define @intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -514,11 +394,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv16i16( - , - , - iXLen); - define @intrinsic_vmsle_vv_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -534,13 +409,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv16i16( - , - , - , - , - iXLen); - define @intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -565,11 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv1i32( - , - , - iXLen); - define @intrinsic_vmsle_vv_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -585,13 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv1i32( - , - , - , - , - iXLen); - define @intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -616,11 +472,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv2i32( - , - , - iXLen); - define @intrinsic_vmsle_vv_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -636,13 +487,6 @@ 
entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -667,11 +511,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv4i32( - , - , - iXLen); - define @intrinsic_vmsle_vv_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -687,13 +526,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv4i32( - , - , - , - , - iXLen); - define @intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -718,11 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv8i32( - , - , - iXLen); - define @intrinsic_vmsle_vv_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -738,13 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv8i32( - , - , - , - , - iXLen); - define @intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -769,11 +589,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv1i64( - , - , - iXLen); - define @intrinsic_vmsle_vv_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -789,13 +604,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vmsle_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -820,11 +628,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv2i64( - , - , - iXLen); - define @intrinsic_vmsle_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmsle_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -840,13 +643,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv2i64( - , - , - , - , - iXLen); - define @intrinsic_vmsle_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -871,11 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv4i64( - , - , - iXLen); - define @intrinsic_vmsle_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -891,13 +682,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv4i64( - , - , - , - , - iXLen); - define @intrinsic_vmsle_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -922,11 +706,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv1i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsle_vx_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -942,13 +721,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv1i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsle_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -969,11 +741,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv2i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsle_vx_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -989,13 +756,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv2i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsle_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1016,11 +776,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv4i8.i8( - , - i8, - iXLen); - define 
@intrinsic_vmsle_vx_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1036,13 +791,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv4i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsle_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1063,11 +811,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv8i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsle_vx_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1083,13 +826,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv8i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsle_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1110,11 +846,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv16i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsle_vx_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1130,13 +861,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv16i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsle_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1157,11 +881,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv32i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsle_vx_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1177,13 +896,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv32i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsle_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1204,11 +916,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmsle.nxv1i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsle_vx_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1224,13 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv1i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsle_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1251,11 +951,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv2i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsle_vx_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1271,13 +966,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv2i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsle_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1298,11 +986,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv4i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsle_vx_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1318,13 +1001,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv4i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsle_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1345,11 +1021,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv8i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsle_vx_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1365,13 +1036,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv8i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsle_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmsle_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1392,11 +1056,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv16i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsle_vx_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1412,13 +1071,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv16i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsle_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1439,11 +1091,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv1i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsle_vx_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1459,13 +1106,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv1i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsle_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1486,11 +1126,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv2i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsle_vx_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1506,13 +1141,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv2i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsle_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1533,11 +1161,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv4i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsle_vx_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1553,13 +1176,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv4i32.i32( - , - , - 
i32, - , - iXLen); - define @intrinsic_vmsle_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1580,11 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv8i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsle_vx_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1600,13 +1211,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv8i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsle_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1627,11 +1231,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv1i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsle_vx_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsle_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1659,13 +1258,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv1i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmsle_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmsle_mask_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1701,11 +1293,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv2i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsle_vx_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsle_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1733,13 +1320,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv2i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmsle_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmsle_mask_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1775,11 +1355,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.nxv4i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsle_vx_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsle_vx_nxv4i64_i64: ; RV32: # 
%bb.0: # %entry @@ -1807,13 +1382,6 @@ entry: ret %a } -declare @llvm.riscv.vmsle.mask.nxv4i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmsle_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmsle_mask_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsleu.ll b/llvm/test/CodeGen/RISCV/rvv/vmsleu.ll index 25ecfa65c7c48..1c57fc9002857 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsleu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsleu.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vmsleu.nxv1i8( - , - , - iXLen); - define @intrinsic_vmsleu_vv_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv1i8( - , - , - , - , - iXLen); - define @intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -55,11 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv2i8( - , - , - iXLen); - define @intrinsic_vmsleu_vv_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,13 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv2i8( - , - , - , - , - iXLen); - define @intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -106,11 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv4i8( - , - , - iXLen); - define @intrinsic_vmsleu_vv_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -126,13 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv4i8( - , - , - , - , - iXLen); 
- define @intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -157,11 +121,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv8i8( - , - , - iXLen); - define @intrinsic_vmsleu_vv_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -177,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv8i8( - , - , - , - , - iXLen); - define @intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -208,11 +160,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv16i8( - , - , - iXLen); - define @intrinsic_vmsleu_vv_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -228,13 +175,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv16i8( - , - , - , - , - iXLen); - define @intrinsic_vmsleu_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -259,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv32i8( - , - , - iXLen); - define @intrinsic_vmsleu_vv_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -279,13 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv32i8( - , - , - , - , - iXLen); - define @intrinsic_vmsleu_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -310,11 +238,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv1i16( - , - , - iXLen); - define @intrinsic_vmsleu_vv_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -330,13 +253,6 
@@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv1i16( - , - , - , - , - iXLen); - define @intrinsic_vmsleu_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -361,11 +277,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv2i16( - , - , - iXLen); - define @intrinsic_vmsleu_vv_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -381,13 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv2i16( - , - , - , - , - iXLen); - define @intrinsic_vmsleu_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -412,11 +316,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv4i16( - , - , - iXLen); - define @intrinsic_vmsleu_vv_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -432,13 +331,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vmsleu_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -463,11 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv8i16( - , - , - iXLen); - define @intrinsic_vmsleu_vv_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -483,13 +370,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv8i16( - , - , - , - , - iXLen); - define @intrinsic_vmsleu_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -514,11 +394,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv16i16( - , - , - iXLen); - define @intrinsic_vmsleu_vv_nxv16i16_nxv16i16( %0, %1, iXLen %2) 
nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -534,13 +409,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv16i16( - , - , - , - , - iXLen); - define @intrinsic_vmsleu_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -565,11 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv1i32( - , - , - iXLen); - define @intrinsic_vmsleu_vv_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -585,13 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv1i32( - , - , - , - , - iXLen); - define @intrinsic_vmsleu_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -616,11 +472,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv2i32( - , - , - iXLen); - define @intrinsic_vmsleu_vv_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -636,13 +487,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vmsleu_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -667,11 +511,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv4i32( - , - , - iXLen); - define @intrinsic_vmsleu_vv_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -687,13 +526,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv4i32( - , - , - , - , - iXLen); - define @intrinsic_vmsleu_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -718,11 +550,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmsleu.nxv8i32( - , - , - iXLen); - define @intrinsic_vmsleu_vv_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -738,13 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv8i32( - , - , - , - , - iXLen); - define @intrinsic_vmsleu_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -769,11 +589,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv1i64( - , - , - iXLen); - define @intrinsic_vmsleu_vv_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -789,13 +604,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vmsleu_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -820,11 +628,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv2i64( - , - , - iXLen); - define @intrinsic_vmsleu_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -840,13 +643,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv2i64( - , - , - , - , - iXLen); - define @intrinsic_vmsleu_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -871,11 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv4i64( - , - , - iXLen); - define @intrinsic_vmsleu_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -891,13 +682,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv4i64( - , - , - , - , - iXLen); - define @intrinsic_vmsleu_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmsleu_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -922,11 +706,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv1i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsleu_vx_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -942,13 +721,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv1i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsleu_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -969,11 +741,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv2i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsleu_vx_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -989,13 +756,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv2i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsleu_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1016,11 +776,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv4i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsleu_vx_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1036,13 +791,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv4i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsleu_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1063,11 +811,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv8i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsleu_vx_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1083,13 +826,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv8i8.i8( - , - , - i8, - , - iXLen); - define 
@intrinsic_vmsleu_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1110,11 +846,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv16i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsleu_vx_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1130,13 +861,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv16i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsleu_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1157,11 +881,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv32i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsleu_vx_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1177,13 +896,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv32i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsleu_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1204,11 +916,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv1i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsleu_vx_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1224,13 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv1i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsleu_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1251,11 +951,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv2i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsleu_vx_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1271,13 
+966,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv2i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsleu_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1298,11 +986,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv4i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsleu_vx_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1318,13 +1001,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv4i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsleu_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1345,11 +1021,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv8i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsleu_vx_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1365,13 +1036,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv8i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsleu_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1392,11 +1056,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv16i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsleu_vx_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1412,13 +1071,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv16i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsleu_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1439,11 +1091,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv1i32.i32( - , - i32, - iXLen); - define 
@intrinsic_vmsleu_vx_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1459,13 +1106,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv1i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsleu_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1486,11 +1126,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv2i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsleu_vx_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1506,13 +1141,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv2i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsleu_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1533,11 +1161,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv4i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsleu_vx_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1553,13 +1176,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv4i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsleu_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1580,11 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv8i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsleu_vx_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1600,13 +1211,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv8i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsleu_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmsleu_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1627,11 +1231,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv1i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsleu_vx_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsleu_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1659,13 +1258,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv1i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmsleu_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmsleu_mask_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1701,11 +1293,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv2i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsleu_vx_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsleu_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1733,13 +1320,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv2i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmsleu_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmsleu_mask_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1775,11 +1355,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.nxv4i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsleu_vx_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsleu_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -1807,13 +1382,6 @@ entry: ret %a } -declare @llvm.riscv.vmsleu.mask.nxv4i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmsleu_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmsleu_mask_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmslt.ll b/llvm/test/CodeGen/RISCV/rvv/vmslt.ll index c17495e3b2119..e528d07dac51f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmslt.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmslt.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck 
%s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vmslt.nxv1i8( - , - , - iXLen); - define @intrinsic_vmslt_vv_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv1i8( - , - , - , - , - iXLen); - define @intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -55,11 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv2i8( - , - , - iXLen); - define @intrinsic_vmslt_vv_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,13 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv2i8( - , - , - , - , - iXLen); - define @intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -106,11 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv4i8( - , - , - iXLen); - define @intrinsic_vmslt_vv_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -126,13 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv4i8( - , - , - , - , - iXLen); - define @intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -157,11 +121,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv8i8( - , - , - iXLen); - define @intrinsic_vmslt_vv_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -177,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv8i8( - , - , - , - , - iXLen); - define @intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8: ; 
CHECK: # %bb.0: # %entry @@ -208,11 +160,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv16i8( - , - , - iXLen); - define @intrinsic_vmslt_vv_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -228,13 +175,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv16i8( - , - , - , - , - iXLen); - define @intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -259,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv32i8( - , - , - iXLen); - define @intrinsic_vmslt_vv_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -279,13 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv32i8( - , - , - , - , - iXLen); - define @intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -310,11 +238,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv1i16( - , - , - iXLen); - define @intrinsic_vmslt_vv_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -330,13 +253,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv1i16( - , - , - , - , - iXLen); - define @intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -361,11 +277,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv2i16( - , - , - iXLen); - define @intrinsic_vmslt_vv_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -381,13 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv2i16( - , - , - , - , - iXLen); - define @intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, 
iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -412,11 +316,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv4i16( - , - , - iXLen); - define @intrinsic_vmslt_vv_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -432,13 +331,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -463,11 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv8i16( - , - , - iXLen); - define @intrinsic_vmslt_vv_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -483,13 +370,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv8i16( - , - , - , - , - iXLen); - define @intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -514,11 +394,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv16i16( - , - , - iXLen); - define @intrinsic_vmslt_vv_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -534,13 +409,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv16i16( - , - , - , - , - iXLen); - define @intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -565,11 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv1i32( - , - , - iXLen); - define @intrinsic_vmslt_vv_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -585,13 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv1i32( - , - 
, - , - , - iXLen); - define @intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -616,11 +472,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv2i32( - , - , - iXLen); - define @intrinsic_vmslt_vv_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -636,13 +487,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -667,11 +511,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv4i32( - , - , - iXLen); - define @intrinsic_vmslt_vv_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -687,13 +526,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv4i32( - , - , - , - , - iXLen); - define @intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -718,11 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv8i32( - , - , - iXLen); - define @intrinsic_vmslt_vv_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -738,13 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv8i32( - , - , - , - , - iXLen); - define @intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -769,11 +589,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv1i64( - , - , - iXLen); - define @intrinsic_vmslt_vv_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ 
-789,13 +604,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vmslt_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -820,11 +628,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv2i64( - , - , - iXLen); - define @intrinsic_vmslt_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -840,13 +643,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv2i64( - , - , - , - , - iXLen); - define @intrinsic_vmslt_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -871,11 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv4i64( - , - , - iXLen); - define @intrinsic_vmslt_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -891,13 +682,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv4i64( - , - , - , - , - iXLen); - define @intrinsic_vmslt_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -922,11 +706,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv1i8.i8( - , - i8, - iXLen); - define @intrinsic_vmslt_vx_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -942,13 +721,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv1i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmslt_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -969,11 +741,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv2i8.i8( - , - i8, - iXLen); - define @intrinsic_vmslt_vx_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vmslt_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -989,13 +756,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv2i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmslt_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1016,11 +776,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv4i8.i8( - , - i8, - iXLen); - define @intrinsic_vmslt_vx_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1036,13 +791,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv4i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmslt_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1063,11 +811,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv8i8.i8( - , - i8, - iXLen); - define @intrinsic_vmslt_vx_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1083,13 +826,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv8i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmslt_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1110,11 +846,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv16i8.i8( - , - i8, - iXLen); - define @intrinsic_vmslt_vx_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1130,13 +861,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv16i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmslt_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1157,11 +881,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv32i8.i8( - , - i8, - iXLen); - define 
@intrinsic_vmslt_vx_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1177,13 +896,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv32i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmslt_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1204,11 +916,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv1i16.i16( - , - i16, - iXLen); - define @intrinsic_vmslt_vx_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1224,13 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv1i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmslt_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1251,11 +951,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv2i16.i16( - , - i16, - iXLen); - define @intrinsic_vmslt_vx_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1271,13 +966,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv2i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmslt_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1298,11 +986,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv4i16.i16( - , - i16, - iXLen); - define @intrinsic_vmslt_vx_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1318,13 +1001,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv4i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmslt_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ 
-1345,11 +1021,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv8i16.i16( - , - i16, - iXLen); - define @intrinsic_vmslt_vx_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1365,13 +1036,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv8i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmslt_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1392,11 +1056,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv16i16.i16( - , - i16, - iXLen); - define @intrinsic_vmslt_vx_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1412,13 +1071,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv16i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmslt_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1439,11 +1091,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv1i32.i32( - , - i32, - iXLen); - define @intrinsic_vmslt_vx_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1459,13 +1106,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv1i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmslt_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1486,11 +1126,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv2i32.i32( - , - i32, - iXLen); - define @intrinsic_vmslt_vx_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1506,13 +1141,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv2i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmslt_mask_vx_nxv2i32_i32( 
%0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1533,11 +1161,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv4i32.i32( - , - i32, - iXLen); - define @intrinsic_vmslt_vx_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1553,13 +1176,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv4i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmslt_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1580,11 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv8i32.i32( - , - i32, - iXLen); - define @intrinsic_vmslt_vx_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1600,13 +1211,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv8i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmslt_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1627,11 +1231,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv1i64.i64( - , - i64, - iXLen); - define @intrinsic_vmslt_vx_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmslt_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1659,13 +1258,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv1i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmslt_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmslt_mask_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1701,11 +1293,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv2i64.i64( - , - i64, - iXLen); - define @intrinsic_vmslt_vx_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmslt_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1733,13 +1320,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmslt.mask.nxv2i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmslt_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmslt_mask_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1775,11 +1355,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.nxv4i64.i64( - , - i64, - iXLen); - define @intrinsic_vmslt_vx_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmslt_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -1807,13 +1382,6 @@ entry: ret %a } -declare @llvm.riscv.vmslt.mask.nxv4i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmslt_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmslt_mask_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsltu.ll b/llvm/test/CodeGen/RISCV/rvv/vmsltu.ll index a37a02848365d..8a909d0c03715 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsltu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsltu.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vmsltu.nxv1i8( - , - , - iXLen); - define @intrinsic_vmsltu_vv_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv1i8( - , - , - , - , - iXLen); - define @intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -55,11 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv2i8( - , - , - iXLen); - define @intrinsic_vmsltu_vv_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,13 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv2i8( - , - , - , - , - iXLen); - define @intrinsic_vmsltu_mask_vv_nxv2i8_nxv2i8( 
%0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -106,11 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv4i8( - , - , - iXLen); - define @intrinsic_vmsltu_vv_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -126,13 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv4i8( - , - , - , - , - iXLen); - define @intrinsic_vmsltu_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -157,11 +121,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv8i8( - , - , - iXLen); - define @intrinsic_vmsltu_vv_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -177,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv8i8( - , - , - , - , - iXLen); - define @intrinsic_vmsltu_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -208,11 +160,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv16i8( - , - , - iXLen); - define @intrinsic_vmsltu_vv_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -228,13 +175,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv16i8( - , - , - , - , - iXLen); - define @intrinsic_vmsltu_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -259,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv32i8( - , - , - iXLen); - define @intrinsic_vmsltu_vv_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -279,13 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv32i8( - 
, - , - , - , - iXLen); - define @intrinsic_vmsltu_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -310,11 +238,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv1i16( - , - , - iXLen); - define @intrinsic_vmsltu_vv_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -330,13 +253,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv1i16( - , - , - , - , - iXLen); - define @intrinsic_vmsltu_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -361,11 +277,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv2i16( - , - , - iXLen); - define @intrinsic_vmsltu_vv_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -381,13 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv2i16( - , - , - , - , - iXLen); - define @intrinsic_vmsltu_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -412,11 +316,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv4i16( - , - , - iXLen); - define @intrinsic_vmsltu_vv_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -432,13 +331,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vmsltu_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -463,11 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv8i16( - , - , - iXLen); - define @intrinsic_vmsltu_vv_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv8i16_nxv8i16: ; 
CHECK: # %bb.0: # %entry @@ -483,13 +370,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv8i16( - , - , - , - , - iXLen); - define @intrinsic_vmsltu_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -514,11 +394,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv16i16( - , - , - iXLen); - define @intrinsic_vmsltu_vv_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -534,13 +409,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv16i16( - , - , - , - , - iXLen); - define @intrinsic_vmsltu_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -565,11 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv1i32( - , - , - iXLen); - define @intrinsic_vmsltu_vv_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -585,13 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv1i32( - , - , - , - , - iXLen); - define @intrinsic_vmsltu_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -616,11 +472,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv2i32( - , - , - iXLen); - define @intrinsic_vmsltu_vv_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -636,13 +487,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vmsltu_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -667,11 +511,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv4i32( - , - , - iXLen); - define 
@intrinsic_vmsltu_vv_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -687,13 +526,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv4i32( - , - , - , - , - iXLen); - define @intrinsic_vmsltu_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -718,11 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv8i32( - , - , - iXLen); - define @intrinsic_vmsltu_vv_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -738,13 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv8i32( - , - , - , - , - iXLen); - define @intrinsic_vmsltu_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -769,11 +589,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv1i64( - , - , - iXLen); - define @intrinsic_vmsltu_vv_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -789,13 +604,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vmsltu_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -820,11 +628,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv2i64( - , - , - iXLen); - define @intrinsic_vmsltu_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -840,13 +643,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv2i64( - , - , - , - , - iXLen); - define @intrinsic_vmsltu_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry 
@@ -871,11 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv4i64( - , - , - iXLen); - define @intrinsic_vmsltu_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -891,13 +682,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv4i64( - , - , - , - , - iXLen); - define @intrinsic_vmsltu_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -922,11 +706,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv1i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsltu_vx_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -942,13 +721,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv1i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsltu_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -969,11 +741,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv2i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsltu_vx_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -989,13 +756,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv2i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsltu_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1016,11 +776,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv4i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsltu_vx_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1036,13 +791,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv4i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsltu_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1063,11 +811,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv8i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsltu_vx_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1083,13 +826,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv8i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsltu_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1110,11 +846,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv16i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsltu_vx_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1130,13 +861,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv16i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsltu_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1157,11 +881,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv32i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsltu_vx_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1177,13 +896,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv32i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsltu_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1204,11 +916,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv1i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsltu_vx_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1224,13 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv1i16.i16( - , - , - i16, - , - 
iXLen); - define @intrinsic_vmsltu_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1251,11 +951,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv2i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsltu_vx_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1271,13 +966,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv2i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsltu_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1298,11 +986,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv4i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsltu_vx_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1318,13 +1001,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv4i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsltu_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1345,11 +1021,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv8i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsltu_vx_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1365,13 +1036,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv8i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsltu_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1392,11 +1056,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv16i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsltu_vx_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmsltu_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1412,13 +1071,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv16i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsltu_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1439,11 +1091,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv1i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsltu_vx_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1459,13 +1106,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv1i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsltu_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1486,11 +1126,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv2i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsltu_vx_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1506,13 +1141,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv2i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsltu_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1533,11 +1161,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv4i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsltu_vx_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1553,13 +1176,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv4i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsltu_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1580,11 +1196,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmsltu.nxv8i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsltu_vx_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1600,13 +1211,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv8i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsltu_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1627,11 +1231,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv1i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsltu_vx_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsltu_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1659,13 +1258,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv1i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmsltu_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmsltu_mask_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1701,11 +1293,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv2i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsltu_vx_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsltu_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1733,13 +1320,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv2i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmsltu_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmsltu_mask_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1775,11 +1355,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.nxv4i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsltu_vx_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsltu_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -1807,13 +1382,6 @@ entry: ret %a } -declare @llvm.riscv.vmsltu.mask.nxv4i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmsltu_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind 
{ ; RV32-LABEL: intrinsic_vmsltu_mask_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsne.ll b/llvm/test/CodeGen/RISCV/rvv/vmsne.ll index ed41a18dcc8d3..7e27d98cf9161 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsne.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsne.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vmsne.nxv1i8( - , - , - iXLen); - define @intrinsic_vmsne_vv_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv1i8( - , - , - , - , - iXLen); - define @intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -55,11 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv2i8( - , - , - iXLen); - define @intrinsic_vmsne_vv_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,13 +58,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv2i8( - , - , - , - , - iXLen); - define @intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -106,11 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv4i8( - , - , - iXLen); - define @intrinsic_vmsne_vv_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -126,13 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv4i8( - , - , - , - , - iXLen); - define @intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -157,11 +121,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmsne.nxv8i8( - , - , - iXLen); - define @intrinsic_vmsne_vv_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -177,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv8i8( - , - , - , - , - iXLen); - define @intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -208,11 +160,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv16i8( - , - , - iXLen); - define @intrinsic_vmsne_vv_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -228,13 +175,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv16i8( - , - , - , - , - iXLen); - define @intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -259,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv32i8( - , - , - iXLen); - define @intrinsic_vmsne_vv_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -279,13 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv32i8( - , - , - , - , - iXLen); - define @intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -310,11 +238,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv1i16( - , - , - iXLen); - define @intrinsic_vmsne_vv_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -330,13 +253,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv1i16( - , - , - , - , - iXLen); - define @intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16: ; 
CHECK: # %bb.0: # %entry @@ -361,11 +277,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv2i16( - , - , - iXLen); - define @intrinsic_vmsne_vv_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -381,13 +292,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv2i16( - , - , - , - , - iXLen); - define @intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -412,11 +316,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv4i16( - , - , - iXLen); - define @intrinsic_vmsne_vv_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -432,13 +331,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -463,11 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv8i16( - , - , - iXLen); - define @intrinsic_vmsne_vv_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -483,13 +370,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv8i16( - , - , - , - , - iXLen); - define @intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -514,11 +394,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv16i16( - , - , - iXLen); - define @intrinsic_vmsne_vv_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -534,13 +409,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv16i16( - , - , - , - , - iXLen); - define @intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16( %0, %1, 
%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -565,11 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv1i32( - , - , - iXLen); - define @intrinsic_vmsne_vv_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -585,13 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv1i32( - , - , - , - , - iXLen); - define @intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -616,11 +472,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv2i32( - , - , - iXLen); - define @intrinsic_vmsne_vv_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -636,13 +487,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -667,11 +511,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv4i32( - , - , - iXLen); - define @intrinsic_vmsne_vv_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -687,13 +526,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv4i32( - , - , - , - , - iXLen); - define @intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -718,11 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv8i32( - , - , - iXLen); - define @intrinsic_vmsne_vv_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -738,13 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv8i32( - , - 
, - , - , - iXLen); - define @intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -769,11 +589,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv1i64( - , - , - iXLen); - define @intrinsic_vmsne_vv_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -789,13 +604,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vmsne_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -820,11 +628,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv2i64( - , - , - iXLen); - define @intrinsic_vmsne_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -840,13 +643,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv2i64( - , - , - , - , - iXLen); - define @intrinsic_vmsne_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -871,11 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv4i64( - , - , - iXLen); - define @intrinsic_vmsne_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -891,13 +682,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv4i64( - , - , - , - , - iXLen); - define @intrinsic_vmsne_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -922,11 +706,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv1i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsne_vx_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -942,13 
+721,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv1i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsne_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -969,11 +741,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv2i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsne_vx_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -989,13 +756,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv2i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsne_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1016,11 +776,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv4i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsne_vx_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1036,13 +791,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv4i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsne_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1063,11 +811,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv8i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsne_vx_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1083,13 +826,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv8i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsne_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1110,11 +846,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv16i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsne_vx_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmsne_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1130,13 +861,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv16i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsne_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1157,11 +881,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv32i8.i8( - , - i8, - iXLen); - define @intrinsic_vmsne_vx_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1177,13 +896,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv32i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vmsne_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1204,11 +916,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv1i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsne_vx_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1224,13 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv1i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsne_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1251,11 +951,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv2i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsne_vx_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1271,13 +966,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv2i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsne_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1298,11 +986,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv4i16.i16( - , - i16, - 
iXLen); - define @intrinsic_vmsne_vx_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1318,13 +1001,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv4i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsne_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1345,11 +1021,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv8i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsne_vx_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1365,13 +1036,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv8i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsne_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1392,11 +1056,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv16i16.i16( - , - i16, - iXLen); - define @intrinsic_vmsne_vx_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1412,13 +1071,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv16i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vmsne_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1439,11 +1091,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv1i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsne_vx_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1459,13 +1106,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv1i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsne_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmsne_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1486,11 +1126,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv2i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsne_vx_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1506,13 +1141,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv2i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsne_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1533,11 +1161,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv4i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsne_vx_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1553,13 +1176,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv4i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsne_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1580,11 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv8i32.i32( - , - i32, - iXLen); - define @intrinsic_vmsne_vx_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1600,13 +1211,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv8i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vmsne_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1627,11 +1231,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv1i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsne_vx_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsne_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1659,13 +1258,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv1i64.i64( - , - , - i64, - 
, - iXLen); - define @intrinsic_vmsne_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmsne_mask_vx_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1701,11 +1293,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv2i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsne_vx_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsne_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1733,13 +1320,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv2i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmsne_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmsne_mask_vx_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1775,11 +1355,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.nxv4i64.i64( - , - i64, - iXLen); - define @intrinsic_vmsne_vx_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmsne_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -1807,13 +1382,6 @@ entry: ret %a } -declare @llvm.riscv.vmsne.mask.nxv4i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vmsne_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmsne_mask_vx_nxv4i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsof.ll b/llvm/test/CodeGen/RISCV/rvv/vmsof.ll index 4b818a2b1e58f..4620ac5cc5c14 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsof.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsof.ll @@ -4,10 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vmsof.nxv1i1( - , - iXLen); - define @intrinsic_vmsof_m_nxv1i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsof_m_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -22,12 +18,6 @@ entry: ret %a } -declare @llvm.riscv.vmsof.mask.nxv1i1( - , - , - , - iXLen); - define @intrinsic_vmsof_mask_m_nxv1i1_nxv1i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv1i1_nxv1i1: ; CHECK: # %bb.0: # 
%entry @@ -46,10 +36,6 @@ entry: ret %a } -declare @llvm.riscv.vmsof.nxv2i1( - , - iXLen); - define @intrinsic_vmsof_m_nxv2i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsof_m_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -64,12 +50,6 @@ entry: ret %a } -declare @llvm.riscv.vmsof.mask.nxv2i1( - , - , - , - iXLen); - define @intrinsic_vmsof_mask_m_nxv2i1_nxv2i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv2i1_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -88,10 +68,6 @@ entry: ret %a } -declare @llvm.riscv.vmsof.nxv4i1( - , - iXLen); - define @intrinsic_vmsof_m_nxv4i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsof_m_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -106,12 +82,6 @@ entry: ret %a } -declare @llvm.riscv.vmsof.mask.nxv4i1( - , - , - , - iXLen); - define @intrinsic_vmsof_mask_m_nxv4i1_nxv4i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv4i1_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -130,10 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vmsof.nxv8i1( - , - iXLen); - define @intrinsic_vmsof_m_nxv8i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsof_m_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -148,12 +114,6 @@ entry: ret %a } -declare @llvm.riscv.vmsof.mask.nxv8i1( - , - , - , - iXLen); - define @intrinsic_vmsof_mask_m_nxv8i1_nxv8i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv8i1_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -172,10 +132,6 @@ entry: ret %a } -declare @llvm.riscv.vmsof.nxv16i1( - , - iXLen); - define @intrinsic_vmsof_m_nxv16i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsof_m_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -190,12 +146,6 @@ entry: ret %a } -declare @llvm.riscv.vmsof.mask.nxv16i1( - , - , - , - iXLen); - define @intrinsic_vmsof_mask_m_nxv16i1_nxv16i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv16i1_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -214,10 +164,6 @@ entry: ret %a } -declare @llvm.riscv.vmsof.nxv32i1( - , - iXLen); - 
define @intrinsic_vmsof_m_nxv32i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsof_m_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -232,12 +178,6 @@ entry: ret %a } -declare @llvm.riscv.vmsof.mask.nxv32i1( - , - , - , - iXLen); - define @intrinsic_vmsof_mask_m_nxv32i1_nxv32i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv32i1_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -256,10 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vmsof.nxv64i1( - , - iXLen); - define @intrinsic_vmsof_m_nxv64i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsof_m_nxv64i1: ; CHECK: # %bb.0: # %entry @@ -274,12 +210,6 @@ entry: ret %a } -declare @llvm.riscv.vmsof.mask.nxv64i1( - , - , - , - iXLen); - define @intrinsic_vmsof_mask_m_nxv64i1_nxv64i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv64i1_nxv64i1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-vp-mask.ll index 419b55124a501..517808ee98d60 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmul-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmul-vp-mask.ll @@ -4,9 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK - -declare @llvm.vp.mul.nxv2i1(, , , i32) - define @vmul_vv_nxv2i1( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv2i1: ; CHECK: # %bb.0: @@ -17,8 +14,6 @@ define @vmul_vv_nxv2i1( %va, %v } -declare @llvm.vp.mul.nxv4i1(, , , i32) - define @vmul_vv_nxv4i1( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv4i1: ; CHECK: # %bb.0: @@ -29,8 +24,6 @@ define @vmul_vv_nxv4i1( %va, %v } -declare @llvm.vp.mul.nxv8i1(, , , i32) - define @vmul_vv_nxv8i1( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv8i1: ; CHECK: # %bb.0: @@ -41,8 +34,6 @@ define @vmul_vv_nxv8i1( %va, %v } -declare @llvm.vp.mul.nxv16i1(, , , i32) - define @vmul_vv_nxv16i1( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv16i1: ; CHECK: # 
%bb.0: @@ -53,8 +44,6 @@ define @vmul_vv_nxv16i1( %va, %v } -declare @llvm.vp.mul.nxv32i1(, , , i32) - define @vmul_vv_nxv32i1( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv32i1: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll index 14a236e071551..8d690bc71a2ef 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.mul.nxv8i7(, , , i32) - define @vmul_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vx_nxv8i7: ; CHECK: # %bb.0: @@ -18,8 +16,6 @@ define @vmul_vx_nxv8i7( %a, i7 signext %b, %v } -declare @llvm.vp.mul.nxv1i8(, , , i32) - define @vmul_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv1i8: ; CHECK: # %bb.0: @@ -64,8 +60,6 @@ define @vmul_vx_nxv1i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.mul.nxv2i8(, , , i32) - define @vmul_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv2i8: ; CHECK: # %bb.0: @@ -110,8 +104,6 @@ define @vmul_vx_nxv2i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.mul.nxv4i8(, , , i32) - define @vmul_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv4i8: ; CHECK: # %bb.0: @@ -156,8 +148,6 @@ define @vmul_vx_nxv4i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.mul.nxv8i8(, , , i32) - define @vmul_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv8i8: ; CHECK: # %bb.0: @@ -202,8 +192,6 @@ define @vmul_vx_nxv8i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.mul.nxv16i8(, , , i32) - define @vmul_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv16i8: ; CHECK: # %bb.0: @@ -248,8 +236,6 @@ define @vmul_vx_nxv16i8_unmasked( %va, i8 % ret %v } -declare @llvm.vp.mul.nxv32i8(, , , i32) - define @vmul_vv_nxv32i8( %va, %b, %m, i32 zeroext 
%evl) { ; CHECK-LABEL: vmul_vv_nxv32i8: ; CHECK: # %bb.0: @@ -294,8 +280,6 @@ define @vmul_vx_nxv32i8_unmasked( %va, i8 % ret %v } -declare @llvm.vp.mul.nxv64i8(, , , i32) - define @vmul_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv64i8: ; CHECK: # %bb.0: @@ -340,8 +324,6 @@ define @vmul_vx_nxv64i8_unmasked( %va, i8 % ret %v } -declare @llvm.vp.mul.nxv1i16(, , , i32) - define @vmul_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv1i16: ; CHECK: # %bb.0: @@ -386,8 +368,6 @@ define @vmul_vx_nxv1i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.mul.nxv2i16(, , , i32) - define @vmul_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv2i16: ; CHECK: # %bb.0: @@ -432,8 +412,6 @@ define @vmul_vx_nxv2i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.mul.nxv4i16(, , , i32) - define @vmul_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv4i16: ; CHECK: # %bb.0: @@ -478,8 +456,6 @@ define @vmul_vx_nxv4i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.mul.nxv8i16(, , , i32) - define @vmul_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv8i16: ; CHECK: # %bb.0: @@ -524,8 +500,6 @@ define @vmul_vx_nxv8i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.mul.nxv16i16(, , , i32) - define @vmul_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv16i16: ; CHECK: # %bb.0: @@ -570,8 +544,6 @@ define @vmul_vx_nxv16i16_unmasked( %va, i ret %v } -declare @llvm.vp.mul.nxv32i16(, , , i32) - define @vmul_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv32i16: ; CHECK: # %bb.0: @@ -616,8 +588,6 @@ define @vmul_vx_nxv32i16_unmasked( %va, i ret %v } -declare @llvm.vp.mul.nxv1i32(, , , i32) - define @vmul_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv1i32: ; CHECK: # %bb.0: @@ -662,8 +632,6 @@ define @vmul_vx_nxv1i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.mul.nxv2i32(, , , i32) - define @vmul_vv_nxv2i32( %va, %b, 
%m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv2i32: ; CHECK: # %bb.0: @@ -708,8 +676,6 @@ define @vmul_vx_nxv2i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.mul.nxv4i32(, , , i32) - define @vmul_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv4i32: ; CHECK: # %bb.0: @@ -754,8 +720,6 @@ define @vmul_vx_nxv4i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.mul.nxv7i32(, , , i32) - define @vmul_vv_nxv7i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv7i32: ; CHECK: # %bb.0: @@ -800,8 +764,6 @@ define @vmul_vx_nxv7i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.mul.nxv8i32(, , , i32) - define @vmul_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv8i32: ; CHECK: # %bb.0: @@ -846,8 +808,6 @@ define @vmul_vx_nxv8i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.mul.nxv16i32(, , , i32) - define @vmul_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv16i32: ; CHECK: # %bb.0: @@ -904,8 +864,6 @@ define @vmul_vx_nxv16i32_unmasked( %va, i ret %v } -declare @llvm.vp.mul.nxv1i64(, , , i32) - define @vmul_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv1i64: ; CHECK: # %bb.0: @@ -978,8 +936,6 @@ define @vmul_vx_nxv1i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.mul.nxv2i64(, , , i32) - define @vmul_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1052,8 +1008,6 @@ define @vmul_vx_nxv2i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.mul.nxv4i64(, , , i32) - define @vmul_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1126,8 +1080,6 @@ define @vmul_vx_nxv4i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.mul.nxv8i64(, , , i32) - define @vmul_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv8i64: ; CHECK: # %bb.0: @@ -1350,8 +1302,6 @@ define @vmul_vx_negpow2_nxv8i64_unmasked( % ret %v } -declare @llvm.vp.shl.nxv8i64(, , , i32) - define 
@vmul_vshl_vx_nxv8i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vshl_vx_nxv8i64: ; CHECK: # %bb.0: @@ -1416,8 +1366,6 @@ define @vmul_vshl_vv_nxv8i64_unmasked( %va, ret %v } -declare @llvm.vp.add.nxv8i64(, , , i32) - define @vmul_vadd_vx_nxv8i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vadd_vx_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul.ll b/llvm/test/CodeGen/RISCV/rvv/vmul.ll index 913232f7aedfc..90b44c6e6800c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmul.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmul.ll @@ -8,12 +8,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+zve64d \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vmul.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -30,13 +24,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -54,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vmul_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -76,13 +57,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -100,12 +74,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vmul_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmul_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -122,13 +90,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -146,12 +107,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vmul_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -168,13 +123,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +140,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vmul_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -214,13 +156,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -238,12 +173,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vmul_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -260,13 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmul_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -284,12 +206,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv64i8.nxv64i8( - , - , - , - iXLen); - define @intrinsic_vmul_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -306,13 +222,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -331,12 +240,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vmul_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -353,13 +256,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -377,12 +273,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vmul_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -399,13 +289,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -423,12 +306,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vmul_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmul_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -445,13 +322,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -469,12 +339,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vmul_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -491,13 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -515,12 +372,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vmul_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -537,13 +388,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -561,12 +405,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv32i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vmul_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -583,13 +421,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, 
%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -608,12 +439,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vmul_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -630,13 +455,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -654,12 +472,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vmul_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -676,13 +488,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -700,12 +505,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vmul_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -722,13 +521,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -746,12 +538,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vmul_vv_nxv8i32_nxv8i32_nxv8i32( %0, 
%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -768,13 +554,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -792,12 +571,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv16i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vmul_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -814,13 +587,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -839,12 +605,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vmul_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -861,13 +621,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -885,12 +638,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv2i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vmul_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -907,13 +654,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, iXLen); - define 
@intrinsic_vmul_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -931,12 +671,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv4i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vmul_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -953,13 +687,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -977,12 +704,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv8i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vmul_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -999,13 +720,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1024,12 +738,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv1i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmul_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1046,13 +754,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1070,12 +771,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv2i8.i8( - , - , - i8, - iXLen); - define 
@intrinsic_vmul_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1092,13 +787,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1116,12 +804,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmul_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1138,13 +820,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1162,12 +837,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmul_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1184,13 +853,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1208,12 +870,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmul_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1230,13 +886,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vmul_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1254,12 +903,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmul_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1276,13 +919,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1300,12 +936,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv64i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmul_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1322,13 +952,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1346,12 +969,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmul_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1368,13 +985,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1392,12 +1002,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmul_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv2i16_nxv2i16_i16: ; 
CHECK: # %bb.0: # %entry @@ -1414,13 +1018,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1438,12 +1035,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmul_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1460,13 +1051,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1484,12 +1068,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmul_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1506,13 +1084,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1530,12 +1101,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmul_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1552,13 +1117,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmul_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1576,12 +1134,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv32i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmul_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1598,13 +1150,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1622,12 +1167,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmul_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1644,13 +1183,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1668,12 +1200,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmul_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1690,13 +1216,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1714,12 +1233,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmul_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmul_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1736,13 +1249,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1760,12 +1266,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmul_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1782,13 +1282,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1806,12 +1299,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv16i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmul_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1828,13 +1315,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1852,12 +1332,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv1i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmul_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmul_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1886,13 +1360,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: 
intrinsic_vmul_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1922,12 +1389,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv2i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmul_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmul_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1956,13 +1417,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmul_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1992,12 +1446,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv4i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmul_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmul_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2026,13 +1474,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmul_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2062,12 +1503,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.nxv8i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmul_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmul_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2096,13 +1531,6 @@ entry: ret %a } -declare @llvm.riscv.vmul.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vmul_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmul_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulh.ll b/llvm/test/CodeGen/RISCV/rvv/vmulh.ll index 12d83ba58898f..bd2eac51207c1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmulh.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmulh.ll @@ -10,12 +10,6 @@ ; 
ZVE64D: LLVM ERROR: Cannot select: intrinsic %llvm.riscv.vmulh -declare @llvm.riscv.vmulh.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -32,13 +26,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -56,12 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vmulh_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -102,12 +76,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vmulh_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -124,13 +92,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -148,12 +109,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vmulh_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -170,13 +125,6 @@ entry: ret 
%a } -declare @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -194,12 +142,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vmulh_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -216,13 +158,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -240,12 +175,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vmulh_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -262,13 +191,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv64i8.nxv64i8( - , - , - , - iXLen); - define @intrinsic_vmulh_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -308,13 +224,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry 
@@ -333,12 +242,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vmulh_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -355,13 +258,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -379,12 +275,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vmulh_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -401,13 +291,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -425,12 +308,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vmulh_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -447,13 +324,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -471,12 +341,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vmulh_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # 
%entry @@ -493,13 +357,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -517,12 +374,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vmulh_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -539,13 +390,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -563,12 +407,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv32i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vmulh_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -585,13 +423,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -610,12 +441,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vmulh_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -632,13 +457,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -656,12 +474,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vmulh_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -678,13 +490,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -702,12 +507,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vmulh_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -724,13 +523,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -748,12 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vmulh_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -770,13 +556,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -794,12 +573,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv16i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vmulh_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen 
%2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -816,13 +589,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -841,12 +607,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vmulh_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -863,13 +623,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -887,12 +640,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv2i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vmulh_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -909,13 +656,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -933,12 +673,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv4i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vmulh_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -955,13 +689,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, iXLen); - define 
@intrinsic_vmulh_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -979,12 +706,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv8i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vmulh_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1001,13 +722,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1026,12 +740,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv1i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmulh_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1048,13 +756,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1072,12 +773,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmulh_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1094,13 +789,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1118,12 +806,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv4i8.i8( - , - , - i8, - iXLen); - define 
@intrinsic_vmulh_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1140,13 +822,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1164,12 +839,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmulh_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1186,13 +855,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1210,12 +872,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmulh_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1232,13 +888,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1256,12 +905,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmulh_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1278,13 +921,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, 
%3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1302,12 +938,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv64i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmulh_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1324,13 +954,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1348,12 +971,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmulh_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1370,13 +987,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1394,12 +1004,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmulh_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1416,13 +1020,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1440,12 +1037,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmulh_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { 
; CHECK-LABEL: intrinsic_vmulh_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1462,13 +1053,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1486,12 +1070,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmulh_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1508,13 +1086,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1532,12 +1103,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmulh_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1554,13 +1119,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1578,12 +1136,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv32i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmulh_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1600,13 +1152,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, 
i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1624,12 +1169,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmulh_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1646,13 +1185,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1670,12 +1202,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmulh_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1692,13 +1218,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1716,12 +1235,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmulh_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1738,13 +1251,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1762,12 +1268,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmulh_vx_nxv8i32_nxv8i32_i32( %0, i32 
%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1784,13 +1284,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1808,12 +1301,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv16i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmulh_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1830,13 +1317,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1854,12 +1334,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv1i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmulh_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmulh_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1888,13 +1362,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmulh_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1924,12 +1391,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv2i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmulh_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmulh_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1958,13 +1419,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, iXLen); - define 
@intrinsic_vmulh_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmulh_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1994,12 +1448,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv4i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmulh_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmulh_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2028,13 +1476,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmulh_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2064,12 +1505,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.nxv8i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmulh_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmulh_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2098,13 +1533,6 @@ entry: ret %a } -declare @llvm.riscv.vmulh.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vmulh_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmulh_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhsu.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhsu.ll index 5a785d8a678b1..90ec0a6766e24 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmulhsu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmulhsu.ll @@ -10,12 +10,6 @@ ; ZVE64D: LLVM ERROR: Cannot select: intrinsic %llvm.riscv.vmulhsu -declare @llvm.riscv.vmulhsu.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vmulhsu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -32,13 +26,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen); - define 
@intrinsic_vmulhsu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -56,12 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vmulhsu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -102,12 +76,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vmulhsu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -124,13 +92,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -148,12 +109,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vmulhsu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -170,13 +125,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -194,12 +142,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv16i8.nxv16i8( - , - , - , - iXLen); - 
define @intrinsic_vmulhsu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -216,13 +158,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -240,12 +175,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vmulhsu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -262,13 +191,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv64i8.nxv64i8( - , - , - , - iXLen); - define @intrinsic_vmulhsu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -308,13 +224,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -333,12 +242,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vmulhsu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -355,13 +258,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -379,12 +275,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vmulhsu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -401,13 +291,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -425,12 +308,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vmulhsu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -447,13 +324,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -471,12 +341,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vmulhsu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -493,13 +357,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmulhsu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -517,12 +374,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vmulhsu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -539,13 +390,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -563,12 +407,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv32i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vmulhsu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -585,13 +423,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -610,12 +441,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vmulhsu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -632,13 +457,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -656,12 +474,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv2i32.nxv2i32( - , - , - , - iXLen); - define 
@intrinsic_vmulhsu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -678,13 +490,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -702,12 +507,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vmulhsu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -724,13 +523,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -748,12 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vmulhsu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -770,13 +556,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -794,12 +573,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv16i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vmulhsu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -816,13 +589,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -841,12 +607,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vmulhsu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -863,13 +623,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -887,12 +640,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv2i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vmulhsu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -909,13 +656,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -933,12 +673,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv4i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vmulhsu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -955,13 +689,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmulhsu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -979,12 +706,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv8i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vmulhsu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1001,13 +722,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1026,12 +740,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv1i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmulhsu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1048,13 +756,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1072,12 +773,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmulhsu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1094,13 +789,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1118,12 +806,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmulhsu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmulhsu_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1140,13 +822,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1164,12 +839,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmulhsu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1186,13 +855,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1210,12 +872,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmulhsu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1232,13 +888,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1256,12 +905,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmulhsu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1278,13 +921,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmulhsu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1302,12 +938,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv64i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmulhsu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1324,13 +954,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1348,12 +971,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmulhsu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1370,13 +987,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1394,12 +1004,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmulhsu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1416,13 +1020,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1440,12 +1037,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmulhsu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind 
{ ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1462,13 +1053,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1486,12 +1070,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmulhsu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1508,13 +1086,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1532,12 +1103,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmulhsu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1554,13 +1119,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1578,12 +1136,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv32i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmulhsu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1600,13 +1152,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, iXLen); - define 
@intrinsic_vmulhsu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1624,12 +1169,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmulhsu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1646,13 +1185,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1670,12 +1202,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmulhsu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1692,13 +1218,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1716,12 +1235,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmulhsu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1738,13 +1251,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1762,12 +1268,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmulhsu.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmulhsu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1784,13 +1284,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1808,12 +1301,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv16i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmulhsu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1830,13 +1317,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1854,12 +1334,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv1i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmulhsu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmulhsu_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1888,13 +1362,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmulhsu_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1924,12 +1391,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv2i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmulhsu_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmulhsu_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1958,13 
+1419,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmulhsu_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1994,12 +1448,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv4i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmulhsu_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmulhsu_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2028,13 +1476,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmulhsu_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2064,12 +1505,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.nxv8i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmulhsu_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmulhsu_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2098,13 +1533,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhsu.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vmulhsu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmulhsu_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhu.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhu.ll index 24b47da8d0b4b..8c0d7ffb5084c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmulhu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmulhu.ll @@ -10,12 +10,6 @@ ; ZVE64D: LLVM ERROR: Cannot select: intrinsic %llvm.riscv.vmulhu -declare @llvm.riscv.vmulhu.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vmulhu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ 
-32,13 +26,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -56,12 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vmulhu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -78,13 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -102,12 +76,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vmulhu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -124,13 +92,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -148,12 +109,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vmulhu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -170,13 +125,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ 
-194,12 +142,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vmulhu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -216,13 +158,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -240,12 +175,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vmulhu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -262,13 +191,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv64i8.nxv64i8( - , - , - , - iXLen); - define @intrinsic_vmulhu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -308,13 +224,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -333,12 +242,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vmulhu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv1i16_nxv1i16_nxv1i16: ; 
CHECK: # %bb.0: # %entry @@ -355,13 +258,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -379,12 +275,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vmulhu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -401,13 +291,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -425,12 +308,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vmulhu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -447,13 +324,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -471,12 +341,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vmulhu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -493,13 +357,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -517,12 +374,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vmulhu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -539,13 +390,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -563,12 +407,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv32i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vmulhu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -585,13 +423,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -610,12 +441,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vmulhu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -632,13 +457,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -656,12 +474,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv2i32.nxv2i32( - , - , - , - iXLen); - define 
@intrinsic_vmulhu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -678,13 +490,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -702,12 +507,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vmulhu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -724,13 +523,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -748,12 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vmulhu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -770,13 +556,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -794,12 +573,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv16i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vmulhu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -816,13 +589,6 @@ entry: ret %a } -declare 
@llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -841,12 +607,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vmulhu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -863,13 +623,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -887,12 +640,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv2i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vmulhu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -909,13 +656,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -933,12 +673,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv4i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vmulhu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -955,13 +689,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; 
CHECK: # %bb.0: # %entry @@ -979,12 +706,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv8i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vmulhu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1001,13 +722,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1026,12 +740,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv1i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmulhu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1048,13 +756,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1072,12 +773,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmulhu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1094,13 +789,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1118,12 +806,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmulhu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1140,13 
+822,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1164,12 +839,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmulhu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1186,13 +855,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1210,12 +872,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmulhu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1232,13 +888,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1256,12 +905,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmulhu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1278,13 +921,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1302,12 +938,6 @@ entry: 
ret %a } -declare @llvm.riscv.vmulhu.nxv64i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vmulhu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1324,13 +954,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1348,12 +971,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmulhu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1370,13 +987,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1394,12 +1004,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmulhu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1416,13 +1020,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1440,12 +1037,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmulhu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1462,13 +1053,6 @@ entry: ret %a 
} -declare @llvm.riscv.vmulhu.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1486,12 +1070,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmulhu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1508,13 +1086,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1532,12 +1103,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmulhu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1554,13 +1119,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1578,12 +1136,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv32i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vmulhu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1600,13 +1152,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv32i16_nxv32i16_i16: ; 
CHECK: # %bb.0: # %entry @@ -1624,12 +1169,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmulhu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1646,13 +1185,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1670,12 +1202,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmulhu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1692,13 +1218,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1716,12 +1235,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmulhu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1738,13 +1251,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1762,12 +1268,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmulhu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmulhu_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1784,13 +1284,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1808,12 +1301,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv16i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vmulhu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1830,13 +1317,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1854,12 +1334,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv1i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmulhu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmulhu_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1888,13 +1362,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmulhu_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1924,12 +1391,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv2i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmulhu_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmulhu_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1958,13 +1419,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, 
%3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmulhu_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1994,12 +1448,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv4i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmulhu_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmulhu_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2028,13 +1476,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmulhu_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2064,12 +1505,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.nxv8i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vmulhu_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmulhu_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2098,13 +1533,6 @@ entry: ret %a } -declare @llvm.riscv.vmulhu.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vmulhu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vmulhu_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.s.x.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.s.x.ll index 4629db26ca034..4afe4c360be31 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmv.s.x.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmv.s.x.ll @@ -4,8 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vmv.s.x.nxv1i8(, i8, iXLen); - define @intrinsic_vmv.s.x_x_nxv1i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -17,8 +15,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.s.x.nxv2i8(, i8, iXLen); - define @intrinsic_vmv.s.x_x_nxv2i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmv.s.x_x_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -30,8 +26,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.s.x.nxv4i8(, i8, iXLen); - define @intrinsic_vmv.s.x_x_nxv4i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -43,8 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.s.x.nxv8i8(, i8, iXLen); - define @intrinsic_vmv.s.x_x_nxv8i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -56,8 +48,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.s.x.nxv16i8(, i8, iXLen); - define @intrinsic_vmv.s.x_x_nxv16i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -69,8 +59,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.s.x.nxv32i8(, i8, iXLen); - define @intrinsic_vmv.s.x_x_nxv32i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -82,8 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.s.x.nxv64i8(, i8, iXLen); - define @intrinsic_vmv.s.x_x_nxv64i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -95,8 +81,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.s.x.nxv1i16(, i16, iXLen); - define @intrinsic_vmv.s.x_x_nxv1i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -108,8 +92,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.s.x.nxv2i16(, i16, iXLen); - define @intrinsic_vmv.s.x_x_nxv2i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -121,8 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.s.x.nxv4i16(, i16, iXLen); - define @intrinsic_vmv.s.x_x_nxv4i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -134,8 +114,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.s.x.nxv8i16(, i16, iXLen); - define 
@intrinsic_vmv.s.x_x_nxv8i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -147,8 +125,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.s.x.nxv16i16(, i16, iXLen); - define @intrinsic_vmv.s.x_x_nxv16i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -160,8 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.s.x.nxv32i16(, i16, iXLen); - define @intrinsic_vmv.s.x_x_nxv32i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -173,8 +147,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.s.x.nxv1i32(, i32, iXLen); - define @intrinsic_vmv.s.x_x_nxv1i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -186,8 +158,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.s.x.nxv2i32(, i32, iXLen); - define @intrinsic_vmv.s.x_x_nxv2i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -199,8 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.s.x.nxv4i32(, i32, iXLen); - define @intrinsic_vmv.s.x_x_nxv4i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -212,8 +180,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.s.x.nxv8i32(, i32, iXLen); - define @intrinsic_vmv.s.x_x_nxv8i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -225,8 +191,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.s.x.nxv16i32(, i32, iXLen); - define @intrinsic_vmv.s.x_x_nxv16i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -238,8 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.s.x.nxv1i64(, i64, iXLen); - define @intrinsic_vmv.s.x_x_nxv1i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmv.s.x_x_nxv1i64: ; RV32: # %bb.0: # %entry @@ -264,8 
+226,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.s.x.nxv2i64(, i64, iXLen); - define @intrinsic_vmv.s.x_x_nxv2i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmv.s.x_x_nxv2i64: ; RV32: # %bb.0: # %entry @@ -290,8 +250,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.s.x.nxv4i64(, i64, iXLen); - define @intrinsic_vmv.s.x_x_nxv4i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmv.s.x_x_nxv4i64: ; RV32: # %bb.0: # %entry @@ -316,8 +274,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.s.x.nxv8i64(, i64, iXLen); - define @intrinsic_vmv.s.x_x_nxv8i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vmv.s.x_x_nxv8i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v.ll index 784b807a6a2e5..bfb44e0944d59 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vmv.v.v.nxv1i8( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -24,11 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv2i8( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -44,11 +34,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv4i8( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -64,11 +49,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv8i8( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -84,11 +64,6 @@ 
entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv16i8( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -104,11 +79,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv32i8( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -124,11 +94,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv64i8( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -144,11 +109,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv1i16( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -164,11 +124,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv2i16( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -184,11 +139,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv4i16( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -204,11 +154,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv8i16( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -224,11 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv16i16( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -244,11 +184,6 @@ entry: ret 
%a } -declare @llvm.riscv.vmv.v.v.nxv32i16( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -264,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv1i32( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -284,11 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv2i32( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -304,11 +229,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv4i32( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -324,11 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv8i32( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -344,11 +259,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv16i32( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -364,11 +274,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv1i64( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -384,11 +289,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv2i64( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -404,11 +304,6 @@ entry: ret %a } 
-declare @llvm.riscv.vmv.v.v.nxv4i64( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -424,11 +319,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv8i64( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -444,11 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv1f16( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -464,11 +349,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv2f16( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -484,11 +364,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv4f16( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -504,11 +379,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv8f16( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -524,11 +394,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv16f16( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -544,11 +409,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv32f16( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -564,11 +424,6 @@ entry: ret %a } 
-declare @llvm.riscv.vmv.v.v.nxv1bf16( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -584,11 +439,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv2bf16( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -604,11 +454,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv4bf16( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -624,11 +469,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv8bf16( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -644,11 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv16bf16( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -664,11 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv32bf16( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv32bf16_nxv32bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -684,11 +514,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv1f32( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -704,11 +529,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv2f32( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -724,11 
+544,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv4f32( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -744,11 +559,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv8f32( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -764,11 +574,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv16f32( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -784,11 +589,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv1f64( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -804,11 +604,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv2f64( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -824,11 +619,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv4f64( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -844,11 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.v.nxv8f64( - , - , - iXLen); - define @intrinsic_vmv.v.v_v_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.v.x.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.v.x.ll index 472cd6ec07e23..79f4bb72c01aa 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmv.v.x.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmv.v.x.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' 
%s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vmv.v.x.nxv1i8( - , - i8, - iXLen); - define @intrinsic_vmv.v.x_x_nxv1i8(i8 %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -24,11 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.x.nxv2i8( - , - i8, - iXLen); - define @intrinsic_vmv.v.x_x_nxv2i8(i8 %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -44,11 +34,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.x.nxv4i8( - , - i8, - iXLen); - define @intrinsic_vmv.v.x_x_nxv4i8(i8 %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -64,11 +49,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.x.nxv8i8( - , - i8, - iXLen); - define @intrinsic_vmv.v.x_x_nxv8i8(i8 %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -84,11 +64,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.x.nxv16i8( - , - i8, - iXLen); - define @intrinsic_vmv.v.x_x_nxv16i8(i8 %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -104,11 +79,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.x.nxv32i8( - , - i8, - iXLen); - define @intrinsic_vmv.v.x_x_nxv32i8(i8 %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -124,11 +94,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.x.nxv64i8( - , - i8, - iXLen); - define @intrinsic_vmv.v.x_x_nxv64i8(i8 %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -144,11 +109,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.x.nxv1i16( - , - i16, - iXLen); - define @intrinsic_vmv.v.x_x_nxv1i16(i16 %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -164,11 +124,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.x.nxv2i16( - , - i16, - 
iXLen); - define @intrinsic_vmv.v.x_x_nxv2i16(i16 %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -184,11 +139,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.x.nxv4i16( - , - i16, - iXLen); - define @intrinsic_vmv.v.x_x_nxv4i16(i16 %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -204,11 +154,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.x.nxv8i16( - , - i16, - iXLen); - define @intrinsic_vmv.v.x_x_nxv8i16(i16 %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -224,11 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.x.nxv16i16( - , - i16, - iXLen); - define @intrinsic_vmv.v.x_x_nxv16i16(i16 %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -244,11 +184,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.x.nxv32i16( - , - i16, - iXLen); - define @intrinsic_vmv.v.x_x_nxv32i16(i16 %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -264,11 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.x.nxv1i32( - , - i32, - iXLen); - define @intrinsic_vmv.v.x_x_nxv1i32(i32 %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -284,11 +214,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.x.nxv2i32( - , - i32, - iXLen); - define @intrinsic_vmv.v.x_x_nxv2i32(i32 %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -304,11 +229,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.x.nxv4i32( - , - i32, - iXLen); - define @intrinsic_vmv.v.x_x_nxv4i32(i32 %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -324,11 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.x.nxv8i32( - , - i32, - iXLen); - define @intrinsic_vmv.v.x_x_nxv8i32(i32 %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i32: ; 
CHECK: # %bb.0: # %entry @@ -344,11 +259,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.x.nxv16i32( - , - i32, - iXLen); - define @intrinsic_vmv.v.x_x_nxv16i32(i32 %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -364,11 +274,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.x.nxv1i64( - , - i64, - iXLen); - define @intrinsic_vmv.v.x_x_nxv1i64(i64 %0, iXLen %1) nounwind { ; RV32-LABEL: intrinsic_vmv.v.x_x_nxv1i64: ; RV32: # %bb.0: # %entry @@ -395,11 +300,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.x.nxv2i64( - , - i64, - iXLen); - define @intrinsic_vmv.v.x_x_nxv2i64(i64 %0, iXLen %1) nounwind { ; RV32-LABEL: intrinsic_vmv.v.x_x_nxv2i64: ; RV32: # %bb.0: # %entry @@ -426,11 +326,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.x.nxv4i64( - , - i64, - iXLen); - define @intrinsic_vmv.v.x_x_nxv4i64(i64 %0, iXLen %1) nounwind { ; RV32-LABEL: intrinsic_vmv.v.x_x_nxv4i64: ; RV32: # %bb.0: # %entry @@ -457,11 +352,6 @@ entry: ret %a } -declare @llvm.riscv.vmv.v.x.nxv8i64( - , - i64, - iXLen); - define @intrinsic_vmv.v.x_x_nxv8i64(i64 %0, iXLen %1) nounwind { ; RV32-LABEL: intrinsic_vmv.v.x_x_nxv8i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.x.s.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.x.s.ll index 0ec9439e04a08..df3cbe101658d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmv.x.s.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmv.x.s.ll @@ -4,8 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare i8 @llvm.riscv.vmv.x.s.nxv1i8() - define signext i8 @intrinsic_vmv.x.s_s_nxv1i8( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -17,8 +15,6 @@ entry: ret i8 %a } -declare i8 @llvm.riscv.vmv.x.s.nxv2i8() - define signext i8 @intrinsic_vmv.x.s_s_nxv2i8( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -30,8 +26,6 @@ entry: ret i8 %a } 
-declare i8 @llvm.riscv.vmv.x.s.nxv4i8() - define signext i8 @intrinsic_vmv.x.s_s_nxv4i8( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -43,8 +37,6 @@ entry: ret i8 %a } -declare i8 @llvm.riscv.vmv.x.s.nxv8i8() - define signext i8 @intrinsic_vmv.x.s_s_nxv8i8( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -56,8 +48,6 @@ entry: ret i8 %a } -declare i8 @llvm.riscv.vmv.x.s.nxv16i8() - define signext i8 @intrinsic_vmv.x.s_s_nxv16i8( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -69,8 +59,6 @@ entry: ret i8 %a } -declare i8 @llvm.riscv.vmv.x.s.nxv32i8() - define signext i8 @intrinsic_vmv.x.s_s_nxv32i8( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -82,8 +70,6 @@ entry: ret i8 %a } -declare i8 @llvm.riscv.vmv.x.s.nxv64i8() - define signext i8 @intrinsic_vmv.x.s_s_nxv64i8( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -95,8 +81,6 @@ entry: ret i8 %a } -declare i16 @llvm.riscv.vmv.x.s.nxv1i16() - define signext i16 @intrinsic_vmv.x.s_s_nxv1i16( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -108,8 +92,6 @@ entry: ret i16 %a } -declare i16 @llvm.riscv.vmv.x.s.nxv2i16() - define signext i16 @intrinsic_vmv.x.s_s_nxv2i16( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -121,8 +103,6 @@ entry: ret i16 %a } -declare i16 @llvm.riscv.vmv.x.s.nxv4i16() - define signext i16 @intrinsic_vmv.x.s_s_nxv4i16( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -134,8 +114,6 @@ entry: ret i16 %a } -declare i16 @llvm.riscv.vmv.x.s.nxv8i16() - define signext i16 @intrinsic_vmv.x.s_s_nxv8i16( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -147,8 +125,6 @@ entry: ret i16 %a } -declare i16 @llvm.riscv.vmv.x.s.nxv16i16() - 
define signext i16 @intrinsic_vmv.x.s_s_nxv16i16( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -160,8 +136,6 @@ entry: ret i16 %a } -declare i16 @llvm.riscv.vmv.x.s.nxv32i16( ) - define signext i16 @intrinsic_vmv.x.s_s_nxv32i16( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -173,8 +147,6 @@ entry: ret i16 %a } -declare i32 @llvm.riscv.vmv.x.s.nxv1i32( ) - define signext i32 @intrinsic_vmv.x.s_s_nxv1i32( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -186,8 +158,6 @@ entry: ret i32 %a } -declare i32 @llvm.riscv.vmv.x.s.nxv2i32( ) - define signext i32 @intrinsic_vmv.x.s_s_nxv2i32( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -199,8 +169,6 @@ entry: ret i32 %a } -declare i32 @llvm.riscv.vmv.x.s.nxv4i32( ) - define signext i32 @intrinsic_vmv.x.s_s_nxv4i32( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -212,8 +180,6 @@ entry: ret i32 %a } -declare i32 @llvm.riscv.vmv.x.s.nxv8i32( ) - define signext i32 @intrinsic_vmv.x.s_s_nxv8i32( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -225,8 +191,6 @@ entry: ret i32 %a } -declare i32 @llvm.riscv.vmv.x.s.nxv16i32( ) - define signext i32 @intrinsic_vmv.x.s_s_nxv16i32( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -238,8 +202,6 @@ entry: ret i32 %a } -declare i64 @llvm.riscv.vmv.x.s.nxv1i64( ) - define i64 @intrinsic_vmv.x.s_s_nxv1i64( %0) nounwind { ; RV32-LABEL: intrinsic_vmv.x.s_s_nxv1i64: ; RV32: # %bb.0: # %entry @@ -260,8 +222,6 @@ entry: ret i64 %a } -declare i64 @llvm.riscv.vmv.x.s.nxv2i64( ) - define i64 @intrinsic_vmv.x.s_s_nxv2i64( %0) nounwind { ; RV32-LABEL: intrinsic_vmv.x.s_s_nxv2i64: ; RV32: # %bb.0: # %entry @@ -282,8 +242,6 @@ entry: ret i64 %a } -declare i64 @llvm.riscv.vmv.x.s.nxv4i64( ) - define i64 
@intrinsic_vmv.x.s_s_nxv4i64( %0) nounwind { ; RV32-LABEL: intrinsic_vmv.x.s_s_nxv4i64: ; RV32: # %bb.0: # %entry @@ -304,8 +262,6 @@ entry: ret i64 %a } -declare i64 @llvm.riscv.vmv.x.s.nxv8i64() - define i64 @intrinsic_vmv.x.s_s_nxv8i64( %0) nounwind { ; RV32-LABEL: intrinsic_vmv.x.s_s_nxv8i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmxnor.ll b/llvm/test/CodeGen/RISCV/rvv/vmxnor.ll index fc1bb4feedc4a..9107a08cb1a05 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmxnor.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmxnor.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vmxnor.nxv1i1( - , - , - iXLen); - define @intrinsic_vmxnor_mm_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxnor_mm_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -24,11 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmxnor.nxv2i1( - , - , - iXLen); - define @intrinsic_vmxnor_mm_nxv2i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxnor_mm_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -44,11 +34,6 @@ entry: ret %a } -declare @llvm.riscv.vmxnor.nxv4i1( - , - , - iXLen); - define @intrinsic_vmxnor_mm_nxv4i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxnor_mm_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -64,11 +49,6 @@ entry: ret %a } -declare @llvm.riscv.vmxnor.nxv8i1( - , - , - iXLen); - define @intrinsic_vmxnor_mm_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxnor_mm_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -84,11 +64,6 @@ entry: ret %a } -declare @llvm.riscv.vmxnor.nxv16i1( - , - , - iXLen); - define @intrinsic_vmxnor_mm_nxv16i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxnor_mm_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -104,11 +79,6 @@ entry: ret %a } -declare @llvm.riscv.vmxnor.nxv32i1( - , - , - iXLen); - define @intrinsic_vmxnor_mm_nxv32i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxnor_mm_nxv32i1: ; CHECK: # %bb.0: # %entry 
@@ -124,11 +94,6 @@ entry: ret %a } -declare @llvm.riscv.vmxnor.nxv64i1( - , - , - iXLen); - define @intrinsic_vmxnor_mm_nxv64i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxnor_mm_nxv64i1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vmxor.ll b/llvm/test/CodeGen/RISCV/rvv/vmxor.ll index dc75fc3e7cd38..3c6a4aabcaf95 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmxor.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmxor.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vmxor.nxv1i1( - , - , - iXLen); - define @intrinsic_vmxor_mm_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxor_mm_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -24,11 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vmxor.nxv2i1( - , - , - iXLen); - define @intrinsic_vmxor_mm_nxv2i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxor_mm_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -44,11 +34,6 @@ entry: ret %a } -declare @llvm.riscv.vmxor.nxv4i1( - , - , - iXLen); - define @intrinsic_vmxor_mm_nxv4i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxor_mm_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -64,11 +49,6 @@ entry: ret %a } -declare @llvm.riscv.vmxor.nxv8i1( - , - , - iXLen); - define @intrinsic_vmxor_mm_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxor_mm_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -84,11 +64,6 @@ entry: ret %a } -declare @llvm.riscv.vmxor.nxv16i1( - , - , - iXLen); - define @intrinsic_vmxor_mm_nxv16i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxor_mm_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -104,11 +79,6 @@ entry: ret %a } -declare @llvm.riscv.vmxor.nxv32i1( - , - , - iXLen); - define @intrinsic_vmxor_mm_nxv32i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxor_mm_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -124,11 +94,6 @@ entry: ret %a } -declare @llvm.riscv.vmxor.nxv64i1( - , - , - iXLen); - define 
@intrinsic_vmxor_mm_nxv64i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxor_mm_nxv64i1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclip.ll b/llvm/test/CodeGen/RISCV/rvv/vnclip.ll index 1c389f522e844..05bd6b9123b5e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vnclip.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnclip.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,13 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -52,12 +39,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclip_wv_nxv2i8_nxv2i16_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv2i8_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,13 +56,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_wv_nxv2i8_nxv2i16_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv2i8_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -100,12 +74,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclip_wv_nxv4i8_nxv4i16_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv4i8_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -123,13 +91,6 @@ entry: ret %a } -declare 
@llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_wv_nxv4i8_nxv4i16_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv4i8_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -148,12 +109,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclip_wv_nxv8i8_nxv8i16_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv8i8_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -172,13 +127,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_wv_nxv8i8_nxv8i16_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv8i8_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -197,12 +145,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclip_wv_nxv16i8_nxv16i16_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv16i8_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -221,13 +163,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_wv_nxv16i8_nxv16i16_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv16i8_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -246,12 +181,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclip_wv_nxv32i8_nxv32i16_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv32i8_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -270,13 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_wv_nxv32i8_nxv32i16_nxv32i8( %0, %1, %2, %3, 
iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv32i8_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -295,12 +217,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclip_wv_nxv1i16_nxv1i32_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv1i16_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -318,13 +234,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_wv_nxv1i16_nxv1i32_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i16_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -343,12 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclip_wv_nxv2i16_nxv2i32_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv2i16_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -366,13 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_wv_nxv2i16_nxv2i32_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv2i16_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -391,12 +287,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclip_wv_nxv4i16_nxv4i32_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv4i16_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -415,13 +305,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_wv_nxv4i16_nxv4i32_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv4i16_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -440,12 +323,6 @@ entry: ret %a } 
-declare @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclip_wv_nxv8i16_nxv8i32_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv8i16_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -464,13 +341,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_wv_nxv8i16_nxv8i32_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv8i16_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -489,12 +359,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclip_wv_nxv16i16_nxv16i32_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv16i16_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -513,13 +377,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_wv_nxv16i16_nxv16i32_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv16i16_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -538,12 +395,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclip_wv_nxv1i32_nxv1i64_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv1i32_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -561,13 +412,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_wv_nxv1i32_nxv1i64_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i32_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -586,12 +430,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclip_wv_nxv2i32_nxv2i64_nxv2i32( 
%0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv2i32_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -610,13 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_wv_nxv2i32_nxv2i64_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv2i32_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -635,12 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclip_wv_nxv4i32_nxv4i64_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv4i32_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -659,13 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_wv_nxv4i32_nxv4i64_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv4i32_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -684,12 +502,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclip_wv_nxv8i32_nxv8i64_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv8i32_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -708,13 +520,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_wv_nxv8i32_nxv8i64_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv8i32_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -733,11 +538,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv1i8.nxv1i16( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_vx_nxv1i8_nxv1i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -755,13 +555,6 @@ entry: ret %a } -declare 
@llvm.riscv.vnclip.mask.nxv1i8.nxv1i16( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_vx_nxv1i8_nxv1i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -780,11 +573,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv2i8.nxv2i16( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_vx_nxv2i8_nxv2i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -802,13 +590,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_vx_nxv2i8_nxv2i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -827,11 +608,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv4i8.nxv4i16( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_vx_nxv4i8_nxv4i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -849,13 +625,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_vx_nxv4i8_nxv4i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -874,11 +643,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv8i8.nxv8i16( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_vx_nxv8i8_nxv8i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -897,13 +661,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_vx_nxv8i8_nxv8i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # 
%entry @@ -922,11 +679,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv16i8.nxv16i16( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_vx_nxv16i8_nxv16i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -945,13 +697,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_vx_nxv16i8_nxv16i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -970,11 +715,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv32i8.nxv32i16( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_vx_nxv32i8_nxv32i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -993,13 +733,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_vx_nxv32i8_nxv32i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1018,11 +751,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv1i16.nxv1i32( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_vx_nxv1i16_nxv1i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1040,13 +768,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_vx_nxv1i16_nxv1i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1065,11 +786,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv2i16.nxv2i32( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_vx_nxv2i16_nxv2i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vnclip_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1087,13 +803,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_vx_nxv2i16_nxv2i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1112,11 +821,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv4i16.nxv4i32( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_vx_nxv4i16_nxv4i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1135,13 +839,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_vx_nxv4i16_nxv4i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1160,11 +857,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv8i16.nxv8i32( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_vx_nxv8i16_nxv8i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1183,13 +875,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_vx_nxv8i16_nxv8i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1208,11 +893,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv16i16.nxv16i32( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_vx_nxv16i16_nxv16i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1231,13 +911,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define 
@intrinsic_vnclip_mask_vx_nxv16i16_nxv16i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1256,11 +929,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv1i32.nxv1i64( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_vx_nxv1i32_nxv1i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1278,13 +946,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_vx_nxv1i32_nxv1i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1303,11 +964,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv2i32.nxv2i64( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_vx_nxv2i32_nxv2i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1326,13 +982,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_vx_nxv2i32_nxv2i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1351,11 +1000,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.nxv4i32.nxv4i64( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_vx_nxv4i32_nxv4i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1374,13 +1018,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_vx_nxv4i32_nxv4i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1399,11 +1036,6 @@ entry: ret %a } -declare 
@llvm.riscv.vnclip.nxv8i32.nxv8i64( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_vx_nxv8i32_nxv8i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1422,13 +1054,6 @@ entry: ret %a } -declare @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclip_mask_vx_nxv8i32_nxv8i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclipu.ll b/llvm/test/CodeGen/RISCV/rvv/vnclipu.ll index 21dc859c3bf23..0e7682cc9411a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vnclipu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnclipu.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,13 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -52,12 +39,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclipu_wv_nxv2i8_nxv2i16_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv2i8_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,13 +56,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_wv_nxv2i8_nxv2i16_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv2i8_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -100,12 +74,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclipu_wv_nxv4i8_nxv4i16_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv4i8_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -123,13 +91,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_wv_nxv4i8_nxv4i16_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv4i8_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -148,12 +109,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclipu_wv_nxv8i8_nxv8i16_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv8i8_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -172,13 +127,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_wv_nxv8i8_nxv8i16_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv8i8_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -197,12 +145,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclipu_wv_nxv16i8_nxv16i16_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv16i8_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -221,13 +163,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_wv_nxv16i8_nxv16i16_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv16i8_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -246,12 +181,6 @@ entry: ret %a } -declare 
@llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclipu_wv_nxv32i8_nxv32i16_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv32i8_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -270,13 +199,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_wv_nxv32i8_nxv32i16_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv32i8_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -295,12 +217,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclipu_wv_nxv1i16_nxv1i32_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv1i16_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -318,13 +234,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_wv_nxv1i16_nxv1i32_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv1i16_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -343,12 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclipu_wv_nxv2i16_nxv2i32_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv2i16_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -366,13 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_wv_nxv2i16_nxv2i32_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv2i16_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -391,12 +287,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclipu_wv_nxv4i16_nxv4i32_nxv4i16( %0, 
%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv4i16_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -415,13 +305,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_wv_nxv4i16_nxv4i32_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv4i16_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -440,12 +323,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclipu_wv_nxv8i16_nxv8i32_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv8i16_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -464,13 +341,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_wv_nxv8i16_nxv8i32_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv8i16_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -489,12 +359,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclipu_wv_nxv16i16_nxv16i32_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv16i16_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -513,13 +377,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_wv_nxv16i16_nxv16i32_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv16i16_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -538,12 +395,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclipu_wv_nxv1i32_nxv1i64_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv1i32_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ 
-561,13 +412,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_wv_nxv1i32_nxv1i64_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv1i32_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -586,12 +430,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclipu_wv_nxv2i32_nxv2i64_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv2i32_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -610,13 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_wv_nxv2i32_nxv2i64_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv2i32_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -635,12 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclipu_wv_nxv4i32_nxv4i64_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv4i32_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -659,13 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32( - , - , - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_wv_nxv4i32_nxv4i64_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv4i32_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -684,12 +502,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32( - , - , - , - iXLen, iXLen); - define @intrinsic_vnclipu_wv_nxv8i32_nxv8i64_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv8i32_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -708,13 +520,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32( - , - , - , - , - iXLen, iXLen, iXLen); - 
define @intrinsic_vnclipu_mask_wv_nxv8i32_nxv8i64_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv8i32_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -733,11 +538,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv1i8.nxv1i16( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_vx_nxv1i8_nxv1i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -755,13 +555,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_vx_nxv1i8_nxv1i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -780,11 +573,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv2i8.nxv2i16( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_vx_nxv2i8_nxv2i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -802,13 +590,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_vx_nxv2i8_nxv2i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -827,11 +608,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv4i8.nxv4i16( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_vx_nxv4i8_nxv4i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -849,13 +625,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_vx_nxv4i8_nxv4i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -874,11 +643,6 @@ entry: ret %a } -declare 
@llvm.riscv.vnclipu.nxv8i8.nxv8i16( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_vx_nxv8i8_nxv8i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -897,13 +661,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_vx_nxv8i8_nxv8i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -922,11 +679,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv16i8.nxv16i16( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_vx_nxv16i8_nxv16i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -945,13 +697,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_vx_nxv16i8_nxv16i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -970,11 +715,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv32i8.nxv32i16( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_vx_nxv32i8_nxv32i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -993,13 +733,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_vx_nxv32i8_nxv32i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1018,11 +751,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv1i16.nxv1i32( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_vx_nxv1i16_nxv1i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv1i16_nxv1i32: ; CHECK: # 
%bb.0: # %entry @@ -1040,13 +768,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_vx_nxv1i16_nxv1i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1065,11 +786,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv2i16.nxv2i32( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_vx_nxv2i16_nxv2i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1087,13 +803,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_vx_nxv2i16_nxv2i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1112,11 +821,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv4i16.nxv4i32( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_vx_nxv4i16_nxv4i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1135,13 +839,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_vx_nxv4i16_nxv4i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1160,11 +857,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv8i16.nxv8i32( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_vx_nxv8i16_nxv8i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1183,13 +875,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_vx_nxv8i16_nxv8i32( %0, %1, iXLen 
%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1208,11 +893,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv16i16.nxv16i32( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_vx_nxv16i16_nxv16i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1231,13 +911,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_vx_nxv16i16_nxv16i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1256,11 +929,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv1i32.nxv1i64( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_vx_nxv1i32_nxv1i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1278,13 +946,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_vx_nxv1i32_nxv1i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1303,11 +964,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv2i32.nxv2i64( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_vx_nxv2i32_nxv2i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1326,13 +982,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_vx_nxv2i32_nxv2i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1351,11 +1000,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv4i32.nxv4i64( - , - 
, - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_vx_nxv4i32_nxv4i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1374,13 +1018,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_vx_nxv4i32_nxv4i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1399,11 +1036,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.nxv8i32.nxv8i64( - , - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_vx_nxv8i32_nxv8i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1422,13 +1054,6 @@ entry: ret %a } -declare @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64( - , - , - iXLen, - , - iXLen, iXLen, iXLen); - define @intrinsic_vnclipu_mask_vx_nxv8i32_nxv8i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsac-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsac-vp.ll index 3484d288088a0..1c4294990f90a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vnmsac-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnmsac-vp.ll @@ -4,11 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.mul.nxv1i8(, , , i32) -declare @llvm.vp.sub.nxv1i8(, , , i32) -declare @llvm.vp.merge.nxv1i8(, , , i32) -declare @llvm.vp.select.nxv1i8(, , , i32) - define @vnmsac_vv_nxv1i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv1i8: ; CHECK: # %bb.0: @@ -93,11 +88,6 @@ define @vnmsac_vx_nxv1i8_ta( %a, i8 %b, %u } -declare @llvm.vp.mul.nxv2i8(, , , i32) -declare @llvm.vp.sub.nxv2i8(, , , i32) -declare @llvm.vp.merge.nxv2i8(, , , i32) -declare @llvm.vp.select.nxv2i8(, , , i32) - 
define @vnmsac_vv_nxv2i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv2i8: ; CHECK: # %bb.0: @@ -182,11 +172,6 @@ define @vnmsac_vx_nxv2i8_ta( %a, i8 %b, %u } -declare @llvm.vp.mul.nxv4i8(, , , i32) -declare @llvm.vp.sub.nxv4i8(, , , i32) -declare @llvm.vp.merge.nxv4i8(, , , i32) -declare @llvm.vp.select.nxv4i8(, , , i32) - define @vnmsac_vv_nxv4i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv4i8: ; CHECK: # %bb.0: @@ -271,11 +256,6 @@ define @vnmsac_vx_nxv4i8_ta( %a, i8 %b, %u } -declare @llvm.vp.mul.nxv8i8(, , , i32) -declare @llvm.vp.sub.nxv8i8(, , , i32) -declare @llvm.vp.merge.nxv8i8(, , , i32) -declare @llvm.vp.select.nxv8i8(, , , i32) - define @vnmsac_vv_nxv8i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv8i8: ; CHECK: # %bb.0: @@ -360,11 +340,6 @@ define @vnmsac_vx_nxv8i8_ta( %a, i8 %b, %u } -declare @llvm.vp.mul.nxv16i8(, , , i32) -declare @llvm.vp.sub.nxv16i8(, , , i32) -declare @llvm.vp.merge.nxv16i8(, , , i32) -declare @llvm.vp.select.nxv16i8(, , , i32) - define @vnmsac_vv_nxv16i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv16i8: ; CHECK: # %bb.0: @@ -449,11 +424,6 @@ define @vnmsac_vx_nxv16i8_ta( %a, i8 %b, %u } -declare @llvm.vp.mul.nxv32i8(, , , i32) -declare @llvm.vp.sub.nxv32i8(, , , i32) -declare @llvm.vp.merge.nxv32i8(, , , i32) -declare @llvm.vp.select.nxv32i8(, , , i32) - define @vnmsac_vv_nxv32i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv32i8: ; CHECK: # %bb.0: @@ -538,11 +508,6 @@ define @vnmsac_vx_nxv32i8_ta( %a, i8 %b, %u } -declare @llvm.vp.mul.nxv64i8(, , , i32) -declare @llvm.vp.sub.nxv64i8(, , , i32) -declare @llvm.vp.merge.nxv64i8(, , , i32) -declare @llvm.vp.select.nxv64i8(, , , i32) - define @vnmsac_vv_nxv64i8( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv64i8: ; CHECK: # %bb.0: @@ -630,11 +595,6 @@ define @vnmsac_vx_nxv64i8_ta( %a, i8 %b, %u } -declare @llvm.vp.mul.nxv1i16(, , , i32) -declare 
@llvm.vp.sub.nxv1i16(, , , i32) -declare @llvm.vp.merge.nxv1i16(, , , i32) -declare @llvm.vp.select.nxv1i16(, , , i32) - define @vnmsac_vv_nxv1i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv1i16: ; CHECK: # %bb.0: @@ -719,11 +679,6 @@ define @vnmsac_vx_nxv1i16_ta( %a, i16 %b, < ret %u } -declare @llvm.vp.mul.nxv2i16(, , , i32) -declare @llvm.vp.sub.nxv2i16(, , , i32) -declare @llvm.vp.merge.nxv2i16(, , , i32) -declare @llvm.vp.select.nxv2i16(, , , i32) - define @vnmsac_vv_nxv2i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv2i16: ; CHECK: # %bb.0: @@ -808,11 +763,6 @@ define @vnmsac_vx_nxv2i16_ta( %a, i16 %b, < ret %u } -declare @llvm.vp.mul.nxv4i16(, , , i32) -declare @llvm.vp.sub.nxv4i16(, , , i32) -declare @llvm.vp.merge.nxv4i16(, , , i32) -declare @llvm.vp.select.nxv4i16(, , , i32) - define @vnmsac_vv_nxv4i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv4i16: ; CHECK: # %bb.0: @@ -897,11 +847,6 @@ define @vnmsac_vx_nxv4i16_ta( %a, i16 %b, < ret %u } -declare @llvm.vp.mul.nxv8i16(, , , i32) -declare @llvm.vp.sub.nxv8i16(, , , i32) -declare @llvm.vp.merge.nxv8i16(, , , i32) -declare @llvm.vp.select.nxv8i16(, , , i32) - define @vnmsac_vv_nxv8i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv8i16: ; CHECK: # %bb.0: @@ -986,11 +931,6 @@ define @vnmsac_vx_nxv8i16_ta( %a, i16 %b, < ret %u } -declare @llvm.vp.mul.nxv16i16(, , , i32) -declare @llvm.vp.sub.nxv16i16(, , , i32) -declare @llvm.vp.merge.nxv16i16(, , , i32) -declare @llvm.vp.select.nxv16i16(, , , i32) - define @vnmsac_vv_nxv16i16( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv16i16: ; CHECK: # %bb.0: @@ -1075,11 +1015,6 @@ define @vnmsac_vx_nxv16i16_ta( %a, i16 %b ret %u } -declare @llvm.vp.mul.nxv32i16(, , , i32) -declare @llvm.vp.sub.nxv32i16(, , , i32) -declare @llvm.vp.merge.nxv32i16(, , , i32) -declare @llvm.vp.select.nxv32i16(, , , i32) - define @vnmsac_vv_nxv32i16( %a, %b, %c, %m, i32 zeroext %evl) { ; 
CHECK-LABEL: vnmsac_vv_nxv32i16: ; CHECK: # %bb.0: @@ -1167,11 +1102,6 @@ define @vnmsac_vx_nxv32i16_ta( %a, i16 %b ret %u } -declare @llvm.vp.mul.nxv1i32(, , , i32) -declare @llvm.vp.sub.nxv1i32(, , , i32) -declare @llvm.vp.merge.nxv1i32(, , , i32) -declare @llvm.vp.select.nxv1i32(, , , i32) - define @vnmsac_vv_nxv1i32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv1i32: ; CHECK: # %bb.0: @@ -1256,11 +1186,6 @@ define @vnmsac_vx_nxv1i32_ta( %a, i32 %b, < ret %u } -declare @llvm.vp.mul.nxv2i32(, , , i32) -declare @llvm.vp.sub.nxv2i32(, , , i32) -declare @llvm.vp.merge.nxv2i32(, , , i32) -declare @llvm.vp.select.nxv2i32(, , , i32) - define @vnmsac_vv_nxv2i32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv2i32: ; CHECK: # %bb.0: @@ -1345,11 +1270,6 @@ define @vnmsac_vx_nxv2i32_ta( %a, i32 %b, < ret %u } -declare @llvm.vp.mul.nxv4i32(, , , i32) -declare @llvm.vp.sub.nxv4i32(, , , i32) -declare @llvm.vp.merge.nxv4i32(, , , i32) -declare @llvm.vp.select.nxv4i32(, , , i32) - define @vnmsac_vv_nxv4i32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv4i32: ; CHECK: # %bb.0: @@ -1434,11 +1354,6 @@ define @vnmsac_vx_nxv4i32_ta( %a, i32 %b, < ret %u } -declare @llvm.vp.mul.nxv8i32(, , , i32) -declare @llvm.vp.sub.nxv8i32(, , , i32) -declare @llvm.vp.merge.nxv8i32(, , , i32) -declare @llvm.vp.select.nxv8i32(, , , i32) - define @vnmsac_vv_nxv8i32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv8i32: ; CHECK: # %bb.0: @@ -1523,11 +1438,6 @@ define @vnmsac_vx_nxv8i32_ta( %a, i32 %b, < ret %u } -declare @llvm.vp.mul.nxv16i32(, , , i32) -declare @llvm.vp.sub.nxv16i32(, , , i32) -declare @llvm.vp.merge.nxv16i32(, , , i32) -declare @llvm.vp.select.nxv16i32(, , , i32) - define @vnmsac_vv_nxv16i32( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv16i32: ; CHECK: # %bb.0: @@ -1615,11 +1525,6 @@ define @vnmsac_vx_nxv16i32_ta( %a, i32 %b ret %u } -declare @llvm.vp.mul.nxv1i64(, , , i32) -declare 
@llvm.vp.sub.nxv1i64(, , , i32) -declare @llvm.vp.merge.nxv1i64(, , , i32) -declare @llvm.vp.select.nxv1i64(, , , i32) - define @vnmsac_vv_nxv1i64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv1i64: ; CHECK: # %bb.0: @@ -1751,11 +1656,6 @@ define @vnmsac_vx_nxv1i64_ta( %a, i64 %b, < ret %u } -declare @llvm.vp.mul.nxv2i64(, , , i32) -declare @llvm.vp.sub.nxv2i64(, , , i32) -declare @llvm.vp.merge.nxv2i64(, , , i32) -declare @llvm.vp.select.nxv2i64(, , , i32) - define @vnmsac_vv_nxv2i64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1887,11 +1787,6 @@ define @vnmsac_vx_nxv2i64_ta( %a, i64 %b, < ret %u } -declare @llvm.vp.mul.nxv4i64(, , , i32) -declare @llvm.vp.sub.nxv4i64(, , , i32) -declare @llvm.vp.merge.nxv4i64(, , , i32) -declare @llvm.vp.select.nxv4i64(, , , i32) - define @vnmsac_vv_nxv4i64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv4i64: ; CHECK: # %bb.0: @@ -2023,11 +1918,6 @@ define @vnmsac_vx_nxv4i64_ta( %a, i64 %b, < ret %u } -declare @llvm.vp.mul.nxv8i64(, , , i32) -declare @llvm.vp.sub.nxv8i64(, , , i32) -declare @llvm.vp.merge.nxv8i64(, , , i32) -declare @llvm.vp.select.nxv8i64(, , , i32) - define @vnmsac_vv_nxv8i64( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vnmsac_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsac.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsac.ll index 505443d93720b..767caf94f16f8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vnmsac.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnmsac.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vnmsac.nxv1i8.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,13 +20,6 @@ entry: ret %a } -declare 
@llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,13 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv2i8.nxv2i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -74,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,13 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv4i8.nxv4i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -121,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,13 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv8i8.nxv8i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -168,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ 
-192,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv16i8.nxv16i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -215,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,13 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv32i8.nxv32i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -262,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,13 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv1i16.nxv1i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -309,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -333,13 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv2i16.nxv2i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; 
CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -356,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -380,13 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv4i16.nxv4i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -403,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -427,13 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv8i16.nxv8i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -450,13 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -474,13 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv16i16.nxv16i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -497,13 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, 
iXLen); - define @intrinsic_vnmsac_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -521,13 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv1i32.nxv1i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -544,13 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -568,13 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv2i32.nxv2i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -591,13 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -615,13 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv4i32.nxv4i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -638,13 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ 
-662,13 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv8i32.nxv8i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -685,13 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -709,13 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv1i64.nxv1i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -732,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -756,13 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv2i64.nxv2i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -779,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -803,13 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv4i64.nxv4i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; 
CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -826,13 +581,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -850,13 +598,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv1i8.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i8_i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -873,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv1i8.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i8_i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -897,13 +631,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv2i8.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i8_i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -920,13 +647,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv2i8.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i8_i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -944,13 +664,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv4i8.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i8_i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -967,13 +680,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv4i8.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i8_i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -991,13 +697,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv8i8.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv8i8_i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1014,13 +713,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv8i8.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i8_i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1038,13 +730,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv16i8.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv16i8_i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1061,13 +746,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv16i8.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv16i8_i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1085,13 +763,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv32i8.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv32i8_i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1108,13 +779,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv32i8.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv32i8_i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1132,13 +796,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv1i16.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, 
%2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i16_i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1155,13 +812,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv1i16.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i16_i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1179,13 +829,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv2i16.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i16_i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1202,13 +845,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv2i16.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i16_i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1226,13 +862,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv4i16.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i16_i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1249,13 +878,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv4i16.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i16_i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1273,13 +895,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv8i16.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv8i16_i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1296,13 +911,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv8i16.i16( - , - i16, - , - , - iXLen, iXLen); - 
define @intrinsic_vnmsac_mask_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i16_i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1320,13 +928,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv16i16.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv16i16_i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1343,13 +944,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv16i16.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv16i16_i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1367,13 +961,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv1i32.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i32_i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1390,13 +977,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv1i32.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i32_i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1414,13 +994,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv2i32.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i32_i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1437,13 +1010,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv2i32.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i32_i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1461,13 +1027,6 @@ entry: ret %a } 
-declare @llvm.riscv.vnmsac.nxv4i32.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i32_i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1484,13 +1043,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv4i32.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i32_i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1508,13 +1060,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv8i32.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv8i32_i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1531,13 +1076,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv8i32.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i32_i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1555,13 +1093,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv1i64.i64( - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64: ; RV32: # %bb.0: # %entry @@ -1591,13 +1122,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv1i64.i64( - , - i64, - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vnmsac_mask_vx_nxv1i64_i64_nxv1i64: ; RV32: # %bb.0: # %entry @@ -1628,13 +1152,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv2i64.i64( - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vnmsac_vx_nxv2i64_i64_nxv2i64: ; RV32: # 
%bb.0: # %entry @@ -1664,13 +1181,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv2i64.i64( - , - i64, - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vnmsac_mask_vx_nxv2i64_i64_nxv2i64: ; RV32: # %bb.0: # %entry @@ -1701,13 +1211,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.nxv4i64.i64( - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vnmsac_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vnmsac_vx_nxv4i64_i64_nxv4i64: ; RV32: # %bb.0: # %entry @@ -1737,13 +1240,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsac.mask.nxv4i64.i64( - , - i64, - , - , - iXLen, iXLen); - define @intrinsic_vnmsac_mask_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vnmsac_mask_vx_nxv4i64_i64_nxv4i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsub.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsub.ll index d9c7560830fec..8e858f1143d43 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vnmsub.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnmsub.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vnmsub.nxv1i8.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,13 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv2i8.nxv2i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, 
iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -74,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,13 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv4i8.nxv4i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -121,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,13 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv8i8.nxv8i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -168,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv16i8.nxv16i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -215,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, iXLen); - define 
@intrinsic_vnmsub_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,13 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv32i8.nxv32i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -262,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,13 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv1i16.nxv1i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -309,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -333,13 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv2i16.nxv2i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -356,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -380,13 +268,6 @@ entry: 
ret %a } -declare @llvm.riscv.vnmsub.nxv4i16.nxv4i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -403,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -427,13 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv8i16.nxv8i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -450,13 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -474,13 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv16i16.nxv16i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -497,13 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -521,13 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv1i32.nxv1i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vnmsub_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -544,13 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -568,13 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv2i32.nxv2i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -591,13 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -615,13 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv4i32.nxv4i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -638,13 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -662,13 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv8i32.nxv8i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -685,13 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, iXLen); - define 
@intrinsic_vnmsub_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -709,13 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv1i64.nxv1i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -732,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -756,13 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv2i64.nxv2i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -779,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -803,13 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv4i64.nxv4i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -826,13 +581,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -850,13 +598,6 @@ entry: 
ret %a } -declare @llvm.riscv.vnmsub.nxv1i8.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i8_i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -873,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv1i8.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i8_i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -897,13 +631,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv2i8.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i8_i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -920,13 +647,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv2i8.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i8_i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -944,13 +664,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv4i8.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i8_i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -967,13 +680,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv4i8.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i8_i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -991,13 +697,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv8i8.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i8_i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1014,13 +713,6 @@ entry: ret %a } -declare 
@llvm.riscv.vnmsub.mask.nxv8i8.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i8_i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1038,13 +730,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv16i8.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv16i8_i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1061,13 +746,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv16i8.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv16i8_i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1085,13 +763,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv32i8.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv32i8_i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1108,13 +779,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv32i8.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv32i8_i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1132,13 +796,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv1i16.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i16_i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1155,13 +812,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv1i16.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i16_i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ 
-1179,13 +829,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv2i16.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i16_i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1202,13 +845,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv2i16.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i16_i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1226,13 +862,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv4i16.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i16_i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1249,13 +878,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv4i16.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i16_i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1273,13 +895,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv8i16.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i16_i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1296,13 +911,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv8i16.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i16_i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1320,13 +928,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv16i16.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vnmsub_vx_nxv16i16_i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1343,13 +944,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv16i16.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv16i16_i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1367,13 +961,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv1i32.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i32_i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1390,13 +977,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv1i32.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i32_i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1414,13 +994,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv2i32.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i32_i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1437,13 +1010,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv2i32.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i32_i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1461,13 +1027,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv4i32.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i32_i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1484,13 +1043,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv4i32.i32( - , - i32, - , - , - iXLen, iXLen); - define 
@intrinsic_vnmsub_mask_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i32_i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1508,13 +1060,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv8i32.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i32_i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1531,13 +1076,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv8i32.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i32_i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1555,13 +1093,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv1i64.i64( - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64: ; RV32: # %bb.0: # %entry @@ -1591,13 +1122,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv1i64.i64( - , - i64, - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vnmsub_mask_vx_nxv1i64_i64_nxv1i64: ; RV32: # %bb.0: # %entry @@ -1628,13 +1152,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.nxv2i64.i64( - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vnmsub_vx_nxv2i64_i64_nxv2i64: ; RV32: # %bb.0: # %entry @@ -1664,13 +1181,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv2i64.i64( - , - i64, - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vnmsub_mask_vx_nxv2i64_i64_nxv2i64: ; RV32: # %bb.0: # %entry @@ -1701,13 +1211,6 @@ entry: ret %a } -declare 
@llvm.riscv.vnmsub.nxv4i64.i64( - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vnmsub_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vnmsub_vx_nxv4i64_i64_nxv4i64: ; RV32: # %bb.0: # %entry @@ -1737,13 +1240,6 @@ entry: ret %a } -declare @llvm.riscv.vnmsub.mask.nxv4i64.i64( - , - i64, - , - , - iXLen, iXLen); - define @intrinsic_vnmsub_mask_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vnmsub_mask_vx_nxv4i64_i64_nxv4i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsra-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vnsra-vp.ll index cb7a020d0b964..9b35e3e62aa87 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vnsra-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnsra-vp.ll @@ -2,10 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s -declare @llvm.vp.sext.nxv1i32.nxv1i16(, , i32) -declare @llvm.vp.trunc.nxv1i16.nxv1i32(, , i32) -declare @llvm.vp.ashr.nxv1i32(, , , i32) - define @vsra_vv_nxv1i16( %a, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv1i16: ; CHECK: # %bb.0: @@ -18,7 +14,6 @@ define @vsra_vv_nxv1i16( %a, %vr } - define @vsra_vv_nxv1i16_unmasked( %a, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv1i16_unmasked: ; CHECK: # %bb.0: @@ -31,10 +26,6 @@ define @vsra_vv_nxv1i16_unmasked( %a, %vr } -declare @llvm.vp.sext.nxv1i64.nxv1i32(, , i32) -declare @llvm.vp.trunc.nxv1i32.nxv1i64(, , i32) -declare @llvm.vp.ashr.nxv1i64(, , , i32) - define @vsra_vv_nxv1i64( %a, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv1i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsra.ll b/llvm/test/CodeGen/RISCV/rvv/vnsra.ll index 78c31d3403471..ac94aefc91c2d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vnsra.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnsra.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: 
-verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vnsra_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv2i8.nxv2i16.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vnsra_wv_nxv2i8_nxv2i16_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv2i8_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_wv_nxv2i8_nxv2i16_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv2i8_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv4i8.nxv4i16.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vnsra_wv_nxv4i8_nxv4i16_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv4i8_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_wv_nxv4i8_nxv4i16_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv4i8_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vnsra_wv_nxv8i8_nxv8i16_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv8i8_nxv8i16_nxv8i8: ; CHECK: 
# %bb.0: # %entry @@ -168,14 +120,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_wv_nxv8i8_nxv8i16_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv8i8_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -193,12 +137,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vnsra_wv_nxv16i8_nxv16i16_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv16i8_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -216,14 +154,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_wv_nxv16i8_nxv16i16_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv16i8_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -241,12 +171,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vnsra_wv_nxv32i8_nxv32i16_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv32i8_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -264,14 +188,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_wv_nxv32i8_nxv32i16_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv32i8_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -289,12 +205,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vnsra_wv_nxv1i16_nxv1i32_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv1i16_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -311,14 +221,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_wv_nxv1i16_nxv1i32_nxv1i16( 
%0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv1i16_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -336,12 +238,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vnsra_wv_nxv2i16_nxv2i32_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv2i16_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -358,14 +254,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_wv_nxv2i16_nxv2i32_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv2i16_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -383,12 +271,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vnsra_wv_nxv4i16_nxv4i32_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv4i16_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -406,14 +288,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_wv_nxv4i16_nxv4i32_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv4i16_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -431,12 +305,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vnsra_wv_nxv8i16_nxv8i32_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv8i16_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -454,14 +322,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_wv_nxv8i16_nxv8i32_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv8i16_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -479,12 +339,6 @@ entry: ret %a } -declare 
@llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vnsra_wv_nxv16i16_nxv16i32_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv16i16_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -502,14 +356,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_wv_nxv16i16_nxv16i32_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv16i16_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -527,12 +373,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vnsra_wv_nxv1i32_nxv1i64_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv1i32_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -549,14 +389,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_wv_nxv1i32_nxv1i64_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv1i32_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -574,12 +406,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vnsra_wv_nxv2i32_nxv2i64_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv2i32_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -597,14 +423,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_wv_nxv2i32_nxv2i64_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv2i32_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -622,12 +440,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vnsra_wv_nxv4i32_nxv4i64_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vnsra_wv_nxv4i32_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -645,14 +457,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_wv_nxv4i32_nxv4i64_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv4i32_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -670,12 +474,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vnsra_wv_nxv8i32_nxv8i64_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv8i32_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -693,14 +491,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_wv_nxv8i32_nxv8i64_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv8i32_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -718,12 +508,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv1i8.nxv1i16( - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_vx_nxv1i8_nxv1i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -740,14 +524,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_vx_nxv1i8_nxv1i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -765,12 +541,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv2i8.nxv2i16( - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_vx_nxv2i8_nxv2i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -787,14 +557,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_vx_nxv2i8_nxv2i16( %0, %1, iXLen 
%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -812,12 +574,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv4i8.nxv4i16( - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_vx_nxv4i8_nxv4i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -834,14 +590,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_vx_nxv4i8_nxv4i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -859,12 +607,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv8i8.nxv8i16( - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_vx_nxv8i8_nxv8i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -882,14 +624,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_vx_nxv8i8_nxv8i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -907,12 +641,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv16i8.nxv16i16( - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_vx_nxv16i8_nxv16i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -930,14 +658,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_vx_nxv16i8_nxv16i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -955,12 +675,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv32i8.nxv32i16( - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_vx_nxv32i8_nxv32i16( %0, iXLen %1, iXLen %2) 
nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -978,14 +692,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_vx_nxv32i8_nxv32i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1003,12 +709,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv1i16.nxv1i32( - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_vx_nxv1i16_nxv1i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1025,14 +725,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_vx_nxv1i16_nxv1i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1050,12 +742,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv2i16.nxv2i32( - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_vx_nxv2i16_nxv2i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1072,14 +758,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_vx_nxv2i16_nxv2i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1097,12 +775,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv4i16.nxv4i32( - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_vx_nxv4i16_nxv4i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1120,14 +792,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_vx_nxv4i16_nxv4i32( %0, %1, iXLen 
%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1145,12 +809,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv8i16.nxv8i32( - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_vx_nxv8i16_nxv8i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1168,14 +826,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_vx_nxv8i16_nxv8i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1193,12 +843,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv16i16.nxv16i32( - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_vx_nxv16i16_nxv16i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1216,14 +860,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_vx_nxv16i16_nxv16i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1241,12 +877,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv1i32.nxv1i64( - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_vx_nxv1i32_nxv1i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1263,14 +893,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_vx_nxv1i32_nxv1i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1288,12 +910,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv2i32.nxv2i64( - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_vx_nxv2i32_nxv2i64( %0, 
iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1311,14 +927,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_vx_nxv2i32_nxv2i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1336,12 +944,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv4i32.nxv4i64( - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_vx_nxv4i32_nxv4i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1359,14 +961,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_vx_nxv4i32_nxv4i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1384,12 +978,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.nxv8i32.nxv8i64( - , - , - iXLen, - iXLen); - define @intrinsic_vnsra_vx_nxv8i32_nxv8i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1407,14 +995,6 @@ entry: ret %a } -declare @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsra_mask_vx_nxv8i32_nxv8i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsrl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vnsrl-vp.ll index e6e86011745b4..bffd30df68353 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vnsrl-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnsrl-vp.ll @@ -2,10 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s -declare 
@llvm.vp.sext.nxv1i32.nxv1i16(, , i32) -declare @llvm.vp.trunc.nxv1i16.nxv1i32(, , i32) -declare @llvm.vp.lshr.nxv1i32(, , , i32) - define @vsra_vv_nxv1i16( %a, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv1i16: ; CHECK: # %bb.0: @@ -18,7 +14,6 @@ define @vsra_vv_nxv1i16( %a, %vr } - define @vsra_vv_nxv1i16_unmasked( %a, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv1i16_unmasked: ; CHECK: # %bb.0: @@ -31,10 +26,6 @@ define @vsra_vv_nxv1i16_unmasked( %a, %vr } -declare @llvm.vp.sext.nxv1i64.nxv1i32(, , i32) -declare @llvm.vp.trunc.nxv1i32.nxv1i64(, , i32) -declare @llvm.vp.lshr.nxv1i64(, , , i32) - define @vsra_vv_nxv1i64( %a, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv1i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsrl.ll b/llvm/test/CodeGen/RISCV/rvv/vnsrl.ll index d5586d333a554..4e4dfc4de7eb7 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vnsrl.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnsrl.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vnsrl_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vnsrl_wv_nxv2i8_nxv2i16_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv2i8_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.nxv2i8( - , - , - , - , - 
iXLen, - iXLen); - define @intrinsic_vnsrl_mask_wv_nxv2i8_nxv2i16_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv2i8_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vnsrl_wv_nxv4i8_nxv4i16_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv4i8_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_wv_nxv4i8_nxv4i16_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv4i8_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vnsrl_wv_nxv8i8_nxv8i16_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv8i8_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -168,14 +120,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_wv_nxv8i8_nxv8i16_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv8i8_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -193,12 +137,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vnsrl_wv_nxv16i8_nxv16i16_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv16i8_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -216,14 +154,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_wv_nxv16i8_nxv16i16_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv16i8_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -241,12 +171,6 @@ entry: ret 
%a } -declare @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vnsrl_wv_nxv32i8_nxv32i16_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv32i8_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -264,14 +188,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_wv_nxv32i8_nxv32i16_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv32i8_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -289,12 +205,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vnsrl_wv_nxv1i16_nxv1i32_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv1i16_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -311,14 +221,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_wv_nxv1i16_nxv1i32_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv1i16_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -336,12 +238,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vnsrl_wv_nxv2i16_nxv2i32_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv2i16_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -358,14 +254,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_wv_nxv2i16_nxv2i32_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv2i16_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -383,12 +271,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vnsrl_wv_nxv4i16_nxv4i32_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vnsrl_wv_nxv4i16_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -406,14 +288,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_wv_nxv4i16_nxv4i32_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv4i16_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -431,12 +305,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vnsrl_wv_nxv8i16_nxv8i32_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv8i16_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -454,14 +322,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_wv_nxv8i16_nxv8i32_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv8i16_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -479,12 +339,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vnsrl_wv_nxv16i16_nxv16i32_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv16i16_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -502,14 +356,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_wv_nxv16i16_nxv16i32_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv16i16_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -527,12 +373,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vnsrl_wv_nxv1i32_nxv1i64_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv1i32_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -549,14 +389,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32( - , - , - , - , - iXLen, - 
iXLen); - define @intrinsic_vnsrl_mask_wv_nxv1i32_nxv1i64_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv1i32_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -574,12 +406,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vnsrl_wv_nxv2i32_nxv2i64_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv2i32_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -597,14 +423,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_wv_nxv2i32_nxv2i64_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv2i32_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -622,12 +440,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vnsrl_wv_nxv4i32_nxv4i64_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv4i32_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -645,14 +457,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_wv_nxv4i32_nxv4i64_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv4i32_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -670,12 +474,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vnsrl_wv_nxv8i32_nxv8i64_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv8i32_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -693,14 +491,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_wv_nxv8i32_nxv8i64_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv8i32_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -718,12 
+508,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv1i8.nxv1i16( - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_vx_nxv1i8_nxv1i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -740,14 +524,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_vx_nxv1i8_nxv1i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -765,12 +541,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv2i8.nxv2i16( - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_vx_nxv2i8_nxv2i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -787,14 +557,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_vx_nxv2i8_nxv2i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -812,12 +574,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv4i8.nxv4i16( - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_vx_nxv4i8_nxv4i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -834,14 +590,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_vx_nxv4i8_nxv4i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -859,12 +607,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv8i8.nxv8i16( - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_vx_nxv8i8_nxv8i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -882,14 +624,6 @@ entry: ret %a } -declare 
@llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_vx_nxv8i8_nxv8i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -907,12 +641,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv16i8.nxv16i16( - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_vx_nxv16i8_nxv16i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -930,14 +658,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_vx_nxv16i8_nxv16i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -955,12 +675,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv32i8.nxv32i16( - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_vx_nxv32i8_nxv32i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -978,14 +692,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_vx_nxv32i8_nxv32i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1003,12 +709,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv1i16.nxv1i32( - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_vx_nxv1i16_nxv1i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1025,14 +725,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_vx_nxv1i16_nxv1i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1050,12 +742,6 @@ 
entry: ret %a } -declare @llvm.riscv.vnsrl.nxv2i16.nxv2i32( - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_vx_nxv2i16_nxv2i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1072,14 +758,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_vx_nxv2i16_nxv2i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1097,12 +775,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv4i16.nxv4i32( - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_vx_nxv4i16_nxv4i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1120,14 +792,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_vx_nxv4i16_nxv4i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1145,12 +809,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv8i16.nxv8i32( - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_vx_nxv8i16_nxv8i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1168,14 +826,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_vx_nxv8i16_nxv8i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1193,12 +843,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv16i16.nxv16i32( - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_vx_nxv16i16_nxv16i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1216,14 +860,6 @@ entry: 
ret %a } -declare @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_vx_nxv16i16_nxv16i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1241,12 +877,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv1i32.nxv1i64( - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_vx_nxv1i32_nxv1i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1263,14 +893,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_vx_nxv1i32_nxv1i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1288,12 +910,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv2i32.nxv2i64( - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_vx_nxv2i32_nxv2i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1311,14 +927,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_vx_nxv2i32_nxv2i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1336,12 +944,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv4i32.nxv4i64( - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_vx_nxv4i32_nxv4i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1359,14 +961,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_vx_nxv4i32_nxv4i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ 
-1384,12 +978,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.nxv8i32.nxv8i64( - , - , - iXLen, - iXLen); - define @intrinsic_vnsrl_vx_nxv8i32_nxv8i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1407,14 +995,6 @@ entry: ret %a } -declare @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vnsrl_mask_vx_nxv8i32_nxv8i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vor-vp.ll index e864d71fdad11..f13a85d29b099 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vor-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vor-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.or.nxv8i7(, , , i32) - define @vor_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vor_vx_nxv8i7: ; CHECK: # %bb.0: @@ -18,8 +16,6 @@ define @vor_vx_nxv8i7( %a, i7 signext %b, %v } -declare @llvm.vp.or.nxv1i8(, , , i32) - define @vor_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv1i8: ; CHECK: # %bb.0: @@ -84,8 +80,6 @@ define @vor_vi_nxv1i8_unmasked( %va, i32 zero ret %v } -declare @llvm.vp.or.nxv2i8(, , , i32) - define @vor_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv2i8: ; CHECK: # %bb.0: @@ -150,8 +144,6 @@ define @vor_vi_nxv2i8_unmasked( %va, i32 zero ret %v } -declare @llvm.vp.or.nxv4i8(, , , i32) - define @vor_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv4i8: ; CHECK: # %bb.0: @@ -216,8 +208,6 @@ define @vor_vi_nxv4i8_unmasked( %va, i32 zero ret %v } -declare @llvm.vp.or.nxv8i8(, , , i32) - define @vor_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv8i8: ; CHECK: # %bb.0: @@ -282,8 +272,6 @@ define 
@vor_vi_nxv8i8_unmasked( %va, i32 zero ret %v } -declare @llvm.vp.or.nxv16i8(, , , i32) - define @vor_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv16i8: ; CHECK: # %bb.0: @@ -348,8 +336,6 @@ define @vor_vi_nxv16i8_unmasked( %va, i32 z ret %v } -declare @llvm.vp.or.nxv32i8(, , , i32) - define @vor_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv32i8: ; CHECK: # %bb.0: @@ -414,8 +400,6 @@ define @vor_vi_nxv32i8_unmasked( %va, i32 z ret %v } -declare @llvm.vp.or.nxv64i8(, , , i32) - define @vor_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv64i8: ; CHECK: # %bb.0: @@ -480,8 +464,6 @@ define @vor_vi_nxv64i8_unmasked( %va, i32 z ret %v } -declare @llvm.vp.or.nxv1i16(, , , i32) - define @vor_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv1i16: ; CHECK: # %bb.0: @@ -546,8 +528,6 @@ define @vor_vi_nxv1i16_unmasked( %va, i32 z ret %v } -declare @llvm.vp.or.nxv2i16(, , , i32) - define @vor_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv2i16: ; CHECK: # %bb.0: @@ -612,8 +592,6 @@ define @vor_vi_nxv2i16_unmasked( %va, i32 z ret %v } -declare @llvm.vp.or.nxv4i16(, , , i32) - define @vor_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv4i16: ; CHECK: # %bb.0: @@ -678,8 +656,6 @@ define @vor_vi_nxv4i16_unmasked( %va, i32 z ret %v } -declare @llvm.vp.or.nxv8i16(, , , i32) - define @vor_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv8i16: ; CHECK: # %bb.0: @@ -744,8 +720,6 @@ define @vor_vi_nxv8i16_unmasked( %va, i32 z ret %v } -declare @llvm.vp.or.nxv16i16(, , , i32) - define @vor_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv16i16: ; CHECK: # %bb.0: @@ -810,8 +784,6 @@ define @vor_vi_nxv16i16_unmasked( %va, i3 ret %v } -declare @llvm.vp.or.nxv32i16(, , , i32) - define @vor_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv32i16: ; CHECK: # %bb.0: @@ -876,8 +848,6 @@ define 
@vor_vi_nxv32i16_unmasked( %va, i3 ret %v } -declare @llvm.vp.or.nxv1i32(, , , i32) - define @vor_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv1i32: ; CHECK: # %bb.0: @@ -942,8 +912,6 @@ define @vor_vi_nxv1i32_unmasked( %va, i32 z ret %v } -declare @llvm.vp.or.nxv2i32(, , , i32) - define @vor_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv2i32: ; CHECK: # %bb.0: @@ -1032,8 +1000,6 @@ define @vor_vi_nxv2i32_unmasked( %va, i32 z ret %v } -declare @llvm.vp.or.nxv4i32(, , , i32) - define @vor_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv4i32: ; CHECK: # %bb.0: @@ -1098,8 +1064,6 @@ define @vor_vi_nxv4i32_unmasked( %va, i32 z ret %v } -declare @llvm.vp.or.nxv8i32(, , , i32) - define @vor_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv8i32: ; CHECK: # %bb.0: @@ -1164,8 +1128,6 @@ define @vor_vi_nxv8i32_unmasked( %va, i32 z ret %v } -declare @llvm.vp.or.nxv10i32(, , , i32) - define @vor_vv_nxv10i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv10i32: ; CHECK: # %bb.0: @@ -1230,8 +1192,6 @@ define @vor_vi_nxv10i32_unmasked( %va, i3 ret %v } -declare @llvm.vp.or.nxv16i32(, , , i32) - define @vor_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv16i32: ; CHECK: # %bb.0: @@ -1296,8 +1256,6 @@ define @vor_vi_nxv16i32_unmasked( %va, i3 ret %v } -declare @llvm.vp.or.nxv1i64(, , , i32) - define @vor_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv1i64: ; CHECK: # %bb.0: @@ -1390,8 +1348,6 @@ define @vor_vi_nxv1i64_unmasked( %va, i32 z ret %v } -declare @llvm.vp.or.nxv2i64(, , , i32) - define @vor_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1484,8 +1440,6 @@ define @vor_vi_nxv2i64_unmasked( %va, i32 z ret %v } -declare @llvm.vp.or.nxv4i64(, , , i32) - define @vor_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1578,8 +1532,6 @@ 
define @vor_vi_nxv4i64_unmasked( %va, i32 z ret %v } -declare @llvm.vp.or.nxv8i64(, , , i32) - define @vor_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vor.ll b/llvm/test/CodeGen/RISCV/rvv/vor.ll index 3b5c6ff2abe7e..f5d0f2383cce3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vor.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vor.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vor.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vor_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vor_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen); - 
define @intrinsic_vor_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vor_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vor_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vor_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv64i8.nxv64i8( - , - , - , - iXLen); - define 
@intrinsic_vor_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vor_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vor_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vor_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vor_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vor_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vor_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -544,14 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -569,12 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv32i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vor_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -591,14 +417,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv1i32.nxv1i32( - , - , - 
, - iXLen); - define @intrinsic_vor_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vor_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -711,12 +501,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vor_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -733,14 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -758,12 +534,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vor_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -780,14 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); 
- define @intrinsic_vor_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -805,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv16i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vor_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -827,14 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vor_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv2i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vor_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv4i64.nxv4i64( - , - , - , - 
iXLen); - define @intrinsic_vor_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv8i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vor_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1042,12 +734,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv1i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vor_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1064,14 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1089,12 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vor_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1111,14 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define 
@intrinsic_vor_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1136,12 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vor_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1158,14 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1183,12 +833,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vor_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1205,14 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1230,12 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vor_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1252,14 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1277,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vor_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vor_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1299,14 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1324,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv64i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vor_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1346,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1371,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vor_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1393,14 +981,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1418,12 +998,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vor_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1440,14 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vor_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1465,12 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vor_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1487,14 +1047,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1512,12 +1064,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vor_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1534,14 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1559,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vor_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1581,14 +1113,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1606,12 +1130,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv32i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vor_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vor_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1628,14 +1146,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1653,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vor_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1675,14 +1179,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1700,12 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vor_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1722,14 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1747,12 +1229,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vor_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1769,14 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vor_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1794,12 +1262,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vor_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1816,14 +1278,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1841,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv16i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vor_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1863,14 +1311,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1888,12 +1328,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv1i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vor_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vor_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1922,14 +1356,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vor_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1959,12 +1385,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv2i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vor_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vor_vx_nxv2i64_nxv2i64_i64: ; 
RV32: # %bb.0: # %entry @@ -1993,14 +1413,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vor_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2030,12 +1442,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv4i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vor_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vor_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2064,14 +1470,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vor_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2101,12 +1499,6 @@ entry: ret %a } -declare @llvm.riscv.vor.nxv8i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vor_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vor_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2135,14 +1527,6 @@ entry: ret %a } -declare @llvm.riscv.vor.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vor_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vor_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-combine-reverse-load.ll b/llvm/test/CodeGen/RISCV/rvv/vp-combine-reverse-load.ll index 24d8e56fa17fe..75c60ad9382b5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vp-combine-reverse-load.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vp-combine-reverse-load.ll @@ -75,6 +75,3 @@ define @test_different_evl(* %ptr, %rev } -declare @llvm.vp.load.nxv2f32.p0nxv2f32(* nocapture, , i32) -declare @llvm.experimental.vp.reverse.nxv2f32(, , i32) -declare @llvm.experimental.vp.reverse.nxv2i1(, , i32) diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vp-combine-store-reverse.ll b/llvm/test/CodeGen/RISCV/rvv/vp-combine-store-reverse.ll index a2466c48b0ab7..5fa29dac69601 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vp-combine-store-reverse.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vp-combine-store-reverse.ll @@ -77,6 +77,3 @@ define void @test_different_evl( %val, * ret void } -declare @llvm.experimental.vp.reverse.nxv2f32(, , i32) -declare @llvm.experimental.vp.reverse.nxv2i1(, , i32) -declare void @llvm.vp.store.nxv2f32.p0nxv2f32(, * nocapture, , i32) diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-cttz-elts.ll b/llvm/test/CodeGen/RISCV/rvv/vp-cttz-elts.ll index b316f5f878816..8e8622b3d71d4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vp-cttz-elts.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vp-cttz-elts.ll @@ -236,7 +236,3 @@ define iXLen @fixed_v2i64_zero_poison(<2 x i64> %src, <2 x i1> %m, i32 %evl) { ret iXLen %r } -declare iXLen @llvm.vp.cttz.elts.iXLen.nxv2i1(, i1, , i32) -declare iXLen @llvm.vp.cttz.elts.iXLen.nxv2i32(, i1, , i32) -declare iXLen @llvm.vp.cttz.elts.iXLen.nxv2i64(, i1, , i32) -declare iXLen @llvm.vp.cttz.elts.iXLen.v2i64(<2 x i64>, i1, <2 x i1>, i32) diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-inttoptr-ptrtoint.ll b/llvm/test/CodeGen/RISCV/rvv/vp-inttoptr-ptrtoint.ll index df003907dc360..4f4db95f058a3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vp-inttoptr-ptrtoint.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vp-inttoptr-ptrtoint.ll @@ -1,8 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+m,+v < %s | FileCheck %s -declare @llvm.vp.inttoptr.nxv4p0.nxv4i8(, , i32) - define @inttoptr_nxv4p0_nxv4i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: inttoptr_nxv4p0_nxv4i8: ; CHECK: # %bb.0: @@ -14,8 +12,6 @@ define @inttoptr_nxv4p0_nxv4i8( %va, %v } -declare @llvm.vp.inttoptr.nxv4p0.nxv4i16(, , i32) - define @inttoptr_nxv4p0_nxv4i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: inttoptr_nxv4p0_nxv4i16: ; CHECK: # %bb.0: @@ -27,8 +23,6 
@@ define @inttoptr_nxv4p0_nxv4i16( %va, %v } -declare @llvm.vp.inttoptr.nxv4p0.nxv4i32(, , i32) - define @inttoptr_nxv4p0_nxv4i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: inttoptr_nxv4p0_nxv4i32: ; CHECK: # %bb.0: @@ -40,8 +34,6 @@ define @inttoptr_nxv4p0_nxv4i32( %va, %v } -declare @llvm.vp.inttoptr.nxv4p0.nxv4i64(, , i32) - define @inttoptr_nxv4p0_nxv4i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: inttoptr_nxv4p0_nxv4i64: ; CHECK: # %bb.0: @@ -50,8 +42,6 @@ define @inttoptr_nxv4p0_nxv4i64( %va, %v } -declare @llvm.vp.ptrtoint.nxv4i8.nxv4p0(, , i32) - define @ptrtoint_nxv4i8_nxv4p0( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: ptrtoint_nxv4i8_nxv4p0: ; CHECK: # %bb.0: @@ -66,8 +56,6 @@ define @ptrtoint_nxv4i8_nxv4p0( %va, %v } -declare @llvm.vp.ptrtoint.nxv4i16.nxv4p0(, , i32) - define @ptrtoint_nxv4i16_nxv4p0( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: ptrtoint_nxv4i16_nxv4p0: ; CHECK: # %bb.0: @@ -80,8 +68,6 @@ define @ptrtoint_nxv4i16_nxv4p0( %va, %v } -declare @llvm.vp.ptrtoint.nxv4i32.nxv4p0(, , i32) - define @ptrtoint_nxv4i32_nxv4p0( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: ptrtoint_nxv4i32_nxv4p0: ; CHECK: # %bb.0: @@ -93,8 +79,6 @@ define @ptrtoint_nxv4i32_nxv4p0( %va, %v } -declare @llvm.vp.ptrtoint.nxv4i64.nxv4p0(, , i32) - define @ptrtoint_nxv4i64_nxv4p0( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: ptrtoint_nxv4i64_nxv4p0: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-reverse-mask-fixed-vectors.ll b/llvm/test/CodeGen/RISCV/rvv/vp-reverse-mask-fixed-vectors.ll index 09d92c3c039f9..4b8effb70586e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vp-reverse-mask-fixed-vectors.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vp-reverse-mask-fixed-vectors.ll @@ -158,7 +158,3 @@ define <16 x i1> @test_vp_reverse_v16i1(<16 x i1> %src, i32 zeroext %evl) { ret <16 x i1> %dst } -declare <2 x i1> @llvm.experimental.vp.reverse.v2i1(<2 x i1>,<2 x i1>,i32) -declare <4 x i1> @llvm.experimental.vp.reverse.v4i1(<4 x i1>,<4 x i1>,i32) -declare <8 x i1> 
@llvm.experimental.vp.reverse.v8i1(<8 x i1>,<8 x i1>,i32) -declare <16 x i1> @llvm.experimental.vp.reverse.v16i1(<16 x i1>,<16 x i1>,i32) diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-reverse-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vp-reverse-mask.ll index 8e44d76e7010f..bb15a2241faa6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vp-reverse-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vp-reverse-mask.ll @@ -304,10 +304,3 @@ define @test_vp_reverse_nxv64i1( %src, i32 ret %dst } -declare @llvm.experimental.vp.reverse.nxv1i1(,,i32) -declare @llvm.experimental.vp.reverse.nxv2i1(,,i32) -declare @llvm.experimental.vp.reverse.nxv4i1(,,i32) -declare @llvm.experimental.vp.reverse.nxv8i1(,,i32) -declare @llvm.experimental.vp.reverse.nxv16i1(,,i32) -declare @llvm.experimental.vp.reverse.nxv32i1(,,i32) -declare @llvm.experimental.vp.reverse.nxv64i1(,,i32) diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-fixed-vectors.ll b/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-fixed-vectors.ll index dec68fa970c99..aa9854f7681f1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-fixed-vectors.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-fixed-vectors.ll @@ -4,11 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+vl-dependent-latency -verify-machineinstrs \ ; RUN: < %s | FileCheck %s --check-prefix=VLDEP -declare <2 x i1> @llvm.experimental.vp.splice.v2i1(<2 x i1>, <2 x i1>, i32, <2 x i1>, i32, i32) -declare <4 x i1> @llvm.experimental.vp.splice.v4i1(<4 x i1>, <4 x i1>, i32, <4 x i1>, i32, i32) -declare <8 x i1> @llvm.experimental.vp.splice.v8i1(<8 x i1>, <8 x i1>, i32, <8 x i1>, i32, i32) -declare <16 x i1> @llvm.experimental.vp.splice.v16i1(<16 x i1>, <16 x i1>, i32, <16 x i1>, i32, i32) - define <2 x i1> @test_vp_splice_v2i1(<2 x i1> %va, <2 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) { ; NOVLDEP-LABEL: test_vp_splice_v2i1: ; NOVLDEP: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll b/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll index 
36f2f4e6269d8..3215b4548243c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll @@ -2,14 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefix=NOVLDEP ; RUN: llc -mtriple=riscv64 -mattr=+v,+vl-dependent-latency -verify-machineinstrs < %s | FileCheck %s --check-prefix=VLDEP -declare @llvm.experimental.vp.splice.nxv1i1(, , i32, , i32, i32) -declare @llvm.experimental.vp.splice.nxv2i1(, , i32, , i32, i32) -declare @llvm.experimental.vp.splice.nxv4i1(, , i32, , i32, i32) -declare @llvm.experimental.vp.splice.nxv8i1(, , i32, , i32, i32) -declare @llvm.experimental.vp.splice.nxv16i1(, , i32, , i32, i32) -declare @llvm.experimental.vp.splice.nxv32i1(, , i32, , i32, i32) -declare @llvm.experimental.vp.splice.nxv64i1(, , i32, , i32, i32) - define @test_vp_splice_nxv1i1( %va, %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 { ; NOVLDEP-LABEL: test_vp_splice_nxv1i1: ; NOVLDEP: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-vaaddu.ll b/llvm/test/CodeGen/RISCV/rvv/vp-vaaddu.ll index 989fbb7fcea8b..d1f1538570011 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vp-vaaddu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vp-vaaddu.ll @@ -1,17 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 ; RUN: llc < %s -mtriple=riscv64 -mattr=+v | FileCheck %s -declare @llvm.vp.zext.nxv2i16.nxv2i8(, , i32) -declare @llvm.vp.zext.nxv2i32.nxv2i8(, , i32) -declare @llvm.vp.zext.nxv2i32.nxv2i16(, , i32) -declare @llvm.vp.trunc.nxv2i8.nxv2i16(, , i32) -declare @llvm.vp.trunc.nxv2i16.nxv2i32(, , i32) -declare @llvm.vp.trunc.nxv2i8.nxv2i32(, , i32) -declare @llvm.vp.add.nxv2i16(, , , i32) -declare @llvm.vp.lshr.nxv2i16(, , , i32) -declare @llvm.vp.add.nxv2i32(, , , i32) -declare @llvm.vp.lshr.nxv2i32(, , , i32) - define @vaaddu_1( %x, %y, %m, i32 zeroext %vl) { ; CHECK-LABEL: vaaddu_1: ; CHECK: # %bb.0: diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll index b6ec7906885ff..a075bba81d3c6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+zvfbfmin,+v \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV64 -declare @llvm.vp.gather.nxv1i8.nxv1p0(, , i32) - define @vpgather_nxv1i8( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv1i8: ; RV32: # %bb.0: @@ -28,8 +26,6 @@ define @vpgather_nxv1i8( %ptrs, %v } -declare @llvm.vp.gather.nxv2i8.nxv2p0(, , i32) - define @vpgather_nxv2i8( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv2i8: ; RV32: # %bb.0: @@ -174,8 +170,6 @@ define @vpgather_nxv2i8_zextload_nxv2i64( % ret %ev } -declare @llvm.vp.gather.nxv4i8.nxv4p0(, , i32) - define @vpgather_nxv4i8( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv4i8: ; RV32: # %bb.0: @@ -212,8 +206,6 @@ define @vpgather_truemask_nxv4i8( %ptrs, i32 ret %v } -declare @llvm.vp.gather.nxv8i8.nxv8p0(, , i32) - define @vpgather_nxv8i8( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv8i8: ; RV32: # %bb.0: @@ -253,8 +245,6 @@ define @vpgather_baseidx_nxv8i8(ptr %base, % ret %v } -declare @llvm.vp.gather.nxv32i8.nxv32p0(, , i32) - define @vpgather_baseidx_nxv32i8(ptr %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_nxv32i8: ; RV32: # %bb.0: @@ -344,8 +334,6 @@ define @vpgather_baseidx_nxv32i8(ptr %base, %v } -declare @llvm.vp.gather.nxv1i16.nxv1p0(, , i32) - define @vpgather_nxv1i16( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv1i16: ; RV32: # %bb.0: @@ -364,8 +352,6 @@ define @vpgather_nxv1i16( %ptrs, %v } -declare @llvm.vp.gather.nxv2i16.nxv2p0(, , i32) - define @vpgather_nxv2i16( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv2i16: ; RV32: # %bb.0: @@ -468,8 +454,6 @@ define @vpgather_nxv2i16_zextload_nxv2i64( ret %ev } -declare 
@llvm.vp.gather.nxv4i16.nxv4p0(, , i32) - define @vpgather_nxv4i16( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv4i16: ; RV32: # %bb.0: @@ -506,8 +490,6 @@ define @vpgather_truemask_nxv4i16( %ptrs, i ret %v } -declare @llvm.vp.gather.nxv8i16.nxv8p0(, , i32) - define @vpgather_nxv8i16( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv8i16: ; RV32: # %bb.0: @@ -616,8 +598,6 @@ define @vpgather_baseidx_nxv8i16(ptr %base, %v } -declare @llvm.vp.gather.nxv1i32.nxv1p0(, , i32) - define @vpgather_nxv1i32( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv1i32: ; RV32: # %bb.0: @@ -635,8 +615,6 @@ define @vpgather_nxv1i32( %ptrs, %v } -declare @llvm.vp.gather.nxv2i32.nxv2p0(, , i32) - define @vpgather_nxv2i32( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv2i32: ; RV32: # %bb.0: @@ -696,8 +674,6 @@ define @vpgather_nxv2i32_zextload_nxv2i64( ret %ev } -declare @llvm.vp.gather.nxv4i32.nxv4p0(, , i32) - define @vpgather_nxv4i32( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv4i32: ; RV32: # %bb.0: @@ -732,8 +708,6 @@ define @vpgather_truemask_nxv4i32( %ptrs, i ret %v } -declare @llvm.vp.gather.nxv8i32.nxv8p0(, , i32) - define @vpgather_nxv8i32( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv8i32: ; RV32: # %bb.0: @@ -911,8 +885,6 @@ define @vpgather_baseidx_nxv8i32(ptr %base, %v } -declare @llvm.vp.gather.nxv1i64.nxv1p0(, , i32) - define @vpgather_nxv1i64( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv1i64: ; RV32: # %bb.0: @@ -930,8 +902,6 @@ define @vpgather_nxv1i64( %ptrs, %v } -declare @llvm.vp.gather.nxv2i64.nxv2p0(, , i32) - define @vpgather_nxv2i64( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv2i64: ; RV32: # %bb.0: @@ -949,8 +919,6 @@ define @vpgather_nxv2i64( %ptrs, %v } -declare @llvm.vp.gather.nxv4i64.nxv4p0(, , i32) - define @vpgather_nxv4i64( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv4i64: ; RV32: # %bb.0: @@ -985,8 +953,6 @@ define @vpgather_truemask_nxv4i64( %ptrs, i 
ret %v } -declare @llvm.vp.gather.nxv8i64.nxv8p0(, , i32) - define @vpgather_nxv8i64( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv8i64: ; RV32: # %bb.0: @@ -1231,8 +1197,6 @@ define @vpgather_baseidx_nxv8i64(ptr %base, %v } -declare @llvm.vp.gather.nxv1bf16.nxv1p0(, , i32) - define @vpgather_nxv1bf16( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv1bf16: ; RV32: # %bb.0: @@ -1251,8 +1215,6 @@ define @vpgather_nxv1bf16( %ptrs, %v } -declare @llvm.vp.gather.nxv2bf16.nxv2p0(, , i32) - define @vpgather_nxv2bf16( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv2bf16: ; RV32: # %bb.0: @@ -1271,8 +1233,6 @@ define @vpgather_nxv2bf16( %ptrs, %v } -declare @llvm.vp.gather.nxv4bf16.nxv4p0(, , i32) - define @vpgather_nxv4bf16( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv4bf16: ; RV32: # %bb.0: @@ -1309,8 +1269,6 @@ define @vpgather_truemask_nxv4bf16( %ptr ret %v } -declare @llvm.vp.gather.nxv8bf16.nxv8p0(, , i32) - define @vpgather_nxv8bf16( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv8bf16: ; RV32: # %bb.0: @@ -1419,8 +1377,6 @@ define @vpgather_baseidx_nxv8bf16(ptr %base, %v } -declare @llvm.vp.gather.nxv1f16.nxv1p0(, , i32) - define @vpgather_nxv1f16( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv1f16: ; RV32: # %bb.0: @@ -1439,8 +1395,6 @@ define @vpgather_nxv1f16( %ptrs, %v } -declare @llvm.vp.gather.nxv2f16.nxv2p0(, , i32) - define @vpgather_nxv2f16( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv2f16: ; RV32: # %bb.0: @@ -1459,8 +1413,6 @@ define @vpgather_nxv2f16( %ptrs, %v } -declare @llvm.vp.gather.nxv4f16.nxv4p0(, , i32) - define @vpgather_nxv4f16( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv4f16: ; RV32: # %bb.0: @@ -1497,8 +1449,6 @@ define @vpgather_truemask_nxv4f16( %ptrs, ret %v } -declare @llvm.vp.gather.nxv8f16.nxv8p0(, , i32) - define @vpgather_nxv8f16( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv8f16: ; RV32: # %bb.0: @@ -1607,8 +1557,6 @@ define 
@vpgather_baseidx_nxv8f16(ptr %base, %v } -declare @llvm.vp.gather.nxv1f32.nxv1p0(, , i32) - define @vpgather_nxv1f32( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv1f32: ; RV32: # %bb.0: @@ -1626,8 +1574,6 @@ define @vpgather_nxv1f32( %ptrs, %v } -declare @llvm.vp.gather.nxv2f32.nxv2p0(, , i32) - define @vpgather_nxv2f32( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv2f32: ; RV32: # %bb.0: @@ -1645,8 +1591,6 @@ define @vpgather_nxv2f32( %ptrs, %v } -declare @llvm.vp.gather.nxv4f32.nxv4p0(, , i32) - define @vpgather_nxv4f32( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv4f32: ; RV32: # %bb.0: @@ -1681,8 +1625,6 @@ define @vpgather_truemask_nxv4f32( %ptrs, ret %v } -declare @llvm.vp.gather.nxv8f32.nxv8p0(, , i32) - define @vpgather_nxv8f32( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv8f32: ; RV32: # %bb.0: @@ -1860,8 +1802,6 @@ define @vpgather_baseidx_nxv8f32(ptr %base, %v } -declare @llvm.vp.gather.nxv1f64.nxv1p0(, , i32) - define @vpgather_nxv1f64( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv1f64: ; RV32: # %bb.0: @@ -1879,8 +1819,6 @@ define @vpgather_nxv1f64( %ptrs, %v } -declare @llvm.vp.gather.nxv2f64.nxv2p0(, , i32) - define @vpgather_nxv2f64( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv2f64: ; RV32: # %bb.0: @@ -1898,8 +1836,6 @@ define @vpgather_nxv2f64( %ptrs, %v } -declare @llvm.vp.gather.nxv4f64.nxv4p0(, , i32) - define @vpgather_nxv4f64( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv4f64: ; RV32: # %bb.0: @@ -1934,8 +1870,6 @@ define @vpgather_truemask_nxv4f64( %ptrs ret %v } -declare @llvm.vp.gather.nxv6f64.nxv6p0(, , i32) - define @vpgather_nxv6f64( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv6f64: ; RV32: # %bb.0: @@ -2180,8 +2114,6 @@ define @vpgather_baseidx_nxv6f64(ptr %base, %v } -declare @llvm.vp.gather.nxv8f64.nxv8p0(, , i32) - define @vpgather_nxv8f64( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv8f64: ; RV32: # %bb.0: @@ -2426,8 
+2358,6 @@ define @vpgather_baseidx_nxv8f64(ptr %base, %v } -declare @llvm.vp.gather.nxv16f64.nxv16p0(, , i32) - define @vpgather_nxv16f64( %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_nxv16f64: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vpload.ll b/llvm/test/CodeGen/RISCV/rvv/vpload.ll index 3a26af0279d50..2ece316c7e54a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vpload.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vpload.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+zvfbfmin,+v \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare @llvm.vp.load.nxv1i8.p0(ptr, , i32) - define @vpload_nxv1i8(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv1i8: ; CHECK: # %bb.0: @@ -41,8 +39,6 @@ define @vpload_nxv1i8_passthru(ptr %ptr, %m, ret %merge } -declare @llvm.vp.load.nxv2i8.p0(ptr, , i32) - define @vpload_nxv2i8(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv2i8: ; CHECK: # %bb.0: @@ -53,8 +49,6 @@ define @vpload_nxv2i8(ptr %ptr, %m, i32 zero ret %load } -declare @llvm.vp.load.nxv3i8.p0(ptr, , i32) - define @vpload_nxv3i8(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv3i8: ; CHECK: # %bb.0: @@ -65,8 +59,6 @@ define @vpload_nxv3i8(ptr %ptr, %m, i32 zero ret %load } -declare @llvm.vp.load.nxv4i6.nxv4i6.p0(*, , i32) - define @vpload_nxv4i6(* %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv4i6: ; CHECK: # %bb.0: @@ -77,8 +69,6 @@ define @vpload_nxv4i6(* %ptr, %load } -declare @llvm.vp.load.nxv4i8.p0(ptr, , i32) - define @vpload_nxv4i8(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv4i8: ; CHECK: # %bb.0: @@ -89,8 +79,6 @@ define @vpload_nxv4i8(ptr %ptr, %m, i32 zero ret %load } -declare @llvm.vp.load.nxv8i8.p0(ptr, , i32) - define @vpload_nxv8i8(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv8i8: ; CHECK: # %bb.0: @@ -111,8 +99,6 @@ define @vpload_nxv8i8_allones_mask(ptr %ptr, i32 zeroext %evl) ret %load } -declare @llvm.vp.load.nxv1i16.p0(ptr, , i32) - define 
@vpload_nxv1i16(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv1i16: ; CHECK: # %bb.0: @@ -123,8 +109,6 @@ define @vpload_nxv1i16(ptr %ptr, %m, i32 ze ret %load } -declare @llvm.vp.load.nxv2i16.p0(ptr, , i32) - define @vpload_nxv2i16(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv2i16: ; CHECK: # %bb.0: @@ -145,8 +129,6 @@ define @vpload_nxv2i16_allones_mask(ptr %ptr, i32 zeroext %ev ret %load } -declare @llvm.vp.load.nxv4i16.p0(ptr, , i32) - define @vpload_nxv4i16(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv4i16: ; CHECK: # %bb.0: @@ -157,8 +139,6 @@ define @vpload_nxv4i16(ptr %ptr, %m, i32 ze ret %load } -declare @llvm.vp.load.nxv8i16.p0(ptr, , i32) - define @vpload_nxv8i16(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv8i16: ; CHECK: # %bb.0: @@ -169,8 +149,6 @@ define @vpload_nxv8i16(ptr %ptr, %m, i32 ze ret %load } -declare @llvm.vp.load.nxv1i32.p0(ptr, , i32) - define @vpload_nxv1i32(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv1i32: ; CHECK: # %bb.0: @@ -181,8 +159,6 @@ define @vpload_nxv1i32(ptr %ptr, %m, i32 ze ret %load } -declare @llvm.vp.load.nxv2i32.p0(ptr, , i32) - define @vpload_nxv2i32(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv2i32: ; CHECK: # %bb.0: @@ -193,8 +169,6 @@ define @vpload_nxv2i32(ptr %ptr, %m, i32 ze ret %load } -declare @llvm.vp.load.nxv4i32.p0(ptr, , i32) - define @vpload_nxv4i32(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv4i32: ; CHECK: # %bb.0: @@ -215,8 +189,6 @@ define @vpload_nxv4i32_allones_mask(ptr %ptr, i32 zeroext %ev ret %load } -declare @llvm.vp.load.nxv8i32.p0(ptr, , i32) - define @vpload_nxv8i32(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv8i32: ; CHECK: # %bb.0: @@ -227,8 +199,6 @@ define @vpload_nxv8i32(ptr %ptr, %m, i32 ze ret %load } -declare @llvm.vp.load.nxv1i64.p0(ptr, , i32) - define @vpload_nxv1i64(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv1i64: ; CHECK: # %bb.0: @@ -249,8 +219,6 @@ 
define @vpload_nxv1i64_allones_mask(ptr %ptr, i32 zeroext %ev ret %load } -declare @llvm.vp.load.nxv2i64.p0(ptr, , i32) - define @vpload_nxv2i64(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv2i64: ; CHECK: # %bb.0: @@ -261,8 +229,6 @@ define @vpload_nxv2i64(ptr %ptr, %m, i32 ze ret %load } -declare @llvm.vp.load.nxv4i64.p0(ptr, , i32) - define @vpload_nxv4i64(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv4i64: ; CHECK: # %bb.0: @@ -273,8 +239,6 @@ define @vpload_nxv4i64(ptr %ptr, %m, i32 ze ret %load } -declare @llvm.vp.load.nxv8i64.p0(ptr, , i32) - define @vpload_nxv8i64(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv8i64: ; CHECK: # %bb.0: @@ -285,8 +249,6 @@ define @vpload_nxv8i64(ptr %ptr, %m, i32 ze ret %load } -declare @llvm.vp.load.nxv1bf16.p0(ptr, , i32) - define @vpload_nxv1bf16(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv1bf16: ; CHECK: # %bb.0: @@ -297,8 +259,6 @@ define @vpload_nxv1bf16(ptr %ptr, %m, i3 ret %load } -declare @llvm.vp.load.nxv2bf16.p0(ptr, , i32) - define @vpload_nxv2bf16(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv2bf16: ; CHECK: # %bb.0: @@ -319,8 +279,6 @@ define @vpload_nxv2bf16_allones_mask(ptr %ptr, i32 zeroext ret %load } -declare @llvm.vp.load.nxv4bf16.p0(ptr, , i32) - define @vpload_nxv4bf16(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv4bf16: ; CHECK: # %bb.0: @@ -331,8 +289,6 @@ define @vpload_nxv4bf16(ptr %ptr, %m, i3 ret %load } -declare @llvm.vp.load.nxv8bf16.p0(ptr, , i32) - define @vpload_nxv8bf16(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv8bf16: ; CHECK: # %bb.0: @@ -343,8 +299,6 @@ define @vpload_nxv8bf16(ptr %ptr, %m, i3 ret %load } -declare @llvm.vp.load.nxv1f16.p0(ptr, , i32) - define @vpload_nxv1f16(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv1f16: ; CHECK: # %bb.0: @@ -355,8 +309,6 @@ define @vpload_nxv1f16(ptr %ptr, %m, i32 z ret %load } -declare @llvm.vp.load.nxv2f16.p0(ptr, , i32) - define 
@vpload_nxv2f16(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv2f16: ; CHECK: # %bb.0: @@ -377,8 +329,6 @@ define @vpload_nxv2f16_allones_mask(ptr %ptr, i32 zeroext %e ret %load } -declare @llvm.vp.load.nxv4f16.p0(ptr, , i32) - define @vpload_nxv4f16(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv4f16: ; CHECK: # %bb.0: @@ -389,8 +339,6 @@ define @vpload_nxv4f16(ptr %ptr, %m, i32 z ret %load } -declare @llvm.vp.load.nxv8f16.p0(ptr, , i32) - define @vpload_nxv8f16(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv8f16: ; CHECK: # %bb.0: @@ -401,8 +349,6 @@ define @vpload_nxv8f16(ptr %ptr, %m, i32 z ret %load } -declare @llvm.vp.load.nxv1f32.p0(ptr, , i32) - define @vpload_nxv1f32(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv1f32: ; CHECK: # %bb.0: @@ -413,8 +359,6 @@ define @vpload_nxv1f32(ptr %ptr, %m, i32 ret %load } -declare @llvm.vp.load.nxv2f32.p0(ptr, , i32) - define @vpload_nxv2f32(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv2f32: ; CHECK: # %bb.0: @@ -425,8 +369,6 @@ define @vpload_nxv2f32(ptr %ptr, %m, i32 ret %load } -declare @llvm.vp.load.nxv4f32.p0(ptr, , i32) - define @vpload_nxv4f32(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv4f32: ; CHECK: # %bb.0: @@ -437,8 +379,6 @@ define @vpload_nxv4f32(ptr %ptr, %m, i32 ret %load } -declare @llvm.vp.load.nxv8f32.p0(ptr, , i32) - define @vpload_nxv8f32(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv8f32: ; CHECK: # %bb.0: @@ -459,8 +399,6 @@ define @vpload_nxv8f32_allones_mask(ptr %ptr, i32 zeroext % ret %load } -declare @llvm.vp.load.nxv1f64.p0(ptr, , i32) - define @vpload_nxv1f64(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv1f64: ; CHECK: # %bb.0: @@ -471,8 +409,6 @@ define @vpload_nxv1f64(ptr %ptr, %m, i32 ret %load } -declare @llvm.vp.load.nxv2f64.p0(ptr, , i32) - define @vpload_nxv2f64(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv2f64: ; CHECK: # %bb.0: @@ -483,8 +419,6 @@ define 
@vpload_nxv2f64(ptr %ptr, %m, i32 ret %load } -declare @llvm.vp.load.nxv4f64.p0(ptr, , i32) - define @vpload_nxv4f64(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv4f64: ; CHECK: # %bb.0: @@ -505,8 +439,6 @@ define @vpload_nxv4f64_allones_mask(ptr %ptr, i32 zeroext ret %load } -declare @llvm.vp.load.nxv8f64.p0(ptr, , i32) - define @vpload_nxv8f64(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv8f64: ; CHECK: # %bb.0: @@ -517,8 +449,6 @@ define @vpload_nxv8f64(ptr %ptr, %m, i32 ret %load } -declare @llvm.vp.load.nxv16f64.p0(ptr, , i32) - define @vpload_nxv16f64(ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv16f64: ; CHECK: # %bb.0: @@ -547,11 +477,6 @@ define @vpload_nxv16f64(ptr %ptr, %m, ret %load } -declare @llvm.vp.load.nxv17f64.p0(ptr, , i32) - -declare @llvm.vector.extract.nxv1f64( %vec, i64 %idx) -declare @llvm.vector.extract.nxv16f64( %vec, i64 %idx) - ; Note: We can't return as that introduces a vector ; store can't yet be legalized through widening. In order to test purely the ; vp.load legalization, manually split it. 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode-bf16.ll b/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode-bf16.ll index 16201da1a509a..8a1a3cdd90d72 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode-bf16.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode-bf16.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+v,+zvfh,+m,+zfbfmin,+zvfbfmin -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare @llvm.vp.merge.nxv1bf16(, , , i32) - define @vpmerge_vv_nxv1bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv1bf16: ; CHECK: # %bb.0: @@ -36,8 +34,6 @@ define @vpmerge_vf_nxv1bf16(bfloat %a, %v } -declare @llvm.vp.merge.nxv2bf16(, , , i32) - define @vpmerge_vv_nxv2bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv2bf16: ; CHECK: # %bb.0: @@ -64,8 +60,6 @@ define @vpmerge_vf_nxv2bf16(bfloat %a, %v } -declare @llvm.vp.merge.nxv4bf16(, , , i32) - define @vpmerge_vv_nxv4bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv4bf16: ; CHECK: # %bb.0: @@ -92,8 +86,6 @@ define @vpmerge_vf_nxv4bf16(bfloat %a, %v } -declare @llvm.vp.merge.nxv8bf16(, , , i32) - define @vpmerge_vv_nxv8bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv8bf16: ; CHECK: # %bb.0: @@ -120,8 +112,6 @@ define @vpmerge_vf_nxv8bf16(bfloat %a, %v } -declare @llvm.vp.merge.nxv16bf16(, , , i32) - define @vpmerge_vv_nxv16bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv16bf16: ; CHECK: # %bb.0: @@ -148,8 +138,6 @@ define @vpmerge_vf_nxv16bf16(bfloat %a, %v } -declare @llvm.vp.merge.nxv32bf16(, , , i32) - define @vpmerge_vv_nxv32bf16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv32bf16: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll index 6e0aee18c6c74..03697aafea45d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll @@ -7,7 +7,6 @@ ; 
RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,RV32ZVFHMIN ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RV64ZVFHMIN -declare @llvm.vp.merge.nxv1i1(, , , i32) define @vpmerge_nxv1i1( %va, %vb, %m, i32 zeroext %evl) { ; RV32-LABEL: vpmerge_nxv1i1: @@ -232,8 +231,6 @@ define @vpmerge_nxv128i1( %va, %v } -declare @llvm.vp.merge.nxv1i8(, , , i32) - define @vpmerge_vv_nxv1i8( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv1i8: ; CHECK: # %bb.0: @@ -267,8 +264,6 @@ define @vpmerge_vi_nxv1i8( %vb, %v } -declare @llvm.vp.merge.nxv2i8(, , , i32) - define @vpmerge_vv_nxv2i8( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv2i8: ; CHECK: # %bb.0: @@ -302,8 +297,6 @@ define @vpmerge_vi_nxv2i8( %vb, %v } -declare @llvm.vp.merge.nxv3i8(, , , i32) - define @vpmerge_vv_nxv3i8( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv3i8: ; CHECK: # %bb.0: @@ -337,8 +330,6 @@ define @vpmerge_vi_nxv3i8( %vb, %v } -declare @llvm.vp.merge.nxv4i8(, , , i32) - define @vpmerge_vv_nxv4i8( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv4i8: ; CHECK: # %bb.0: @@ -372,8 +363,6 @@ define @vpmerge_vi_nxv4i8( %vb, %v } -declare @llvm.vp.merge.nxv8i7(, , , i32) - define @vpmerge_vv_nxv8i7( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv8i7: ; CHECK: # %bb.0: @@ -407,8 +396,6 @@ define @vpmerge_vi_nxv8i7( %vb, %v } -declare @llvm.vp.merge.nxv8i8(, , , i32) - define @vpmerge_vv_nxv8i8( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv8i8: ; CHECK: # %bb.0: @@ -442,8 +429,6 @@ define @vpmerge_vi_nxv8i8( %vb, %v } -declare @llvm.vp.merge.nxv16i8(, , , i32) - define @vpmerge_vv_nxv16i8( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv16i8: ; CHECK: # %bb.0: @@ -477,8 +462,6 @@ define @vpmerge_vi_nxv16i8( %vb, %v } -declare @llvm.vp.merge.nxv32i8(, , , i32) - define 
@vpmerge_vv_nxv32i8( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv32i8: ; CHECK: # %bb.0: @@ -512,8 +495,6 @@ define @vpmerge_vi_nxv32i8( %vb, %v } -declare @llvm.vp.merge.nxv64i8(, , , i32) - define @vpmerge_vv_nxv64i8( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv64i8: ; CHECK: # %bb.0: @@ -547,8 +528,6 @@ define @vpmerge_vi_nxv64i8( %vb, %v } -declare @llvm.vp.merge.nxv128i8(, , , i32) - define @vpmerge_vv_nxv128i8( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv128i8: ; CHECK: # %bb.0: @@ -649,8 +628,6 @@ define @vpmerge_vi_nxv128i8( %vb, %v } -declare @llvm.vp.merge.nxv1i16(, , , i32) - define @vpmerge_vv_nxv1i16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv1i16: ; CHECK: # %bb.0: @@ -684,8 +661,6 @@ define @vpmerge_vi_nxv1i16( %vb, %v } -declare @llvm.vp.merge.nxv2i16(, , , i32) - define @vpmerge_vv_nxv2i16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv2i16: ; CHECK: # %bb.0: @@ -719,8 +694,6 @@ define @vpmerge_vi_nxv2i16( %vb, %v } -declare @llvm.vp.merge.nxv4i16(, , , i32) - define @vpmerge_vv_nxv4i16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv4i16: ; CHECK: # %bb.0: @@ -754,8 +727,6 @@ define @vpmerge_vi_nxv4i16( %vb, %v } -declare @llvm.vp.merge.nxv8i16(, , , i32) - define @vpmerge_vv_nxv8i16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv8i16: ; CHECK: # %bb.0: @@ -789,8 +760,6 @@ define @vpmerge_vi_nxv8i16( %vb, %v } -declare @llvm.vp.merge.nxv16i16(, , , i32) - define @vpmerge_vv_nxv16i16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv16i16: ; CHECK: # %bb.0: @@ -824,8 +793,6 @@ define @vpmerge_vi_nxv16i16( %vb, %v } -declare @llvm.vp.merge.nxv32i16(, , , i32) - define @vpmerge_vv_nxv32i16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv32i16: ; CHECK: # %bb.0: @@ -859,8 +826,6 @@ define @vpmerge_vi_nxv32i16( %vb, %v } -declare @llvm.vp.merge.nxv1i32(, , , i32) - define @vpmerge_vv_nxv1i32( %va, 
%vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv1i32: ; CHECK: # %bb.0: @@ -894,8 +859,6 @@ define @vpmerge_vi_nxv1i32( %vb, %v } -declare @llvm.vp.merge.nxv2i32(, , , i32) - define @vpmerge_vv_nxv2i32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv2i32: ; CHECK: # %bb.0: @@ -929,8 +892,6 @@ define @vpmerge_vi_nxv2i32( %vb, %v } -declare @llvm.vp.merge.nxv4i32(, , , i32) - define @vpmerge_vv_nxv4i32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv4i32: ; CHECK: # %bb.0: @@ -964,8 +925,6 @@ define @vpmerge_vi_nxv4i32( %vb, %v } -declare @llvm.vp.merge.nxv8i32(, , , i32) - define @vpmerge_vv_nxv8i32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv8i32: ; CHECK: # %bb.0: @@ -999,8 +958,6 @@ define @vpmerge_vi_nxv8i32( %vb, %v } -declare @llvm.vp.merge.nxv16i32(, , , i32) - define @vpmerge_vv_nxv16i32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv16i32: ; CHECK: # %bb.0: @@ -1034,8 +991,6 @@ define @vpmerge_vi_nxv16i32( %vb, %v } -declare @llvm.vp.merge.nxv1i64(, , , i32) - define @vpmerge_vv_nxv1i64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv1i64: ; CHECK: # %bb.0: @@ -1082,8 +1037,6 @@ define @vpmerge_vi_nxv1i64( %vb, %v } -declare @llvm.vp.merge.nxv2i64(, , , i32) - define @vpmerge_vv_nxv2i64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1130,8 +1083,6 @@ define @vpmerge_vi_nxv2i64( %vb, %v } -declare @llvm.vp.merge.nxv4i64(, , , i32) - define @vpmerge_vv_nxv4i64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1178,8 +1129,6 @@ define @vpmerge_vi_nxv4i64( %vb, %v } -declare @llvm.vp.merge.nxv8i64(, , , i32) - define @vpmerge_vv_nxv8i64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv8i64: ; CHECK: # %bb.0: @@ -1226,8 +1175,6 @@ define @vpmerge_vi_nxv8i64( %vb, %v } -declare @llvm.vp.merge.nxv1f16(, , , i32) - define @vpmerge_vv_nxv1f16( %va, %vb, %m, i32 zeroext 
%evl) { ; CHECK-LABEL: vpmerge_vv_nxv1f16: ; CHECK: # %bb.0: @@ -1275,8 +1222,6 @@ define @vpmerge_vf_nxv1f16(half %a, %vb, ret %v } -declare @llvm.vp.merge.nxv2f16(, , , i32) - define @vpmerge_vv_nxv2f16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv2f16: ; CHECK: # %bb.0: @@ -1324,8 +1269,6 @@ define @vpmerge_vf_nxv2f16(half %a, %vb, ret %v } -declare @llvm.vp.merge.nxv4f16(, , , i32) - define @vpmerge_vv_nxv4f16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv4f16: ; CHECK: # %bb.0: @@ -1373,8 +1316,6 @@ define @vpmerge_vf_nxv4f16(half %a, %vb, ret %v } -declare @llvm.vp.merge.nxv8f16(, , , i32) - define @vpmerge_vv_nxv8f16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv8f16: ; CHECK: # %bb.0: @@ -1422,8 +1363,6 @@ define @vpmerge_vf_nxv8f16(half %a, %vb, ret %v } -declare @llvm.vp.merge.nxv16f16(, , , i32) - define @vpmerge_vv_nxv16f16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv16f16: ; CHECK: # %bb.0: @@ -1471,8 +1410,6 @@ define @vpmerge_vf_nxv16f16(half %a, % ret %v } -declare @llvm.vp.merge.nxv32f16(, , , i32) - define @vpmerge_vv_nxv32f16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv32f16: ; CHECK: # %bb.0: @@ -1520,8 +1457,6 @@ define @vpmerge_vf_nxv32f16(half %a, % ret %v } -declare @llvm.vp.merge.nxv1f32(, , , i32) - define @vpmerge_vv_nxv1f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv1f32: ; CHECK: # %bb.0: @@ -1545,8 +1480,6 @@ define @vpmerge_vf_nxv1f32(float %a, % ret %v } -declare @llvm.vp.merge.nxv2f32(, , , i32) - define @vpmerge_vv_nxv2f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv2f32: ; CHECK: # %bb.0: @@ -1570,8 +1503,6 @@ define @vpmerge_vf_nxv2f32(float %a, % ret %v } -declare @llvm.vp.merge.nxv4f32(, , , i32) - define @vpmerge_vv_nxv4f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv4f32: ; CHECK: # %bb.0: @@ -1595,8 +1526,6 @@ define @vpmerge_vf_nxv4f32(float %a, % ret %v } -declare 
@llvm.vp.merge.nxv8f32(, , , i32) - define @vpmerge_vv_nxv8f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv8f32: ; CHECK: # %bb.0: @@ -1620,8 +1549,6 @@ define @vpmerge_vf_nxv8f32(float %a, % ret %v } -declare @llvm.vp.merge.nxv16f32(, , , i32) - define @vpmerge_vv_nxv16f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv16f32: ; CHECK: # %bb.0: @@ -1645,8 +1572,6 @@ define @vpmerge_vf_nxv16f32(float %a, %v } -declare @llvm.vp.merge.nxv1f64(, , , i32) - define @vpmerge_vv_nxv1f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv1f64: ; CHECK: # %bb.0: @@ -1670,8 +1595,6 @@ define @vpmerge_vf_nxv1f64(double %a, %v } -declare @llvm.vp.merge.nxv2f64(, , , i32) - define @vpmerge_vv_nxv2f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv2f64: ; CHECK: # %bb.0: @@ -1695,8 +1618,6 @@ define @vpmerge_vf_nxv2f64(double %a, %v } -declare @llvm.vp.merge.nxv4f64(, , , i32) - define @vpmerge_vv_nxv4f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv4f64: ; CHECK: # %bb.0: @@ -1720,8 +1641,6 @@ define @vpmerge_vf_nxv4f64(double %a, %v } -declare @llvm.vp.merge.nxv8f64(, , , i32) - define @vpmerge_vv_nxv8f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv8f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll index cf8c06fb91089..7e4a60095d7cc 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+zvfbfmin,+v,+m \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV64 -declare void @llvm.vp.scatter.nxv1i8.nxv1p0(, , , i32) - define void @vpscatter_nxv1i8( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv1i8: ; RV32: # %bb.0: @@ -26,8 +24,6 @@ define void @vpscatter_nxv1i8( %val, %ptrs, ret void } -declare void @llvm.vp.scatter.nxv2i8.nxv2p0(, , , 
i32) - define void @vpscatter_nxv2i8( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv2i8: ; RV32: # %bb.0: @@ -113,8 +109,6 @@ define void @vpscatter_nxv2i64_truncstore_nxv2i8( %val, , , , i32) - define void @vpscatter_nxv4i8( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv4i8: ; RV32: # %bb.0: @@ -147,8 +141,6 @@ define void @vpscatter_truemask_nxv4i8( %val, , , , i32) - define void @vpscatter_nxv8i8( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv8i8: ; RV32: # %bb.0: @@ -186,8 +178,6 @@ define void @vpscatter_baseidx_nxv8i8( %val, ptr %base, , , , i32) - define void @vpscatter_nxv1i16( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv1i16: ; RV32: # %bb.0: @@ -204,8 +194,6 @@ define void @vpscatter_nxv1i16( %val, %ptrs ret void } -declare void @llvm.vp.scatter.nxv2i16.nxv2p0(, , , i32) - define void @vpscatter_nxv2i16( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv2i16: ; RV32: # %bb.0: @@ -264,8 +252,6 @@ define void @vpscatter_nxv2i64_truncstore_nxv2i16( %val, , , , i32) - define void @vpscatter_nxv4i16( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv4i16: ; RV32: # %bb.0: @@ -298,8 +284,6 @@ define void @vpscatter_truemask_nxv4i16( %val, , , , i32) - define void @vpscatter_nxv8i16( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv8i16: ; RV32: # %bb.0: @@ -406,7 +390,6 @@ define void @vpscatter_baseidx_nxv8i16( %val, ptr %base, @llvm.vp.sext.nxv8i16.nxv8i32(, , i32) define void @vpscatter_baseidx_vpsext_nxv8i16_nxv8i16( %val, ptr %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_vpsext_nxv8i16_nxv8i16: ; RV32: # %bb.0: @@ -429,7 +412,6 @@ define void @vpscatter_baseidx_vpsext_nxv8i16_nxv8i16( %val, p ret void } -declare @llvm.vp.zext.nxv8i16.nxv8i32(, , i32) define void @vpscatter_baseidx_vpzext_nxv8i16_nxv8i16( %val, ptr %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_vpzext_nxv8i16_nxv8i16: ; RV32: # 
%bb.0: @@ -452,7 +434,6 @@ define void @vpscatter_baseidx_vpzext_nxv8i16_nxv8i16( %val, p ret void } -declare @llvm.vp.sext.nxv8i32.nxv8i64(, , i32) define void @vpscatter_baseidx_vpsext_nxv8i32_nxv8i16( %val, ptr %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_vpsext_nxv8i32_nxv8i16: ; RV32: # %bb.0: @@ -478,7 +459,6 @@ define void @vpscatter_baseidx_vpsext_nxv8i32_nxv8i16( %val, p ret void } -declare @llvm.vp.zext.nxv8i32.nxv8i64(, , i32) define void @vpscatter_baseidx_vpzext_nxv8i32_nxv8i16( %val, ptr %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_vpzext_nxv8i32_nxv8i16: ; RV32: # %bb.0: @@ -504,8 +484,6 @@ define void @vpscatter_baseidx_vpzext_nxv8i32_nxv8i16( %val, p ret void } -declare void @llvm.vp.scatter.nxv1i32.nxv1p0(, , , i32) - define void @vpscatter_nxv1i32( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv1i32: ; RV32: # %bb.0: @@ -522,8 +500,6 @@ define void @vpscatter_nxv1i32( %val, %ptrs ret void } -declare void @llvm.vp.scatter.nxv2i32.nxv2p0(, , , i32) - define void @vpscatter_nxv2i32( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv2i32: ; RV32: # %bb.0: @@ -559,8 +535,6 @@ define void @vpscatter_nxv2i64_truncstore_nxv2i32( %val, , , , i32) - define void @vpscatter_nxv4i32( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv4i32: ; RV32: # %bb.0: @@ -593,8 +567,6 @@ define void @vpscatter_truemask_nxv4i32( %val, , , , i32) - define void @vpscatter_nxv8i32( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv8i32: ; RV32: # %bb.0: @@ -771,8 +743,6 @@ define void @vpscatter_baseidx_nxv8i32( %val, ptr %base, , , , i32) - define void @vpscatter_nxv1i64( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv1i64: ; RV32: # %bb.0: @@ -789,8 +759,6 @@ define void @vpscatter_nxv1i64( %val, %ptrs ret void } -declare void @llvm.vp.scatter.nxv2i64.nxv2p0(, , , i32) - define void @vpscatter_nxv2i64( %val, %ptrs, %m, i32 zeroext %evl) { ; 
RV32-LABEL: vpscatter_nxv2i64: ; RV32: # %bb.0: @@ -807,8 +775,6 @@ define void @vpscatter_nxv2i64( %val, %ptrs ret void } -declare void @llvm.vp.scatter.nxv4i64.nxv4p0(, , , i32) - define void @vpscatter_nxv4i64( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv4i64: ; RV32: # %bb.0: @@ -841,8 +807,6 @@ define void @vpscatter_truemask_nxv4i64( %val, , , , i32) - define void @vpscatter_nxv8i64( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv8i64: ; RV32: # %bb.0: @@ -1086,8 +1050,6 @@ define void @vpscatter_baseidx_nxv8i64( %val, ptr %base, , , , i32) - define void @vpscatter_nxv1bf16( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv1bf16: ; RV32: # %bb.0: @@ -1104,8 +1066,6 @@ define void @vpscatter_nxv1bf16( %val, % ret void } -declare void @llvm.vp.scatter.nxv2bf16.nxv2p0(, , , i32) - define void @vpscatter_nxv2bf16( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv2bf16: ; RV32: # %bb.0: @@ -1122,8 +1082,6 @@ define void @vpscatter_nxv2bf16( %val, % ret void } -declare void @llvm.vp.scatter.nxv4bf16.nxv4p0(, , , i32) - define void @vpscatter_nxv4bf16( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv4bf16: ; RV32: # %bb.0: @@ -1156,8 +1114,6 @@ define void @vpscatter_truemask_nxv4bf16( %val, , , , i32) - define void @vpscatter_nxv8bf16( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv8bf16: ; RV32: # %bb.0: @@ -1264,8 +1220,6 @@ define void @vpscatter_baseidx_nxv8bf16( %val, ptr %base, < ret void } -declare void @llvm.vp.scatter.nxv1f16.nxv1p0(, , , i32) - define void @vpscatter_nxv1f16( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv1f16: ; RV32: # %bb.0: @@ -1282,8 +1236,6 @@ define void @vpscatter_nxv1f16( %val, %ptr ret void } -declare void @llvm.vp.scatter.nxv2f16.nxv2p0(, , , i32) - define void @vpscatter_nxv2f16( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv2f16: ; RV32: # %bb.0: @@ -1300,8 +1252,6 @@ define void 
@vpscatter_nxv2f16( %val, %ptr ret void } -declare void @llvm.vp.scatter.nxv4f16.nxv4p0(, , , i32) - define void @vpscatter_nxv4f16( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv4f16: ; RV32: # %bb.0: @@ -1334,8 +1284,6 @@ define void @vpscatter_truemask_nxv4f16( %val, , , , i32) - define void @vpscatter_nxv8f16( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv8f16: ; RV32: # %bb.0: @@ -1442,8 +1390,6 @@ define void @vpscatter_baseidx_nxv8f16( %val, ptr %base, , , , i32) - define void @vpscatter_nxv1f32( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv1f32: ; RV32: # %bb.0: @@ -1460,8 +1406,6 @@ define void @vpscatter_nxv1f32( %val, %pt ret void } -declare void @llvm.vp.scatter.nxv2f32.nxv2p0(, , , i32) - define void @vpscatter_nxv2f32( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv2f32: ; RV32: # %bb.0: @@ -1478,8 +1422,6 @@ define void @vpscatter_nxv2f32( %val, %pt ret void } -declare void @llvm.vp.scatter.nxv4f32.nxv4p0(, , , i32) - define void @vpscatter_nxv4f32( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv4f32: ; RV32: # %bb.0: @@ -1512,8 +1454,6 @@ define void @vpscatter_truemask_nxv4f32( %val, , , , i32) - define void @vpscatter_nxv8f32( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv8f32: ; RV32: # %bb.0: @@ -1690,8 +1630,6 @@ define void @vpscatter_baseidx_nxv8f32( %val, ptr %base, , , , i32) - define void @vpscatter_nxv1f64( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv1f64: ; RV32: # %bb.0: @@ -1708,8 +1646,6 @@ define void @vpscatter_nxv1f64( %val, %p ret void } -declare void @llvm.vp.scatter.nxv2f64.nxv2p0(, , , i32) - define void @vpscatter_nxv2f64( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv2f64: ; RV32: # %bb.0: @@ -1726,8 +1662,6 @@ define void @vpscatter_nxv2f64( %val, %p ret void } -declare void @llvm.vp.scatter.nxv4f64.nxv4p0(, , , i32) - define void @vpscatter_nxv4f64( %val, %ptrs, %m, i32 zeroext 
%evl) { ; RV32-LABEL: vpscatter_nxv4f64: ; RV32: # %bb.0: @@ -1760,8 +1694,6 @@ define void @vpscatter_truemask_nxv4f64( %val, , , , i32) - define void @vpscatter_nxv6f64( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv6f64: ; RV32: # %bb.0: @@ -2005,8 +1937,6 @@ define void @vpscatter_baseidx_nxv6f64( %val, ptr %base, , , , i32) - define void @vpscatter_nxv8f64( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv8f64: ; RV32: # %bb.0: @@ -2250,8 +2180,6 @@ define void @vpscatter_baseidx_nxv8f64( %val, ptr %base, , , , i32) - define void @vpscatter_nxv16f64( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv16f64: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/vpstore.ll index 982ec218e4688..9fd8b9d23cb5e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vpstore.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vpstore.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+zvfbfmin,+v \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare void @llvm.vp.store.nxv1i8.p0(, ptr, , i32) - define void @vpstore_nxv1i8( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv1i8: ; CHECK: # %bb.0: @@ -20,8 +18,6 @@ define void @vpstore_nxv1i8( %val, ptr %ptr, ret void } -declare void @llvm.vp.store.nxv2i8.p0(, ptr, , i32) - define void @vpstore_nxv2i8( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv2i8: ; CHECK: # %bb.0: @@ -32,8 +28,6 @@ define void @vpstore_nxv2i8( %val, ptr %ptr, ret void } -declare void @llvm.vp.store.nxv3i8.p0(, ptr, , i32) - define void @vpstore_nxv3i8( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv3i8: ; CHECK: # %bb.0: @@ -44,8 +38,6 @@ define void @vpstore_nxv3i8( %val, ptr %ptr, ret void } -declare void @llvm.vp.store.nxv4i8.p0(, ptr, , i32) - define void @vpstore_nxv4i8( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv4i8: ; CHECK: # %bb.0: @@ -56,8 +48,6 @@ define void @vpstore_nxv4i8( 
%val, ptr %ptr, ret void } -declare void @llvm.vp.store.nxv8i8.p0(, ptr, , i32) - define void @vpstore_nxv8i8( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv8i8: ; CHECK: # %bb.0: @@ -68,8 +58,6 @@ define void @vpstore_nxv8i8( %val, ptr %ptr, ret void } -declare void @llvm.vp.store.nxv1i16.p0(, ptr, , i32) - define void @vpstore_nxv1i16( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv1i16: ; CHECK: # %bb.0: @@ -80,8 +68,6 @@ define void @vpstore_nxv1i16( %val, ptr %ptr, , ptr, , i32) - define void @vpstore_nxv2i16( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv2i16: ; CHECK: # %bb.0: @@ -92,8 +78,6 @@ define void @vpstore_nxv2i16( %val, ptr %ptr, , ptr, , i32) - define void @vpstore_nxv4i16( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv4i16: ; CHECK: # %bb.0: @@ -104,8 +88,6 @@ define void @vpstore_nxv4i16( %val, ptr %ptr, , *, , i32) - define void @vpstore_nxv8i12( %val, * %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv8i12: ; CHECK: # %bb.0: @@ -116,8 +98,6 @@ define void @vpstore_nxv8i12( %val, * %ptr, ret void } -declare void @llvm.vp.store.nxv8i16.p0(, ptr, , i32) - define void @vpstore_nxv8i16( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv8i16: ; CHECK: # %bb.0: @@ -128,8 +108,6 @@ define void @vpstore_nxv8i16( %val, ptr %ptr, , ptr, , i32) - define void @vpstore_nxv1i32( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv1i32: ; CHECK: # %bb.0: @@ -140,8 +118,6 @@ define void @vpstore_nxv1i32( %val, ptr %ptr, , ptr, , i32) - define void @vpstore_nxv2i32( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv2i32: ; CHECK: # %bb.0: @@ -152,8 +128,6 @@ define void @vpstore_nxv2i32( %val, ptr %ptr, , ptr, , i32) - define void @vpstore_nxv4i32( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv4i32: ; CHECK: # %bb.0: @@ -164,8 +138,6 @@ define void @vpstore_nxv4i32( %val, ptr %ptr, , ptr, , i32) - define void 
@vpstore_nxv8i32( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv8i32: ; CHECK: # %bb.0: @@ -176,8 +148,6 @@ define void @vpstore_nxv8i32( %val, ptr %ptr, , ptr, , i32) - define void @vpstore_nxv1i64( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv1i64: ; CHECK: # %bb.0: @@ -188,8 +158,6 @@ define void @vpstore_nxv1i64( %val, ptr %ptr, , ptr, , i32) - define void @vpstore_nxv2i64( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv2i64: ; CHECK: # %bb.0: @@ -200,8 +168,6 @@ define void @vpstore_nxv2i64( %val, ptr %ptr, , ptr, , i32) - define void @vpstore_nxv4i64( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv4i64: ; CHECK: # %bb.0: @@ -212,8 +178,6 @@ define void @vpstore_nxv4i64( %val, ptr %ptr, , ptr, , i32) - define void @vpstore_nxv8i64( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv8i64: ; CHECK: # %bb.0: @@ -224,8 +188,6 @@ define void @vpstore_nxv8i64( %val, ptr %ptr, , ptr, , i32) - define void @vpstore_nxv1bf16( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv1bf16: ; CHECK: # %bb.0: @@ -236,8 +198,6 @@ define void @vpstore_nxv1bf16( %val, ptr %ptr, , ptr, , i32) - define void @vpstore_nxv2bf16( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv2bf16: ; CHECK: # %bb.0: @@ -248,8 +208,6 @@ define void @vpstore_nxv2bf16( %val, ptr %ptr, , ptr, , i32) - define void @vpstore_nxv4bf16( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv4bf16: ; CHECK: # %bb.0: @@ -260,8 +218,6 @@ define void @vpstore_nxv4bf16( %val, ptr %ptr, , ptr, , i32) - define void @vpstore_nxv8bf16( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv8bf16: ; CHECK: # %bb.0: @@ -272,8 +228,6 @@ define void @vpstore_nxv8bf16( %val, ptr %ptr, , ptr, , i32) - define void @vpstore_nxv1f16( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv1f16: ; CHECK: # %bb.0: @@ -284,8 +238,6 @@ define void @vpstore_nxv1f16( %val, ptr 
%ptr, , ptr, , i32) - define void @vpstore_nxv2f16( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv2f16: ; CHECK: # %bb.0: @@ -296,8 +248,6 @@ define void @vpstore_nxv2f16( %val, ptr %ptr, , ptr, , i32) - define void @vpstore_nxv4f16( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv4f16: ; CHECK: # %bb.0: @@ -308,8 +258,6 @@ define void @vpstore_nxv4f16( %val, ptr %ptr, , ptr, , i32) - define void @vpstore_nxv8f16( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv8f16: ; CHECK: # %bb.0: @@ -320,8 +268,6 @@ define void @vpstore_nxv8f16( %val, ptr %ptr, , ptr, , i32) - define void @vpstore_nxv1f32( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv1f32: ; CHECK: # %bb.0: @@ -332,8 +278,6 @@ define void @vpstore_nxv1f32( %val, ptr %ptr, , ptr, , i32) - define void @vpstore_nxv2f32( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv2f32: ; CHECK: # %bb.0: @@ -344,8 +288,6 @@ define void @vpstore_nxv2f32( %val, ptr %ptr, , ptr, , i32) - define void @vpstore_nxv4f32( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv4f32: ; CHECK: # %bb.0: @@ -356,8 +298,6 @@ define void @vpstore_nxv4f32( %val, ptr %ptr, , ptr, , i32) - define void @vpstore_nxv8f32( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv8f32: ; CHECK: # %bb.0: @@ -368,8 +308,6 @@ define void @vpstore_nxv8f32( %val, ptr %ptr, , ptr, , i32) - define void @vpstore_nxv1f64( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv1f64: ; CHECK: # %bb.0: @@ -380,8 +318,6 @@ define void @vpstore_nxv1f64( %val, ptr %ptr, , ptr, , i32) - define void @vpstore_nxv2f64( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv2f64: ; CHECK: # %bb.0: @@ -392,8 +328,6 @@ define void @vpstore_nxv2f64( %val, ptr %ptr, , ptr, , i32) - define void @vpstore_nxv4f64( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv4f64: ; CHECK: # %bb.0: @@ -404,8 +338,6 @@ define void 
@vpstore_nxv4f64( %val, ptr %ptr, , ptr, , i32) - define void @vpstore_nxv8f64( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv8f64: ; CHECK: # %bb.0: @@ -426,8 +358,6 @@ define void @vpstore_nxv1i8_allones_mask( %val, ptr %ptr, i32 z ret void } -declare void @llvm.vp.store.nxv16f64.p0(, ptr, , i32) - define void @vpstore_nxv16f64( %val, ptr %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv16f64: ; CHECK: # %bb.0: @@ -455,8 +385,6 @@ define void @vpstore_nxv16f64( %val, ptr %ptr, , ptr, , i32) - ; Widen to nxv32f64 then split into 4 x nxv8f64, of which 1 is empty. define void @vpstore_nxv17f64( %val, ptr %ptr, %m, i32 zeroext %evl) { diff --git a/llvm/test/CodeGen/RISCV/rvv/vqdot.ll b/llvm/test/CodeGen/RISCV/rvv/vqdot.ll index c04b1925b749a..6014c8aceb599 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vqdot.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vqdot.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+zve64x,+experimental-zvqdotq \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vqdot.nxv1i32.nxv1i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vqdot_vv_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdot_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -26,12 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vqdot.nxv2i32.nxv2i32( - , - , - , - iXLen, - iXLen); define @intrinsic_vqdot_vv_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdot_vv_nxv2i32_nxv2i32: @@ -48,12 +35,6 @@ entry: ret %a } -declare @llvm.riscv.vqdot.nxv4i32.nxv4i32( - , - , - , - iXLen, - iXLen); define @intrinsic_vqdot_vv_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdot_vv_nxv4i32_nxv4i32: @@ -70,12 +51,6 @@ entry: ret %a } -declare @llvm.riscv.vqdot.nxv8i32.nxv8i32( - , - , - , - iXLen, - iXLen); define @intrinsic_vqdot_vv_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vqdot_vv_nxv8i32_nxv8i32: @@ -92,12 +67,6 @@ entry: ret %a } -declare @llvm.riscv.vqdot.nxv16i32.nxv16i32( - , - , - , - iXLen, - iXLen); define @intrinsic_vqdot_vv_nxv16i32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdot_vv_nxv16i32_nxv16i32: @@ -115,13 +84,6 @@ entry: ret %a } -declare @llvm.riscv.vqdot.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); define @intrinsic_vqdot_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdot_mask_vv_nxv1i32_nxv1i32: @@ -139,13 +101,6 @@ entry: ret %a } -declare @llvm.riscv.vqdot.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); define @intrinsic_vqdot_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdot_mask_vv_nxv2i32_nxv2i32: @@ -163,13 +118,6 @@ entry: ret %a } -declare @llvm.riscv.vqdot.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen); define @intrinsic_vqdot_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdot_mask_vv_nxv4i32_nxv4i32: @@ -187,13 +135,6 @@ entry: ret %a } -declare @llvm.riscv.vqdot.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); define @intrinsic_vqdot_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdot_mask_vv_nxv8i32_nxv8i32: @@ -211,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vqdot.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, - iXLen); define @intrinsic_vqdot_mask_vv_nxv16i32_nxv16i32( %0, %1, %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdot_mask_vv_nxv16i32_nxv16i32: @@ -236,12 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vqdot.nxv1i32.i32( - , - , - i32, - iXLen, - iXLen); define @intrinsic_vqdot_vx_nxv1i32_i32( %0, %1, i32 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdot_vx_nxv1i32_i32: @@ -258,12 +186,6 @@ entry: ret %a } -declare @llvm.riscv.vqdot.nxv2i32.i32( - , - , - i32, - iXLen, - iXLen); define @intrinsic_vqdot_vx_nxv2i32_i32( %0, %1, i32 
%2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdot_vx_nxv2i32_i32: @@ -280,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vqdot.nxv4i32.i32( - , - , - i32, - iXLen, - iXLen); define @intrinsic_vqdot_vx_nxv4i32_i32( %0, %1, i32 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdot_vx_nxv4i32_i32: @@ -302,12 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vqdot.nxv8i32.i32( - , - , - i32, - iXLen, - iXLen); define @intrinsic_vqdot_vx_nxv8i32_i32( %0, %1, i32 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdot_vx_nxv8i32_i32: @@ -324,12 +234,6 @@ entry: ret %a } -declare @llvm.riscv.vqdot.nxv16i32.i32( - , - , - i32, - iXLen, - iXLen); define @intrinsic_vqdot_vx_nxv16i32_i32( %0, %1, i32 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdot_vx_nxv16i32_i32: @@ -346,13 +250,6 @@ entry: ret %a } -declare @llvm.riscv.vqdot.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen); define @intrinsic_vqdot_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdot_mask_vx_nxv1i32_i32: @@ -370,13 +267,6 @@ entry: ret %a } -declare @llvm.riscv.vqdot.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen); define @intrinsic_vqdot_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdot_mask_vx_nxv2i32_i32: @@ -394,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vqdot.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen); define @intrinsic_vqdot_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdot_mask_vx_nxv4i32_i32: @@ -418,13 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vqdot.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen); define @intrinsic_vqdot_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdot_mask_vx_nxv8i32_i32: @@ -442,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vqdot.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, - iXLen); define @intrinsic_vqdot_mask_vx_nxv16i32_i32( %0, %1, i32 %2, %m, 
iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdot_mask_vx_nxv16i32_i32: diff --git a/llvm/test/CodeGen/RISCV/rvv/vqdotsu.ll b/llvm/test/CodeGen/RISCV/rvv/vqdotsu.ll index 904ff293d3847..f54ec9b4308fd 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vqdotsu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vqdotsu.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+zve64x,+experimental-zvqdotq \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vqdotsu.nxv1i32.nxv1i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vqdotsu_vv_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotsu_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -26,12 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotsu.nxv2i32.nxv2i32( - , - , - , - iXLen, - iXLen); define @intrinsic_vqdotsu_vv_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotsu_vv_nxv2i32_nxv2i32: @@ -48,12 +35,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotsu.nxv4i32.nxv4i32( - , - , - , - iXLen, - iXLen); define @intrinsic_vqdotsu_vv_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotsu_vv_nxv4i32_nxv4i32: @@ -70,12 +51,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotsu.nxv8i32.nxv8i32( - , - , - , - iXLen, - iXLen); define @intrinsic_vqdotsu_vv_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotsu_vv_nxv8i32_nxv8i32: @@ -92,12 +67,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotsu.nxv16i32.nxv16i32( - , - , - , - iXLen, - iXLen); define @intrinsic_vqdotsu_vv_nxv16i32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotsu_vv_nxv16i32_nxv16i32: @@ -115,13 +84,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotsu.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); define @intrinsic_vqdotsu_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotsu_mask_vv_nxv1i32_nxv1i32: @@ -139,13 +101,6 @@ entry: ret %a } 
-declare @llvm.riscv.vqdotsu.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); define @intrinsic_vqdotsu_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotsu_mask_vv_nxv2i32_nxv2i32: @@ -163,13 +118,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotsu.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen); define @intrinsic_vqdotsu_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotsu_mask_vv_nxv4i32_nxv4i32: @@ -187,13 +135,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotsu.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); define @intrinsic_vqdotsu_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotsu_mask_vv_nxv8i32_nxv8i32: @@ -211,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotsu.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, - iXLen); define @intrinsic_vqdotsu_mask_vv_nxv16i32_nxv16i32( %0, %1, %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotsu_mask_vv_nxv16i32_nxv16i32: @@ -236,12 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotsu.nxv1i32.i32( - , - , - i32, - iXLen, - iXLen); define @intrinsic_vqdotsu_vx_nxv1i32_i32( %0, %1, i32 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotsu_vx_nxv1i32_i32: @@ -258,12 +186,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotsu.nxv2i32.i32( - , - , - i32, - iXLen, - iXLen); define @intrinsic_vqdotsu_vx_nxv2i32_i32( %0, %1, i32 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotsu_vx_nxv2i32_i32: @@ -280,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotsu.nxv4i32.i32( - , - , - i32, - iXLen, - iXLen); define @intrinsic_vqdotsu_vx_nxv4i32_i32( %0, %1, i32 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotsu_vx_nxv4i32_i32: @@ -302,12 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotsu.nxv8i32.i32( - , - , - i32, - iXLen, - iXLen); define @intrinsic_vqdotsu_vx_nxv8i32_i32( %0, %1, i32 %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vqdotsu_vx_nxv8i32_i32: @@ -324,12 +234,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotsu.nxv16i32.i32( - , - , - i32, - iXLen, - iXLen); define @intrinsic_vqdotsu_vx_nxv16i32_i32( %0, %1, i32 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotsu_vx_nxv16i32_i32: @@ -346,13 +250,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotsu.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen); define @intrinsic_vqdotsu_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotsu_mask_vx_nxv1i32_i32: @@ -370,13 +267,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotsu.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen); define @intrinsic_vqdotsu_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotsu_mask_vx_nxv2i32_i32: @@ -394,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotsu.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen); define @intrinsic_vqdotsu_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotsu_mask_vx_nxv4i32_i32: @@ -418,13 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotsu.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen); define @intrinsic_vqdotsu_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotsu_mask_vx_nxv8i32_i32: @@ -442,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotsu.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, - iXLen); define @intrinsic_vqdotsu_mask_vx_nxv16i32_i32( %0, %1, i32 %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotsu_mask_vx_nxv16i32_i32: diff --git a/llvm/test/CodeGen/RISCV/rvv/vqdotu.ll b/llvm/test/CodeGen/RISCV/rvv/vqdotu.ll index 2e6528da43b35..1c9f42ead5f70 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vqdotu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vqdotu.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+zve64x,+experimental-zvqdotq \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK 
-declare @llvm.riscv.vqdotu.nxv1i32.nxv1i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vqdotu_vv_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotu_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -26,12 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotu.nxv2i32.nxv2i32( - , - , - , - iXLen, - iXLen); define @intrinsic_vqdotu_vv_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotu_vv_nxv2i32_nxv2i32: @@ -48,12 +35,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotu.nxv4i32.nxv4i32( - , - , - , - iXLen, - iXLen); define @intrinsic_vqdotu_vv_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotu_vv_nxv4i32_nxv4i32: @@ -70,12 +51,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotu.nxv8i32.nxv8i32( - , - , - , - iXLen, - iXLen); define @intrinsic_vqdotu_vv_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotu_vv_nxv8i32_nxv8i32: @@ -92,12 +67,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotu.nxv16i32.nxv16i32( - , - , - , - iXLen, - iXLen); define @intrinsic_vqdotu_vv_nxv16i32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotu_vv_nxv16i32_nxv16i32: @@ -115,13 +84,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotu.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); define @intrinsic_vqdotu_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotu_mask_vv_nxv1i32_nxv1i32: @@ -139,13 +101,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotu.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); define @intrinsic_vqdotu_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotu_mask_vv_nxv2i32_nxv2i32: @@ -163,13 +118,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotu.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen); define @intrinsic_vqdotu_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vqdotu_mask_vv_nxv4i32_nxv4i32: @@ -187,13 +135,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotu.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); define @intrinsic_vqdotu_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotu_mask_vv_nxv8i32_nxv8i32: @@ -211,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotu.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, - iXLen); define @intrinsic_vqdotu_mask_vv_nxv16i32_nxv16i32( %0, %1, %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotu_mask_vv_nxv16i32_nxv16i32: @@ -236,12 +170,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotu.nxv1i32.i32( - , - , - i32, - iXLen, - iXLen); define @intrinsic_vqdotu_vx_nxv1i32_i32( %0, %1, i32 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotu_vx_nxv1i32_i32: @@ -258,12 +186,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotu.nxv2i32.i32( - , - , - i32, - iXLen, - iXLen); define @intrinsic_vqdotu_vx_nxv2i32_i32( %0, %1, i32 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotu_vx_nxv2i32_i32: @@ -280,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotu.nxv4i32.i32( - , - , - i32, - iXLen, - iXLen); define @intrinsic_vqdotu_vx_nxv4i32_i32( %0, %1, i32 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotu_vx_nxv4i32_i32: @@ -302,12 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotu.nxv8i32.i32( - , - , - i32, - iXLen, - iXLen); define @intrinsic_vqdotu_vx_nxv8i32_i32( %0, %1, i32 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotu_vx_nxv8i32_i32: @@ -324,12 +234,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotu.nxv16i32.i32( - , - , - i32, - iXLen, - iXLen); define @intrinsic_vqdotu_vx_nxv16i32_i32( %0, %1, i32 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotu_vx_nxv16i32_i32: @@ -346,13 +250,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotu.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen); define @intrinsic_vqdotu_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vqdotu_mask_vx_nxv1i32_i32: @@ -370,13 +267,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotu.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen); define @intrinsic_vqdotu_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotu_mask_vx_nxv2i32_i32: @@ -394,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotu.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen); define @intrinsic_vqdotu_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotu_mask_vx_nxv4i32_i32: @@ -418,13 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotu.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen); define @intrinsic_vqdotu_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotu_mask_vx_nxv8i32_i32: @@ -442,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotu.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, - iXLen); define @intrinsic_vqdotu_mask_vx_nxv16i32_i32( %0, %1, i32 %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotu_mask_vx_nxv16i32_i32: diff --git a/llvm/test/CodeGen/RISCV/rvv/vqdotus.ll b/llvm/test/CodeGen/RISCV/rvv/vqdotus.ll index 94413369dd995..e4df6e146eef2 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vqdotus.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vqdotus.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+zve64x,+experimental-zvqdotq \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vqdotus.nxv1i32.i32( - , - , - i32, - iXLen, - iXLen); - define @intrinsic_vqdotus_vx_nxv1i32_i32( %0, %1, i32 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotus_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -26,12 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotus.nxv2i32.i32( - , - , - i32, - iXLen, - iXLen); define @intrinsic_vqdotus_vx_nxv2i32_i32( %0, %1, i32 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotus_vx_nxv2i32_i32: @@ -48,12 +35,6 @@ entry: ret %a } 
-declare @llvm.riscv.vqdotus.nxv4i32.i32( - , - , - i32, - iXLen, - iXLen); define @intrinsic_vqdotus_vx_nxv4i32_i32( %0, %1, i32 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotus_vx_nxv4i32_i32: @@ -70,12 +51,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotus.nxv8i32.i32( - , - , - i32, - iXLen, - iXLen); define @intrinsic_vqdotus_vx_nxv8i32_i32( %0, %1, i32 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotus_vx_nxv8i32_i32: @@ -92,12 +67,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotus.nxv16i32.i32( - , - , - i32, - iXLen, - iXLen); define @intrinsic_vqdotus_vx_nxv16i32_i32( %0, %1, i32 %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotus_vx_nxv16i32_i32: @@ -114,13 +83,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotus.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen); define @intrinsic_vqdotus_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotus_mask_vx_nxv1i32_i32: @@ -138,13 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotus.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen); define @intrinsic_vqdotus_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotus_mask_vx_nxv2i32_i32: @@ -162,13 +117,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotus.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen); define @intrinsic_vqdotus_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotus_mask_vx_nxv4i32_i32: @@ -186,13 +134,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotus.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen); define @intrinsic_vqdotus_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vqdotus_mask_vx_nxv8i32_i32: @@ -210,13 +151,6 @@ entry: ret %a } -declare @llvm.riscv.vqdotus.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, - iXLen); define @intrinsic_vqdotus_mask_vx_nxv16i32_i32( %0, %1, i32 %2, %m, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vqdotus_mask_vx_nxv16i32_i32: diff --git a/llvm/test/CodeGen/RISCV/rvv/vredand.ll b/llvm/test/CodeGen/RISCV/rvv/vredand.ll index e33a821e38487..b10cf0ad763c0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vredand.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vredand.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vredand.nxv8i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv8i8.nxv1i8( - , - , - , - , - iXLen); - define @intrinsic_vredand_mask_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.nxv8i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vredand_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv8i8.nxv2i8( - , - , - , - , - iXLen); - define @intrinsic_vredand_mask_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.nxv8i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vredand_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv8i8.nxv4i8( - , - , - , - , - iXLen); - define @intrinsic_vredand_mask_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vredand_mask_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vredand_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen); - define @intrinsic_vredand_mask_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.nxv8i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vredand_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv8i8.nxv16i8( - , - , - , - , - iXLen); - define @intrinsic_vredand_mask_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.nxv8i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vredand_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv8i8.nxv32i8( - , - , - , - , - iXLen); - define @intrinsic_vredand_mask_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -280,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.nxv4i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vredand_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; 
CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -302,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv4i16.nxv1i16( - , - , - , - , - iXLen); - define @intrinsic_vredand_mask_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -326,12 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.nxv4i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vredand_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -348,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv4i16.nxv2i16( - , - , - , - , - iXLen); - define @intrinsic_vredand_mask_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -372,12 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vredand_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -394,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vredand_mask_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -418,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.nxv4i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vredand_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -440,13 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv4i16.nxv8i16( - , - , - , - , - iXLen); - define 
@intrinsic_vredand_mask_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -464,12 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.nxv4i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vredand_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -486,13 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv4i16.nxv16i16( - , - , - , - , - iXLen); - define @intrinsic_vredand_mask_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -510,12 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.nxv4i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vredand_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -532,13 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv4i16.nxv32i16( - , - , - , - , - iXLen); - define @intrinsic_vredand_mask_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -556,12 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.nxv2i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vredand_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -578,13 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv2i32.nxv1i32( - , - , - , - , - iXLen); - define @intrinsic_vredand_mask_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -602,12 +433,6 @@ entry: ret %a } 
-declare @llvm.riscv.vredand.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vredand_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -624,13 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vredand_mask_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -648,12 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.nxv2i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vredand_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -670,13 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv2i32.nxv4i32( - , - , - , - , - iXLen); - define @intrinsic_vredand_mask_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -694,12 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.nxv2i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vredand_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -716,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv2i32.nxv8i32( - , - , - , - , - iXLen); - define @intrinsic_vredand_mask_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -740,12 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.nxv2i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vredand_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: 
# %entry @@ -762,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv2i32.nxv16i32( - , - , - , - , - iXLen); - define @intrinsic_vredand_mask_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -786,12 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vredand_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -808,13 +581,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vredand_mask_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -832,12 +598,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.nxv1i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vredand_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -854,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv1i64.nxv2i64( - , - , - , - , - iXLen); - define @intrinsic_vredand_mask_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -878,12 +631,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.nxv1i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vredand_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,13 +647,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv1i64.nxv4i64( - , - , - , - , - iXLen); - define @intrinsic_vredand_mask_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vredand_mask_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -924,12 +664,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.nxv1i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vredand_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -946,13 +680,6 @@ entry: ret %a } -declare @llvm.riscv.vredand.mask.nxv1i64.nxv8i64( - , - , - , - , - iXLen); - define @intrinsic_vredand_mask_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vredmax.ll b/llvm/test/CodeGen/RISCV/rvv/vredmax.ll index 52ace9b687b80..faa14eba96513 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vredmax.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vredmax.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vredmax.nxv8i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vredmax_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8( - , - , - , - , - iXLen); - define @intrinsic_vredmax_mask_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.nxv8i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vredmax_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8( - , - , - , - , - iXLen); - define @intrinsic_vredmax_mask_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, 
%3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.nxv8i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vredmax_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8( - , - , - , - , - iXLen); - define @intrinsic_vredmax_mask_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vredmax_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen); - define @intrinsic_vredmax_mask_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.nxv8i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vredmax_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8( - , - , - , - , - iXLen); - define @intrinsic_vredmax_mask_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.nxv8i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vredmax_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, 
%2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8( - , - , - , - , - iXLen); - define @intrinsic_vredmax_mask_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -280,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.nxv4i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vredmax_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -302,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16( - , - , - , - , - iXLen); - define @intrinsic_vredmax_mask_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -326,12 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.nxv4i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vredmax_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -348,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16( - , - , - , - , - iXLen); - define @intrinsic_vredmax_mask_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -372,12 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vredmax_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -394,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen); - define 
@intrinsic_vredmax_mask_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -418,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.nxv4i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vredmax_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -440,13 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16( - , - , - , - , - iXLen); - define @intrinsic_vredmax_mask_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -464,12 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.nxv4i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vredmax_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -486,13 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16( - , - , - , - , - iXLen); - define @intrinsic_vredmax_mask_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -510,12 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.nxv4i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vredmax_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -532,13 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16( - , - , - , - , - iXLen); - define @intrinsic_vredmax_mask_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -556,12 +400,6 @@ entry: ret %a } 
-declare @llvm.riscv.vredmax.nxv2i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vredmax_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -578,13 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32( - , - , - , - , - iXLen); - define @intrinsic_vredmax_mask_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -602,12 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vredmax_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -624,13 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vredmax_mask_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -648,12 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.nxv2i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vredmax_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -670,13 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32( - , - , - , - , - iXLen); - define @intrinsic_vredmax_mask_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -694,12 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.nxv2i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vredmax_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # 
%entry @@ -716,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32( - , - , - , - , - iXLen); - define @intrinsic_vredmax_mask_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -740,12 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.nxv2i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vredmax_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -762,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32( - , - , - , - , - iXLen); - define @intrinsic_vredmax_mask_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -786,12 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vredmax_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -808,13 +581,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vredmax_mask_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -832,12 +598,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.nxv1i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vredmax_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -854,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64( - , - , - , - , - iXLen); - define @intrinsic_vredmax_mask_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -878,12 +631,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.nxv1i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vredmax_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,13 +647,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64( - , - , - , - , - iXLen); - define @intrinsic_vredmax_mask_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -924,12 +664,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.nxv1i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vredmax_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -946,13 +680,6 @@ entry: ret %a } -declare @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64( - , - , - , - , - iXLen); - define @intrinsic_vredmax_mask_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vredmaxu.ll b/llvm/test/CodeGen/RISCV/rvv/vredmaxu.ll index 1a56a66aaa306..dbafe1a9172dd 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vredmaxu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vredmaxu.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vredmaxu.nxv8i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vredmaxu_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8( - , - , - , - , - iXLen); - define 
@intrinsic_vredmaxu_mask_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.nxv8i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vredmaxu_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8( - , - , - , - , - iXLen); - define @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.nxv8i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vredmaxu_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8( - , - , - , - , - iXLen); - define @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vredmaxu_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen); - define @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.nxv8i8.nxv16i8( - , - , - , - 
iXLen); - define @intrinsic_vredmaxu_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8( - , - , - , - , - iXLen); - define @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.nxv8i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vredmaxu_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8( - , - , - , - , - iXLen); - define @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -280,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.nxv4i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vredmaxu_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -302,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16( - , - , - , - , - iXLen); - define @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -326,12 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.nxv4i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vredmaxu_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -348,13 +251,6 @@ entry: ret %a } -declare 
@llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16( - , - , - , - , - iXLen); - define @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -372,12 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vredmaxu_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -394,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -418,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.nxv4i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vredmaxu_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -440,13 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16( - , - , - , - , - iXLen); - define @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -464,12 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.nxv4i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vredmaxu_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -486,13 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16( - , - , - , - , - iXLen); - define @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vredmaxu_mask_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -510,12 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.nxv4i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vredmaxu_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -532,13 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16( - , - , - , - , - iXLen); - define @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -556,12 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.nxv2i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vredmaxu_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -578,13 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32( - , - , - , - , - iXLen); - define @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -602,12 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vredmaxu_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -624,13 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -648,12 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.nxv2i32.nxv4i32( - , - , - , - iXLen); - define 
@intrinsic_vredmaxu_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -670,13 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32( - , - , - , - , - iXLen); - define @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -694,12 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.nxv2i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vredmaxu_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -716,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32( - , - , - , - , - iXLen); - define @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -740,12 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.nxv2i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vredmaxu_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -762,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32( - , - , - , - , - iXLen); - define @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -786,12 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vredmaxu_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -808,13 +581,6 @@ entry: ret %a } 
-declare @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -832,12 +598,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.nxv1i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vredmaxu_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -854,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64( - , - , - , - , - iXLen); - define @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -878,12 +631,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.nxv1i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vredmaxu_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,13 +647,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64( - , - , - , - , - iXLen); - define @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -924,12 +664,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.nxv1i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vredmaxu_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -946,13 +680,6 @@ entry: ret %a } -declare @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64( - , - , - , - , - iXLen); - define @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vredmaxu_mask_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vredmin.ll b/llvm/test/CodeGen/RISCV/rvv/vredmin.ll index 26c7ea86c1e84..b177122a63caf 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vredmin.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vredmin.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vredmin.nxv8i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vredmin_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8( - , - , - , - , - iXLen); - define @intrinsic_vredmin_mask_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.nxv8i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vredmin_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8( - , - , - , - , - iXLen); - define @intrinsic_vredmin_mask_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.nxv8i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vredmin_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8( - , - , - , - , - iXLen); - define @intrinsic_vredmin_mask_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vredmin_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen); - define @intrinsic_vredmin_mask_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.nxv8i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vredmin_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8( - , - , - , - , - iXLen); - define @intrinsic_vredmin_mask_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.nxv8i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vredmin_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8( - , - , - , - , - iXLen); - define @intrinsic_vredmin_mask_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -280,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.nxv4i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vredmin_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, 
iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -302,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16( - , - , - , - , - iXLen); - define @intrinsic_vredmin_mask_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -326,12 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.nxv4i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vredmin_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -348,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16( - , - , - , - , - iXLen); - define @intrinsic_vredmin_mask_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -372,12 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vredmin_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -394,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vredmin_mask_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -418,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.nxv4i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vredmin_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -440,13 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16( - , - , - , - , - iXLen); - define 
@intrinsic_vredmin_mask_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -464,12 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.nxv4i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vredmin_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -486,13 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16( - , - , - , - , - iXLen); - define @intrinsic_vredmin_mask_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -510,12 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.nxv4i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vredmin_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -532,13 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16( - , - , - , - , - iXLen); - define @intrinsic_vredmin_mask_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -556,12 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.nxv2i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vredmin_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -578,13 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32( - , - , - , - , - iXLen); - define @intrinsic_vredmin_mask_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -602,12 +433,6 @@ entry: ret %a } 
-declare @llvm.riscv.vredmin.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vredmin_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -624,13 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vredmin_mask_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -648,12 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.nxv2i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vredmin_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -670,13 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32( - , - , - , - , - iXLen); - define @intrinsic_vredmin_mask_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -694,12 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.nxv2i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vredmin_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -716,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32( - , - , - , - , - iXLen); - define @intrinsic_vredmin_mask_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -740,12 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.nxv2i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vredmin_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: 
# %entry @@ -762,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32( - , - , - , - , - iXLen); - define @intrinsic_vredmin_mask_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -786,12 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vredmin_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -808,13 +581,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vredmin_mask_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -832,12 +598,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.nxv1i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vredmin_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -854,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64( - , - , - , - , - iXLen); - define @intrinsic_vredmin_mask_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -878,12 +631,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.nxv1i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vredmin_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,13 +647,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64( - , - , - , - , - iXLen); - define @intrinsic_vredmin_mask_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vredmin_mask_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -924,12 +664,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.nxv1i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vredmin_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -946,13 +680,6 @@ entry: ret %a } -declare @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64( - , - , - , - , - iXLen); - define @intrinsic_vredmin_mask_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vredminu.ll b/llvm/test/CodeGen/RISCV/rvv/vredminu.ll index 24c16176ecce4..7b3a7a01bcc37 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vredminu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vredminu.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vredminu.nxv8i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vredminu_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8( - , - , - , - , - iXLen); - define @intrinsic_vredminu_mask_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.nxv8i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vredminu_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8( - , - , - , - , - iXLen); - define 
@intrinsic_vredminu_mask_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.nxv8i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vredminu_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8( - , - , - , - , - iXLen); - define @intrinsic_vredminu_mask_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vredminu_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen); - define @intrinsic_vredminu_mask_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.nxv8i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vredminu_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8( - , - , - , - , - iXLen); - define @intrinsic_vredminu_mask_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.nxv8i8.nxv32i8( - , 
- , - , - iXLen); - define @intrinsic_vredminu_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8( - , - , - , - , - iXLen); - define @intrinsic_vredminu_mask_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -280,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.nxv4i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vredminu_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -302,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16( - , - , - , - , - iXLen); - define @intrinsic_vredminu_mask_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -326,12 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.nxv4i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vredminu_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -348,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16( - , - , - , - , - iXLen); - define @intrinsic_vredminu_mask_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -372,12 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vredminu_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -394,13 +284,6 @@ entry: ret 
%a } -declare @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vredminu_mask_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -418,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.nxv4i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vredminu_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -440,13 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16( - , - , - , - , - iXLen); - define @intrinsic_vredminu_mask_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -464,12 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.nxv4i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vredminu_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -486,13 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16( - , - , - , - , - iXLen); - define @intrinsic_vredminu_mask_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -510,12 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.nxv4i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vredminu_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -532,13 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16( - , - , - , - , - iXLen); - define @intrinsic_vredminu_mask_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vredminu_mask_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -556,12 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.nxv2i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vredminu_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -578,13 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32( - , - , - , - , - iXLen); - define @intrinsic_vredminu_mask_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -602,12 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vredminu_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -624,13 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vredminu_mask_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -648,12 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.nxv2i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vredminu_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -670,13 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32( - , - , - , - , - iXLen); - define @intrinsic_vredminu_mask_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -694,12 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.nxv2i32.nxv8i32( - , - , - , - iXLen); - define 
@intrinsic_vredminu_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -716,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32( - , - , - , - , - iXLen); - define @intrinsic_vredminu_mask_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -740,12 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.nxv2i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vredminu_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -762,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32( - , - , - , - , - iXLen); - define @intrinsic_vredminu_mask_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -786,12 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vredminu_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -808,13 +581,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vredminu_mask_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -832,12 +598,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.nxv1i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vredminu_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -854,13 +614,6 @@ entry: ret %a } 
-declare @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64( - , - , - , - , - iXLen); - define @intrinsic_vredminu_mask_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -878,12 +631,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.nxv1i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vredminu_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,13 +647,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64( - , - , - , - , - iXLen); - define @intrinsic_vredminu_mask_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -924,12 +664,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.nxv1i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vredminu_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -946,13 +680,6 @@ entry: ret %a } -declare @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64( - , - , - , - , - iXLen); - define @intrinsic_vredminu_mask_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vredor.ll b/llvm/test/CodeGen/RISCV/rvv/vredor.ll index c25e4de414c4f..7b4b900c2645f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vredor.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vredor.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vredor.nxv8i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vredor_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vredor_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv8i8.nxv1i8( - , - , - , - , - iXLen); - define @intrinsic_vredor_mask_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.nxv8i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vredor_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv8i8.nxv2i8( - , - , - , - , - iXLen); - define @intrinsic_vredor_mask_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.nxv8i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vredor_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv8i8.nxv4i8( - , - , - , - , - iXLen); - define @intrinsic_vredor_mask_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vredor_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen); - define @intrinsic_vredor_mask_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vredor_mask_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.nxv8i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vredor_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv8i8.nxv16i8( - , - , - , - , - iXLen); - define @intrinsic_vredor_mask_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.nxv8i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vredor_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv8i8.nxv32i8( - , - , - , - , - iXLen); - define @intrinsic_vredor_mask_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -280,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.nxv4i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vredor_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -302,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv4i16.nxv1i16( - , - , - , - , - iXLen); - define @intrinsic_vredor_mask_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -326,12 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.nxv4i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vredor_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; 
CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -348,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv4i16.nxv2i16( - , - , - , - , - iXLen); - define @intrinsic_vredor_mask_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -372,12 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vredor_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -394,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vredor_mask_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -418,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.nxv4i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vredor_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -440,13 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv4i16.nxv8i16( - , - , - , - , - iXLen); - define @intrinsic_vredor_mask_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -464,12 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.nxv4i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vredor_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -486,13 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv4i16.nxv16i16( - , - , - , - , - iXLen); - define 
@intrinsic_vredor_mask_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -510,12 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.nxv4i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vredor_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -532,13 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv4i16.nxv32i16( - , - , - , - , - iXLen); - define @intrinsic_vredor_mask_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -556,12 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.nxv2i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vredor_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -578,13 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv2i32.nxv1i32( - , - , - , - , - iXLen); - define @intrinsic_vredor_mask_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -602,12 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vredor_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -624,13 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vredor_mask_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -648,12 +466,6 @@ entry: ret %a } -declare 
@llvm.riscv.vredor.nxv2i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vredor_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -670,13 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv2i32.nxv4i32( - , - , - , - , - iXLen); - define @intrinsic_vredor_mask_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -694,12 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.nxv2i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vredor_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -716,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv2i32.nxv8i32( - , - , - , - , - iXLen); - define @intrinsic_vredor_mask_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -740,12 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.nxv2i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vredor_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -762,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv2i32.nxv16i32( - , - , - , - , - iXLen); - define @intrinsic_vredor_mask_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -786,12 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vredor_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -808,13 +581,6 
@@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vredor_mask_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -832,12 +598,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.nxv1i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vredor_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -854,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv1i64.nxv2i64( - , - , - , - , - iXLen); - define @intrinsic_vredor_mask_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -878,12 +631,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.nxv1i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vredor_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,13 +647,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv1i64.nxv4i64( - , - , - , - , - iXLen); - define @intrinsic_vredor_mask_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -924,12 +664,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.nxv1i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vredor_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -946,13 +680,6 @@ entry: ret %a } -declare @llvm.riscv.vredor.mask.nxv1i64.nxv8i64( - , - , - , - , - iXLen); - define @intrinsic_vredor_mask_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv8i64_nxv1i64: 
; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vredsum.ll b/llvm/test/CodeGen/RISCV/rvv/vredsum.ll index 3fb2ea3a48095..ce452ed4cf5dc 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vredsum.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vredsum.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vredsum.nxv8i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vredsum_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8( - , - , - , - , - iXLen); - define @intrinsic_vredsum_mask_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.nxv8i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vredsum_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8( - , - , - , - , - iXLen); - define @intrinsic_vredsum_mask_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.nxv8i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vredsum_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8( - , - , - , - , - iXLen); - define @intrinsic_vredsum_mask_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vredsum_mask_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vredsum_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen); - define @intrinsic_vredsum_mask_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.nxv8i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vredsum_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8( - , - , - , - , - iXLen); - define @intrinsic_vredsum_mask_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.nxv8i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vredsum_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8( - , - , - , - , - iXLen); - define @intrinsic_vredsum_mask_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -280,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.nxv4i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vredsum_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; 
CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -302,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16( - , - , - , - , - iXLen); - define @intrinsic_vredsum_mask_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -326,12 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.nxv4i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vredsum_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -348,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16( - , - , - , - , - iXLen); - define @intrinsic_vredsum_mask_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -372,12 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vredsum_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -394,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vredsum_mask_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -418,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.nxv4i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vredsum_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -440,13 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16( - , - , - , - , - iXLen); - define 
@intrinsic_vredsum_mask_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -464,12 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.nxv4i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vredsum_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -486,13 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16( - , - , - , - , - iXLen); - define @intrinsic_vredsum_mask_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -510,12 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.nxv4i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vredsum_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -532,13 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16( - , - , - , - , - iXLen); - define @intrinsic_vredsum_mask_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -556,12 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.nxv2i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vredsum_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -578,13 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32( - , - , - , - , - iXLen); - define @intrinsic_vredsum_mask_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -602,12 +433,6 @@ entry: ret %a } 
-declare @llvm.riscv.vredsum.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vredsum_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -624,13 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vredsum_mask_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -648,12 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.nxv2i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vredsum_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -670,13 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32( - , - , - , - , - iXLen); - define @intrinsic_vredsum_mask_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -694,12 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.nxv2i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vredsum_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -716,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32( - , - , - , - , - iXLen); - define @intrinsic_vredsum_mask_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -740,12 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.nxv2i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vredsum_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: 
# %entry @@ -762,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32( - , - , - , - , - iXLen); - define @intrinsic_vredsum_mask_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -786,12 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vredsum_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -808,13 +581,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vredsum_mask_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -832,12 +598,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.nxv1i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vredsum_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -854,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64( - , - , - , - , - iXLen); - define @intrinsic_vredsum_mask_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -878,12 +631,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.nxv1i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vredsum_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,13 +647,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64( - , - , - , - , - iXLen); - define @intrinsic_vredsum_mask_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vredsum_mask_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -924,12 +664,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.nxv1i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vredsum_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -946,13 +680,6 @@ entry: ret %a } -declare @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64( - , - , - , - , - iXLen); - define @intrinsic_vredsum_mask_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll index 274ac18deb273..70150d59e729c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v,+m -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare half @llvm.vector.reduce.fadd.nxv1f16(half, ) - define half @vreduce_fadd_nxv1f16( %v, half %s) { ; CHECK-LABEL: vreduce_fadd_nxv1f16: ; CHECK: # %bb.0: @@ -30,8 +28,6 @@ define half @vreduce_ord_fadd_nxv1f16( %v, half %s) { ret half %red } -declare half @llvm.vector.reduce.fadd.nxv2f16(half, ) - define half @vreduce_fadd_nxv2f16( %v, half %s) { ; CHECK-LABEL: vreduce_fadd_nxv2f16: ; CHECK: # %bb.0: @@ -56,8 +52,6 @@ define half @vreduce_ord_fadd_nxv2f16( %v, half %s) { ret half %red } -declare half @llvm.vector.reduce.fadd.nxv4f16(half, ) - define half @vreduce_fadd_nxv4f16( %v, half %s) { ; CHECK-LABEL: vreduce_fadd_nxv4f16: ; CHECK: # %bb.0: @@ -82,8 +76,6 @@ define half @vreduce_ord_fadd_nxv4f16( %v, half %s) { ret half %red } -declare float @llvm.vector.reduce.fadd.nxv1f32(float, ) - define float @vreduce_fadd_nxv1f32( %v, float %s) { ; CHECK-LABEL: vreduce_fadd_nxv1f32: ; CHECK: # 
%bb.0: @@ -138,8 +130,6 @@ define float @vreduce_ord_fwadd_nxv1f32( %v, float %s) { ret float %red } -declare float @llvm.vector.reduce.fadd.nxv2f32(float, ) - define float @vreduce_fadd_nxv2f32( %v, float %s) { ; CHECK-LABEL: vreduce_fadd_nxv2f32: ; CHECK: # %bb.0: @@ -194,8 +184,6 @@ define float @vreduce_ord_fwadd_nxv2f32( %v, float %s) { ret float %red } -declare float @llvm.vector.reduce.fadd.nxv4f32(float, ) - define float @vreduce_fadd_nxv4f32( %v, float %s) { ; CHECK-LABEL: vreduce_fadd_nxv4f32: ; CHECK: # %bb.0: @@ -250,8 +238,6 @@ define float @vreduce_ord_fwadd_nxv4f32( %v, float %s) { ret float %red } -declare double @llvm.vector.reduce.fadd.nxv1f64(double, ) - define double @vreduce_fadd_nxv1f64( %v, double %s) { ; CHECK-LABEL: vreduce_fadd_nxv1f64: ; CHECK: # %bb.0: @@ -306,8 +292,6 @@ define double @vreduce_ord_fwadd_nxv1f64( %v, double %s) { ret double %red } -declare double @llvm.vector.reduce.fadd.nxv2f64(double, ) - define double @vreduce_fadd_nxv2f64( %v, double %s) { ; CHECK-LABEL: vreduce_fadd_nxv2f64: ; CHECK: # %bb.0: @@ -362,8 +346,6 @@ define double @vreduce_ord_fwadd_nxv2f64( %v, double %s) { ret double %red } -declare double @llvm.vector.reduce.fadd.nxv4f64(double, ) - define double @vreduce_fadd_nxv4f64( %v, double %s) { ; CHECK-LABEL: vreduce_fadd_nxv4f64: ; CHECK: # %bb.0: @@ -418,8 +400,6 @@ define double @vreduce_ord_fwadd_nxv4f64( %v, double %s) { ret double %red } -declare half @llvm.vector.reduce.fmin.nxv1f16() - define half @vreduce_fmin_nxv1f16( %v) { ; CHECK-LABEL: vreduce_fmin_nxv1f16: ; CHECK: # %bb.0: @@ -453,8 +433,6 @@ define half @vreduce_fmin_nxv1f16_nonans_noinfs( %v) #1 { ret half %red } -declare half @llvm.vector.reduce.fmin.nxv2f16() - define half @vreduce_fmin_nxv2f16( %v) { ; CHECK-LABEL: vreduce_fmin_nxv2f16: ; CHECK: # %bb.0: @@ -466,8 +444,6 @@ define half @vreduce_fmin_nxv2f16( %v) { ret half %red } -declare half @llvm.vector.reduce.fmin.nxv4f16() - define half @vreduce_fmin_nxv4f16( %v) { ; CHECK-LABEL: 
vreduce_fmin_nxv4f16: ; CHECK: # %bb.0: @@ -479,8 +455,6 @@ define half @vreduce_fmin_nxv4f16( %v) { ret half %red } -declare half @llvm.vector.reduce.fmin.nxv64f16() - define half @vreduce_fmin_nxv64f16( %v) { ; CHECK-LABEL: vreduce_fmin_nxv64f16: ; CHECK: # %bb.0: @@ -493,8 +467,6 @@ define half @vreduce_fmin_nxv64f16( %v) { ret half %red } -declare float @llvm.vector.reduce.fmin.nxv1f32() - define float @vreduce_fmin_nxv1f32( %v) { ; CHECK-LABEL: vreduce_fmin_nxv1f32: ; CHECK: # %bb.0: @@ -528,8 +500,6 @@ define float @vreduce_fmin_nxv1f32_nonans_noinfs( %v) { ret float %red } -declare float @llvm.vector.reduce.fmin.nxv2f32() - define float @vreduce_fmin_nxv2f32( %v) { ; CHECK-LABEL: vreduce_fmin_nxv2f32: ; CHECK: # %bb.0: @@ -541,8 +511,6 @@ define float @vreduce_fmin_nxv2f32( %v) { ret float %red } -declare float @llvm.vector.reduce.fmin.nxv4f32() - define float @vreduce_fmin_nxv4f32( %v) { ; CHECK-LABEL: vreduce_fmin_nxv4f32: ; CHECK: # %bb.0: @@ -554,8 +522,6 @@ define float @vreduce_fmin_nxv4f32( %v) { ret float %red } -declare float @llvm.vector.reduce.fmin.nxv32f32() - define float @vreduce_fmin_nxv32f32( %v) { ; CHECK-LABEL: vreduce_fmin_nxv32f32: ; CHECK: # %bb.0: @@ -568,8 +534,6 @@ define float @vreduce_fmin_nxv32f32( %v) { ret float %red } -declare double @llvm.vector.reduce.fmin.nxv1f64() - define double @vreduce_fmin_nxv1f64( %v) { ; CHECK-LABEL: vreduce_fmin_nxv1f64: ; CHECK: # %bb.0: @@ -603,8 +567,6 @@ define double @vreduce_fmin_nxv1f64_nonans_noinfs( %v) { ret double %red } -declare double @llvm.vector.reduce.fmin.nxv2f64() - define double @vreduce_fmin_nxv2f64( %v) { ; CHECK-LABEL: vreduce_fmin_nxv2f64: ; CHECK: # %bb.0: @@ -616,8 +578,6 @@ define double @vreduce_fmin_nxv2f64( %v) { ret double %red } -declare double @llvm.vector.reduce.fmin.nxv4f64() - define double @vreduce_fmin_nxv4f64( %v) { ; CHECK-LABEL: vreduce_fmin_nxv4f64: ; CHECK: # %bb.0: @@ -629,8 +589,6 @@ define double @vreduce_fmin_nxv4f64( %v) { ret double %red } -declare 
double @llvm.vector.reduce.fmin.nxv16f64() - define double @vreduce_fmin_nxv16f64( %v) { ; CHECK-LABEL: vreduce_fmin_nxv16f64: ; CHECK: # %bb.0: @@ -643,8 +601,6 @@ define double @vreduce_fmin_nxv16f64( %v) { ret double %red } -declare half @llvm.vector.reduce.fmax.nxv1f16() - define half @vreduce_fmax_nxv1f16( %v) { ; CHECK-LABEL: vreduce_fmax_nxv1f16: ; CHECK: # %bb.0: @@ -678,8 +634,6 @@ define half @vreduce_fmax_nxv1f16_nonans_noinfs( %v) #1 { ret half %red } -declare half @llvm.vector.reduce.fmax.nxv2f16() - define half @vreduce_fmax_nxv2f16( %v) { ; CHECK-LABEL: vreduce_fmax_nxv2f16: ; CHECK: # %bb.0: @@ -691,8 +645,6 @@ define half @vreduce_fmax_nxv2f16( %v) { ret half %red } -declare half @llvm.vector.reduce.fmax.nxv4f16() - define half @vreduce_fmax_nxv4f16( %v) { ; CHECK-LABEL: vreduce_fmax_nxv4f16: ; CHECK: # %bb.0: @@ -704,8 +656,6 @@ define half @vreduce_fmax_nxv4f16( %v) { ret half %red } -declare half @llvm.vector.reduce.fmax.nxv64f16() - define half @vreduce_fmax_nxv64f16( %v) { ; CHECK-LABEL: vreduce_fmax_nxv64f16: ; CHECK: # %bb.0: @@ -718,8 +668,6 @@ define half @vreduce_fmax_nxv64f16( %v) { ret half %red } -declare float @llvm.vector.reduce.fmax.nxv1f32() - define float @vreduce_fmax_nxv1f32( %v) { ; CHECK-LABEL: vreduce_fmax_nxv1f32: ; CHECK: # %bb.0: @@ -753,8 +701,6 @@ define float @vreduce_fmax_nxv1f32_nonans_noinfs( %v) { ret float %red } -declare float @llvm.vector.reduce.fmax.nxv2f32() - define float @vreduce_fmax_nxv2f32( %v) { ; CHECK-LABEL: vreduce_fmax_nxv2f32: ; CHECK: # %bb.0: @@ -766,8 +712,6 @@ define float @vreduce_fmax_nxv2f32( %v) { ret float %red } -declare float @llvm.vector.reduce.fmax.nxv4f32() - define float @vreduce_fmax_nxv4f32( %v) { ; CHECK-LABEL: vreduce_fmax_nxv4f32: ; CHECK: # %bb.0: @@ -779,8 +723,6 @@ define float @vreduce_fmax_nxv4f32( %v) { ret float %red } -declare float @llvm.vector.reduce.fmax.nxv32f32() - define float @vreduce_fmax_nxv32f32( %v) { ; CHECK-LABEL: vreduce_fmax_nxv32f32: ; CHECK: # %bb.0: @@ 
-793,8 +735,6 @@ define float @vreduce_fmax_nxv32f32( %v) { ret float %red } -declare double @llvm.vector.reduce.fmax.nxv1f64() - define double @vreduce_fmax_nxv1f64( %v) { ; CHECK-LABEL: vreduce_fmax_nxv1f64: ; CHECK: # %bb.0: @@ -828,8 +768,6 @@ define double @vreduce_fmax_nxv1f64_nonans_noinfs( %v) { ret double %red } -declare double @llvm.vector.reduce.fmax.nxv2f64() - define double @vreduce_fmax_nxv2f64( %v) { ; CHECK-LABEL: vreduce_fmax_nxv2f64: ; CHECK: # %bb.0: @@ -841,8 +779,6 @@ define double @vreduce_fmax_nxv2f64( %v) { ret double %red } -declare double @llvm.vector.reduce.fmax.nxv4f64() - define double @vreduce_fmax_nxv4f64( %v) { ; CHECK-LABEL: vreduce_fmax_nxv4f64: ; CHECK: # %bb.0: @@ -854,8 +790,6 @@ define double @vreduce_fmax_nxv4f64( %v) { ret double %red } -declare double @llvm.vector.reduce.fmax.nxv16f64() - define double @vreduce_fmax_nxv16f64( %v) { ; CHECK-LABEL: vreduce_fmax_nxv16f64: ; CHECK: # %bb.0: @@ -881,7 +815,6 @@ define float @vreduce_nsz_fadd_nxv1f32( %v, float %s) { } ; Test Widen VECREDUCE_SEQ_FADD -declare half @llvm.vector.reduce.fadd.nxv3f16(half, ) define half @vreduce_ord_fadd_nxv3f16( %v, half %s) { ; CHECK-LABEL: vreduce_ord_fadd_nxv3f16: @@ -900,8 +833,6 @@ define half @vreduce_ord_fadd_nxv3f16( %v, half %s) { ret half %red } -declare half @llvm.vector.reduce.fadd.nxv6f16(half, ) - define half @vreduce_ord_fadd_nxv6f16( %v, half %s) { ; CHECK-LABEL: vreduce_ord_fadd_nxv6f16: ; CHECK: # %bb.0: @@ -918,8 +849,6 @@ define half @vreduce_ord_fadd_nxv6f16( %v, half %s) { ret half %red } -declare half @llvm.vector.reduce.fadd.nxv10f16(half, ) - define half @vreduce_ord_fadd_nxv10f16( %v, half %s) { ; CHECK-LABEL: vreduce_ord_fadd_nxv10f16: ; CHECK: # %bb.0: @@ -936,8 +865,6 @@ define half @vreduce_ord_fadd_nxv10f16( %v, half %s) { ret half %red } -declare half @llvm.vector.reduce.fadd.nxv12f16(half, ) - define half @vreduce_ord_fadd_nxv12f16( %v, half %s) { ; CHECK-LABEL: vreduce_ord_fadd_nxv12f16: ; CHECK: # %bb.0: @@ -992,8 
+919,6 @@ define half @vreduce_fadd_nxv6f16( %v, half %s) { ret half %red } -declare half @llvm.vector.reduce.fmin.nxv10f16() - define half @vreduce_fmin_nxv10f16( %v) { ; CHECK-LABEL: vreduce_fmin_nxv10f16: ; CHECK: # %bb.0: @@ -1012,8 +937,6 @@ define half @vreduce_fmin_nxv10f16( %v) { ret half %red } -declare half @llvm.vector.reduce.fmax.nxv12f16() - define half @vreduce_fmax_nxv12f16( %v) { ; CHECK-LABEL: vreduce_fmax_nxv12f16: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll index 012fca0b1fe3d..df97f19df7f99 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare half @llvm.vp.reduce.fadd.nxv1f16(half, , , i32) - define half @vpreduce_fadd_nxv1f16(half %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_nxv1f16: ; CHECK: # %bb.0: @@ -32,8 +30,6 @@ define half @vpreduce_ord_fadd_nxv1f16(half %s, %v, , , i32) - define half @vpreduce_fadd_nxv2f16(half %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_nxv2f16: ; CHECK: # %bb.0: @@ -60,8 +56,6 @@ define half @vpreduce_ord_fadd_nxv2f16(half %s, %v, , , i32) - define half @vpreduce_fadd_nxv4f16(half %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_nxv4f16: ; CHECK: # %bb.0: @@ -88,8 +82,6 @@ define half @vpreduce_ord_fadd_nxv4f16(half %s, %v, , , i32) - define half @vpreduce_fadd_nxv64f16(half %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_nxv64f16: ; CHECK: # %bb.0: @@ -148,8 +140,6 @@ define half @vpreduce_ord_fadd_nxv64f16(half %s, %v, , , i32) - define float @vpreduce_fadd_nxv1f32(float %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_nxv1f32: ; CHECK: # %bb.0: @@ -176,8 +166,6 @@ define float @vpreduce_ord_fadd_nxv1f32(float %s, %v, , , i32) - define float 
@vpreduce_fadd_nxv2f32(float %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_nxv2f32: ; CHECK: # %bb.0: @@ -204,8 +192,6 @@ define float @vpreduce_ord_fadd_nxv2f32(float %s, %v, , , i32) - define float @vpreduce_fadd_nxv4f32(float %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_nxv4f32: ; CHECK: # %bb.0: @@ -232,8 +218,6 @@ define float @vpreduce_ord_fadd_nxv4f32(float %s, %v, , , i32) - define double @vpreduce_fadd_nxv1f64(double %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_nxv1f64: ; CHECK: # %bb.0: @@ -260,8 +244,6 @@ define double @vpreduce_ord_fadd_nxv1f64(double %s, %v, , , i32) - define double @vpreduce_fadd_nxv2f64(double %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_nxv2f64: ; CHECK: # %bb.0: @@ -288,8 +270,6 @@ define double @vpreduce_ord_fadd_nxv2f64(double %s, %v, , , i32) - define double @vpreduce_fadd_nxv3f64(double %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_nxv3f64: ; CHECK: # %bb.0: @@ -316,8 +296,6 @@ define double @vpreduce_ord_fadd_nxv3f64(double %s, %v, , , i32) - define double @vpreduce_fadd_nxv4f64(double %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_nxv4f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll index 7c6782fc1dcd4..7eea35afe0aa0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare i8 @llvm.vp.reduce.add.nxv1i8(i8, , , i32) - define signext i8 @vpreduce_add_nxv1i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_nxv1i8: ; CHECK: # %bb.0: @@ -19,8 +17,6 @@ define signext i8 @vpreduce_add_nxv1i8(i8 signext %s, %v, , , i32) - define signext i8 @vpreduce_umax_nxv1i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vpreduce_umax_nxv1i8: ; CHECK: # %bb.0: @@ -34,8 +30,6 @@ define signext i8 @vpreduce_umax_nxv1i8(i8 signext %s, %v, , , i32) - define signext i8 @vpreduce_smax_nxv1i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv1i8: ; CHECK: # %bb.0: @@ -49,8 +43,6 @@ define signext i8 @vpreduce_smax_nxv1i8(i8 signext %s, %v, , , i32) - define signext i8 @vpreduce_umin_nxv1i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_nxv1i8: ; CHECK: # %bb.0: @@ -64,8 +56,6 @@ define signext i8 @vpreduce_umin_nxv1i8(i8 signext %s, %v, , , i32) - define signext i8 @vpreduce_smin_nxv1i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_nxv1i8: ; CHECK: # %bb.0: @@ -79,8 +69,6 @@ define signext i8 @vpreduce_smin_nxv1i8(i8 signext %s, %v, , , i32) - define signext i8 @vpreduce_and_nxv1i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_nxv1i8: ; CHECK: # %bb.0: @@ -94,8 +82,6 @@ define signext i8 @vpreduce_and_nxv1i8(i8 signext %s, %v, , , i32) - define signext i8 @vpreduce_or_nxv1i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv1i8: ; CHECK: # %bb.0: @@ -109,8 +95,6 @@ define signext i8 @vpreduce_or_nxv1i8(i8 signext %s, %v, , , i32) - define signext i8 @vpreduce_xor_nxv1i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_nxv1i8: ; CHECK: # %bb.0: @@ -124,8 +108,6 @@ define signext i8 @vpreduce_xor_nxv1i8(i8 signext %s, %v, , , i32) - define signext i8 @vpreduce_add_nxv2i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_nxv2i8: ; CHECK: # %bb.0: @@ -139,8 +121,6 @@ define signext i8 @vpreduce_add_nxv2i8(i8 signext %s, %v, , , i32) - define signext i8 @vpreduce_umax_nxv2i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_nxv2i8: ; CHECK: # %bb.0: @@ -154,8 +134,6 @@ define signext i8 @vpreduce_umax_nxv2i8(i8 signext %s, %v, , , i32) - define signext i8 @vpreduce_smax_nxv2i8(i8 signext %s, %v, %m, i32 
zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv2i8: ; CHECK: # %bb.0: @@ -169,8 +147,6 @@ define signext i8 @vpreduce_smax_nxv2i8(i8 signext %s, %v, , , i32) - define signext i8 @vpreduce_umin_nxv2i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_nxv2i8: ; CHECK: # %bb.0: @@ -184,8 +160,6 @@ define signext i8 @vpreduce_umin_nxv2i8(i8 signext %s, %v, , , i32) - define signext i8 @vpreduce_smin_nxv2i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_nxv2i8: ; CHECK: # %bb.0: @@ -199,8 +173,6 @@ define signext i8 @vpreduce_smin_nxv2i8(i8 signext %s, %v, , , i32) - define signext i8 @vpreduce_and_nxv2i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_nxv2i8: ; CHECK: # %bb.0: @@ -214,8 +186,6 @@ define signext i8 @vpreduce_and_nxv2i8(i8 signext %s, %v, , , i32) - define signext i8 @vpreduce_or_nxv2i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv2i8: ; CHECK: # %bb.0: @@ -229,8 +199,6 @@ define signext i8 @vpreduce_or_nxv2i8(i8 signext %s, %v, , , i32) - define signext i8 @vpreduce_xor_nxv2i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_nxv2i8: ; CHECK: # %bb.0: @@ -244,8 +212,6 @@ define signext i8 @vpreduce_xor_nxv2i8(i8 signext %s, %v, , , i32) - define signext i8 @vpreduce_smax_nxv3i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv3i8: ; CHECK: # %bb.0: @@ -259,8 +225,6 @@ define signext i8 @vpreduce_smax_nxv3i8(i8 signext %s, %v, , , i32) - define signext i8 @vpreduce_add_nxv4i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_nxv4i8: ; CHECK: # %bb.0: @@ -274,8 +238,6 @@ define signext i8 @vpreduce_add_nxv4i8(i8 signext %s, %v, , , i32) - define signext i8 @vpreduce_umax_nxv4i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_nxv4i8: ; CHECK: # %bb.0: @@ -289,8 +251,6 @@ define signext i8 @vpreduce_umax_nxv4i8(i8 signext %s, %v, , , i32) - define signext i8 
@vpreduce_smax_nxv4i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv4i8: ; CHECK: # %bb.0: @@ -304,8 +264,6 @@ define signext i8 @vpreduce_smax_nxv4i8(i8 signext %s, %v, , , i32) - define signext i8 @vpreduce_umin_nxv4i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_nxv4i8: ; CHECK: # %bb.0: @@ -319,8 +277,6 @@ define signext i8 @vpreduce_umin_nxv4i8(i8 signext %s, %v, , , i32) - define signext i8 @vpreduce_smin_nxv4i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_nxv4i8: ; CHECK: # %bb.0: @@ -334,8 +290,6 @@ define signext i8 @vpreduce_smin_nxv4i8(i8 signext %s, %v, , , i32) - define signext i8 @vpreduce_and_nxv4i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_nxv4i8: ; CHECK: # %bb.0: @@ -349,8 +303,6 @@ define signext i8 @vpreduce_and_nxv4i8(i8 signext %s, %v, , , i32) - define signext i8 @vpreduce_or_nxv4i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv4i8: ; CHECK: # %bb.0: @@ -364,8 +316,6 @@ define signext i8 @vpreduce_or_nxv4i8(i8 signext %s, %v, , , i32) - define signext i8 @vpreduce_xor_nxv4i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_nxv4i8: ; CHECK: # %bb.0: @@ -379,8 +329,6 @@ define signext i8 @vpreduce_xor_nxv4i8(i8 signext %s, %v, , , i32) - define signext i16 @vpreduce_add_nxv1i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_nxv1i16: ; CHECK: # %bb.0: @@ -394,8 +342,6 @@ define signext i16 @vpreduce_add_nxv1i16(i16 signext %s, %v, ret i16 %r } -declare i16 @llvm.vp.reduce.umax.nxv1i16(i16, , , i32) - define signext i16 @vpreduce_umax_nxv1i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_nxv1i16: ; CHECK: # %bb.0: @@ -409,8 +355,6 @@ define signext i16 @vpreduce_umax_nxv1i16(i16 signext %s, %v, ret i16 %r } -declare i16 @llvm.vp.reduce.smax.nxv1i16(i16, , , i32) - define signext i16 @vpreduce_smax_nxv1i16(i16 signext %s, %v, %m, i32 
zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv1i16: ; CHECK: # %bb.0: @@ -424,8 +368,6 @@ define signext i16 @vpreduce_smax_nxv1i16(i16 signext %s, %v, ret i16 %r } -declare i16 @llvm.vp.reduce.umin.nxv1i16(i16, , , i32) - define signext i16 @vpreduce_umin_nxv1i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_nxv1i16: ; CHECK: # %bb.0: @@ -439,8 +381,6 @@ define signext i16 @vpreduce_umin_nxv1i16(i16 signext %s, %v, ret i16 %r } -declare i16 @llvm.vp.reduce.smin.nxv1i16(i16, , , i32) - define signext i16 @vpreduce_smin_nxv1i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_nxv1i16: ; CHECK: # %bb.0: @@ -454,8 +394,6 @@ define signext i16 @vpreduce_smin_nxv1i16(i16 signext %s, %v, ret i16 %r } -declare i16 @llvm.vp.reduce.and.nxv1i16(i16, , , i32) - define signext i16 @vpreduce_and_nxv1i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_nxv1i16: ; CHECK: # %bb.0: @@ -469,8 +407,6 @@ define signext i16 @vpreduce_and_nxv1i16(i16 signext %s, %v, ret i16 %r } -declare i16 @llvm.vp.reduce.or.nxv1i16(i16, , , i32) - define signext i16 @vpreduce_or_nxv1i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv1i16: ; CHECK: # %bb.0: @@ -484,8 +420,6 @@ define signext i16 @vpreduce_or_nxv1i16(i16 signext %s, %v, < ret i16 %r } -declare i16 @llvm.vp.reduce.xor.nxv1i16(i16, , , i32) - define signext i16 @vpreduce_xor_nxv1i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_nxv1i16: ; CHECK: # %bb.0: @@ -499,8 +433,6 @@ define signext i16 @vpreduce_xor_nxv1i16(i16 signext %s, %v, ret i16 %r } -declare i16 @llvm.vp.reduce.add.nxv2i16(i16, , , i32) - define signext i16 @vpreduce_add_nxv2i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_nxv2i16: ; CHECK: # %bb.0: @@ -514,8 +446,6 @@ define signext i16 @vpreduce_add_nxv2i16(i16 signext %s, %v, ret i16 %r } -declare i16 @llvm.vp.reduce.umax.nxv2i16(i16, , , i32) - define signext i16 
@vpreduce_umax_nxv2i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_nxv2i16: ; CHECK: # %bb.0: @@ -529,8 +459,6 @@ define signext i16 @vpreduce_umax_nxv2i16(i16 signext %s, %v, ret i16 %r } -declare i16 @llvm.vp.reduce.smax.nxv2i16(i16, , , i32) - define signext i16 @vpreduce_smax_nxv2i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv2i16: ; CHECK: # %bb.0: @@ -544,8 +472,6 @@ define signext i16 @vpreduce_smax_nxv2i16(i16 signext %s, %v, ret i16 %r } -declare i16 @llvm.vp.reduce.umin.nxv2i16(i16, , , i32) - define signext i16 @vpreduce_umin_nxv2i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_nxv2i16: ; CHECK: # %bb.0: @@ -559,8 +485,6 @@ define signext i16 @vpreduce_umin_nxv2i16(i16 signext %s, %v, ret i16 %r } -declare i16 @llvm.vp.reduce.smin.nxv2i16(i16, , , i32) - define signext i16 @vpreduce_smin_nxv2i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_nxv2i16: ; CHECK: # %bb.0: @@ -574,8 +498,6 @@ define signext i16 @vpreduce_smin_nxv2i16(i16 signext %s, %v, ret i16 %r } -declare i16 @llvm.vp.reduce.and.nxv2i16(i16, , , i32) - define signext i16 @vpreduce_and_nxv2i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_nxv2i16: ; CHECK: # %bb.0: @@ -589,8 +511,6 @@ define signext i16 @vpreduce_and_nxv2i16(i16 signext %s, %v, ret i16 %r } -declare i16 @llvm.vp.reduce.or.nxv2i16(i16, , , i32) - define signext i16 @vpreduce_or_nxv2i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv2i16: ; CHECK: # %bb.0: @@ -604,8 +524,6 @@ define signext i16 @vpreduce_or_nxv2i16(i16 signext %s, %v, < ret i16 %r } -declare i16 @llvm.vp.reduce.xor.nxv2i16(i16, , , i32) - define signext i16 @vpreduce_xor_nxv2i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_nxv2i16: ; CHECK: # %bb.0: @@ -619,8 +537,6 @@ define signext i16 @vpreduce_xor_nxv2i16(i16 signext %s, %v, ret i16 %r } -declare i16 
@llvm.vp.reduce.add.nxv4i16(i16, , , i32) - define signext i16 @vpreduce_add_nxv4i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_nxv4i16: ; CHECK: # %bb.0: @@ -634,8 +550,6 @@ define signext i16 @vpreduce_add_nxv4i16(i16 signext %s, %v, ret i16 %r } -declare i16 @llvm.vp.reduce.umax.nxv4i16(i16, , , i32) - define signext i16 @vpreduce_umax_nxv4i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_nxv4i16: ; CHECK: # %bb.0: @@ -649,8 +563,6 @@ define signext i16 @vpreduce_umax_nxv4i16(i16 signext %s, %v, ret i16 %r } -declare i16 @llvm.vp.reduce.smax.nxv4i16(i16, , , i32) - define signext i16 @vpreduce_smax_nxv4i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv4i16: ; CHECK: # %bb.0: @@ -664,8 +576,6 @@ define signext i16 @vpreduce_smax_nxv4i16(i16 signext %s, %v, ret i16 %r } -declare i16 @llvm.vp.reduce.umin.nxv4i16(i16, , , i32) - define signext i16 @vpreduce_umin_nxv4i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_nxv4i16: ; CHECK: # %bb.0: @@ -679,8 +589,6 @@ define signext i16 @vpreduce_umin_nxv4i16(i16 signext %s, %v, ret i16 %r } -declare i16 @llvm.vp.reduce.smin.nxv4i16(i16, , , i32) - define signext i16 @vpreduce_smin_nxv4i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_nxv4i16: ; CHECK: # %bb.0: @@ -694,8 +602,6 @@ define signext i16 @vpreduce_smin_nxv4i16(i16 signext %s, %v, ret i16 %r } -declare i16 @llvm.vp.reduce.and.nxv4i16(i16, , , i32) - define signext i16 @vpreduce_and_nxv4i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_nxv4i16: ; CHECK: # %bb.0: @@ -709,8 +615,6 @@ define signext i16 @vpreduce_and_nxv4i16(i16 signext %s, %v, ret i16 %r } -declare i16 @llvm.vp.reduce.or.nxv4i16(i16, , , i32) - define signext i16 @vpreduce_or_nxv4i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv4i16: ; CHECK: # %bb.0: @@ -724,8 +628,6 @@ define signext i16 
@vpreduce_or_nxv4i16(i16 signext %s, %v, < ret i16 %r } -declare i16 @llvm.vp.reduce.xor.nxv4i16(i16, , , i32) - define signext i16 @vpreduce_xor_nxv4i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_nxv4i16: ; CHECK: # %bb.0: @@ -739,8 +641,6 @@ define signext i16 @vpreduce_xor_nxv4i16(i16 signext %s, %v, ret i16 %r } -declare i32 @llvm.vp.reduce.add.nxv1i32(i32, , , i32) - define signext i32 @vpreduce_add_nxv1i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_nxv1i32: ; CHECK: # %bb.0: @@ -754,8 +654,6 @@ define signext i32 @vpreduce_add_nxv1i32(i32 signext %s, %v, ret i32 %r } -declare i32 @llvm.vp.reduce.umax.nxv1i32(i32, , , i32) - define signext i32 @vpreduce_umax_nxv1i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_nxv1i32: ; CHECK: # %bb.0: @@ -769,8 +667,6 @@ define signext i32 @vpreduce_umax_nxv1i32(i32 signext %s, %v, ret i32 %r } -declare i32 @llvm.vp.reduce.smax.nxv1i32(i32, , , i32) - define signext i32 @vpreduce_smax_nxv1i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv1i32: ; CHECK: # %bb.0: @@ -784,8 +680,6 @@ define signext i32 @vpreduce_smax_nxv1i32(i32 signext %s, %v, ret i32 %r } -declare i32 @llvm.vp.reduce.umin.nxv1i32(i32, , , i32) - define signext i32 @vpreduce_umin_nxv1i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_nxv1i32: ; CHECK: # %bb.0: @@ -799,8 +693,6 @@ define signext i32 @vpreduce_umin_nxv1i32(i32 signext %s, %v, ret i32 %r } -declare i32 @llvm.vp.reduce.smin.nxv1i32(i32, , , i32) - define signext i32 @vpreduce_smin_nxv1i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_nxv1i32: ; CHECK: # %bb.0: @@ -814,8 +706,6 @@ define signext i32 @vpreduce_smin_nxv1i32(i32 signext %s, %v, ret i32 %r } -declare i32 @llvm.vp.reduce.and.nxv1i32(i32, , , i32) - define signext i32 @vpreduce_and_nxv1i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_nxv1i32: ; 
CHECK: # %bb.0: @@ -829,8 +719,6 @@ define signext i32 @vpreduce_and_nxv1i32(i32 signext %s, %v, ret i32 %r } -declare i32 @llvm.vp.reduce.or.nxv1i32(i32, , , i32) - define signext i32 @vpreduce_or_nxv1i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv1i32: ; CHECK: # %bb.0: @@ -844,8 +732,6 @@ define signext i32 @vpreduce_or_nxv1i32(i32 signext %s, %v, < ret i32 %r } -declare i32 @llvm.vp.reduce.xor.nxv1i32(i32, , , i32) - define signext i32 @vpreduce_xor_nxv1i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_nxv1i32: ; CHECK: # %bb.0: @@ -859,8 +745,6 @@ define signext i32 @vpreduce_xor_nxv1i32(i32 signext %s, %v, ret i32 %r } -declare i32 @llvm.vp.reduce.add.nxv2i32(i32, , , i32) - define signext i32 @vpreduce_add_nxv2i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_nxv2i32: ; CHECK: # %bb.0: @@ -874,8 +758,6 @@ define signext i32 @vpreduce_add_nxv2i32(i32 signext %s, %v, ret i32 %r } -declare i32 @llvm.vp.reduce.umax.nxv2i32(i32, , , i32) - define signext i32 @vpreduce_umax_nxv2i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_nxv2i32: ; CHECK: # %bb.0: @@ -889,8 +771,6 @@ define signext i32 @vpreduce_umax_nxv2i32(i32 signext %s, %v, ret i32 %r } -declare i32 @llvm.vp.reduce.smax.nxv2i32(i32, , , i32) - define signext i32 @vpreduce_smax_nxv2i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv2i32: ; CHECK: # %bb.0: @@ -904,8 +784,6 @@ define signext i32 @vpreduce_smax_nxv2i32(i32 signext %s, %v, ret i32 %r } -declare i32 @llvm.vp.reduce.umin.nxv2i32(i32, , , i32) - define signext i32 @vpreduce_umin_nxv2i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_nxv2i32: ; CHECK: # %bb.0: @@ -919,8 +797,6 @@ define signext i32 @vpreduce_umin_nxv2i32(i32 signext %s, %v, ret i32 %r } -declare i32 @llvm.vp.reduce.smin.nxv2i32(i32, , , i32) - define signext i32 @vpreduce_smin_nxv2i32(i32 signext %s, %v, %m, i32 
zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_nxv2i32: ; CHECK: # %bb.0: @@ -934,8 +810,6 @@ define signext i32 @vpreduce_smin_nxv2i32(i32 signext %s, %v, ret i32 %r } -declare i32 @llvm.vp.reduce.and.nxv2i32(i32, , , i32) - define signext i32 @vpreduce_and_nxv2i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_nxv2i32: ; CHECK: # %bb.0: @@ -949,8 +823,6 @@ define signext i32 @vpreduce_and_nxv2i32(i32 signext %s, %v, ret i32 %r } -declare i32 @llvm.vp.reduce.or.nxv2i32(i32, , , i32) - define signext i32 @vpreduce_or_nxv2i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv2i32: ; CHECK: # %bb.0: @@ -964,8 +836,6 @@ define signext i32 @vpreduce_or_nxv2i32(i32 signext %s, %v, < ret i32 %r } -declare i32 @llvm.vp.reduce.xor.nxv2i32(i32, , , i32) - define signext i32 @vpreduce_xor_nxv2i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_nxv2i32: ; CHECK: # %bb.0: @@ -979,8 +849,6 @@ define signext i32 @vpreduce_xor_nxv2i32(i32 signext %s, %v, ret i32 %r } -declare i32 @llvm.vp.reduce.add.nxv4i32(i32, , , i32) - define signext i32 @vpreduce_add_nxv4i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_nxv4i32: ; CHECK: # %bb.0: @@ -994,8 +862,6 @@ define signext i32 @vpreduce_add_nxv4i32(i32 signext %s, %v, ret i32 %r } -declare i32 @llvm.vp.reduce.umax.nxv4i32(i32, , , i32) - define signext i32 @vpreduce_umax_nxv4i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_nxv4i32: ; CHECK: # %bb.0: @@ -1009,8 +875,6 @@ define signext i32 @vpreduce_umax_nxv4i32(i32 signext %s, %v, ret i32 %r } -declare i32 @llvm.vp.reduce.umax.nxv32i32(i32, , , i32) - define signext i32 @vpreduce_umax_nxv32i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_nxv32i32: ; CHECK: # %bb.0: @@ -1040,8 +904,6 @@ define signext i32 @vpreduce_umax_nxv32i32(i32 signext %s, % ret i32 %r } -declare i32 @llvm.vp.reduce.smax.nxv4i32(i32, , , i32) - define signext i32 
@vpreduce_smax_nxv4i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv4i32: ; CHECK: # %bb.0: @@ -1055,8 +917,6 @@ define signext i32 @vpreduce_smax_nxv4i32(i32 signext %s, %v, ret i32 %r } -declare i32 @llvm.vp.reduce.umin.nxv4i32(i32, , , i32) - define signext i32 @vpreduce_umin_nxv4i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_nxv4i32: ; CHECK: # %bb.0: @@ -1070,8 +930,6 @@ define signext i32 @vpreduce_umin_nxv4i32(i32 signext %s, %v, ret i32 %r } -declare i32 @llvm.vp.reduce.smin.nxv4i32(i32, , , i32) - define signext i32 @vpreduce_smin_nxv4i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_nxv4i32: ; CHECK: # %bb.0: @@ -1085,8 +943,6 @@ define signext i32 @vpreduce_smin_nxv4i32(i32 signext %s, %v, ret i32 %r } -declare i32 @llvm.vp.reduce.and.nxv4i32(i32, , , i32) - define signext i32 @vpreduce_and_nxv4i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_nxv4i32: ; CHECK: # %bb.0: @@ -1100,8 +956,6 @@ define signext i32 @vpreduce_and_nxv4i32(i32 signext %s, %v, ret i32 %r } -declare i32 @llvm.vp.reduce.or.nxv4i32(i32, , , i32) - define signext i32 @vpreduce_or_nxv4i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv4i32: ; CHECK: # %bb.0: @@ -1115,8 +969,6 @@ define signext i32 @vpreduce_or_nxv4i32(i32 signext %s, %v, < ret i32 %r } -declare i32 @llvm.vp.reduce.xor.nxv4i32(i32, , , i32) - define signext i32 @vpreduce_xor_nxv4i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_nxv4i32: ; CHECK: # %bb.0: @@ -1130,8 +982,6 @@ define signext i32 @vpreduce_xor_nxv4i32(i32 signext %s, %v, ret i32 %r } -declare i64 @llvm.vp.reduce.add.nxv1i64(i64, , , i32) - define signext i64 @vpreduce_add_nxv1i64(i64 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_add_nxv1i64: ; RV32: # %bb.0: @@ -1235,8 +1085,6 @@ define signext i64 @vpwreduce_uadd_nxv1i32(i64 signext %s, %v ret i64 %r } -declare i64 
@llvm.vp.reduce.umax.nxv1i64(i64, , , i32) - define signext i64 @vpreduce_umax_nxv1i64(i64 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_umax_nxv1i64: ; RV32: # %bb.0: @@ -1270,8 +1118,6 @@ define signext i64 @vpreduce_umax_nxv1i64(i64 signext %s, %v, ret i64 %r } -declare i64 @llvm.vp.reduce.smax.nxv1i64(i64, , , i32) - define signext i64 @vpreduce_smax_nxv1i64(i64 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_smax_nxv1i64: ; RV32: # %bb.0: @@ -1305,8 +1151,6 @@ define signext i64 @vpreduce_smax_nxv1i64(i64 signext %s, %v, ret i64 %r } -declare i64 @llvm.vp.reduce.umin.nxv1i64(i64, , , i32) - define signext i64 @vpreduce_umin_nxv1i64(i64 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_umin_nxv1i64: ; RV32: # %bb.0: @@ -1340,8 +1184,6 @@ define signext i64 @vpreduce_umin_nxv1i64(i64 signext %s, %v, ret i64 %r } -declare i64 @llvm.vp.reduce.smin.nxv1i64(i64, , , i32) - define signext i64 @vpreduce_smin_nxv1i64(i64 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_smin_nxv1i64: ; RV32: # %bb.0: @@ -1375,8 +1217,6 @@ define signext i64 @vpreduce_smin_nxv1i64(i64 signext %s, %v, ret i64 %r } -declare i64 @llvm.vp.reduce.and.nxv1i64(i64, , , i32) - define signext i64 @vpreduce_and_nxv1i64(i64 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_and_nxv1i64: ; RV32: # %bb.0: @@ -1410,8 +1250,6 @@ define signext i64 @vpreduce_and_nxv1i64(i64 signext %s, %v, ret i64 %r } -declare i64 @llvm.vp.reduce.or.nxv1i64(i64, , , i32) - define signext i64 @vpreduce_or_nxv1i64(i64 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_or_nxv1i64: ; RV32: # %bb.0: @@ -1445,8 +1283,6 @@ define signext i64 @vpreduce_or_nxv1i64(i64 signext %s, %v, < ret i64 %r } -declare i64 @llvm.vp.reduce.xor.nxv1i64(i64, , , i32) - define signext i64 @vpreduce_xor_nxv1i64(i64 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_xor_nxv1i64: ; RV32: # %bb.0: @@ -1480,8 +1316,6 @@ define signext i64 
@vpreduce_xor_nxv1i64(i64 signext %s, %v, ret i64 %r } -declare i64 @llvm.vp.reduce.add.nxv2i64(i64, , , i32) - define signext i64 @vpreduce_add_nxv2i64(i64 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_add_nxv2i64: ; RV32: # %bb.0: @@ -1585,8 +1419,6 @@ define signext i64 @vwpreduce_uadd_nxv2i32(i64 signext %s, %v ret i64 %r } -declare i64 @llvm.vp.reduce.umax.nxv2i64(i64, , , i32) - define signext i64 @vpreduce_umax_nxv2i64(i64 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_umax_nxv2i64: ; RV32: # %bb.0: @@ -1620,8 +1452,6 @@ define signext i64 @vpreduce_umax_nxv2i64(i64 signext %s, %v, ret i64 %r } -declare i64 @llvm.vp.reduce.smax.nxv2i64(i64, , , i32) - define signext i64 @vpreduce_smax_nxv2i64(i64 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_smax_nxv2i64: ; RV32: # %bb.0: @@ -1655,8 +1485,6 @@ define signext i64 @vpreduce_smax_nxv2i64(i64 signext %s, %v, ret i64 %r } -declare i64 @llvm.vp.reduce.umin.nxv2i64(i64, , , i32) - define signext i64 @vpreduce_umin_nxv2i64(i64 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_umin_nxv2i64: ; RV32: # %bb.0: @@ -1690,8 +1518,6 @@ define signext i64 @vpreduce_umin_nxv2i64(i64 signext %s, %v, ret i64 %r } -declare i64 @llvm.vp.reduce.smin.nxv2i64(i64, , , i32) - define signext i64 @vpreduce_smin_nxv2i64(i64 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_smin_nxv2i64: ; RV32: # %bb.0: @@ -1725,8 +1551,6 @@ define signext i64 @vpreduce_smin_nxv2i64(i64 signext %s, %v, ret i64 %r } -declare i64 @llvm.vp.reduce.and.nxv2i64(i64, , , i32) - define signext i64 @vpreduce_and_nxv2i64(i64 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_and_nxv2i64: ; RV32: # %bb.0: @@ -1760,8 +1584,6 @@ define signext i64 @vpreduce_and_nxv2i64(i64 signext %s, %v, ret i64 %r } -declare i64 @llvm.vp.reduce.or.nxv2i64(i64, , , i32) - define signext i64 @vpreduce_or_nxv2i64(i64 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_or_nxv2i64: ; 
RV32: # %bb.0: @@ -1795,8 +1617,6 @@ define signext i64 @vpreduce_or_nxv2i64(i64 signext %s, %v, < ret i64 %r } -declare i64 @llvm.vp.reduce.xor.nxv2i64(i64, , , i32) - define signext i64 @vpreduce_xor_nxv2i64(i64 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_xor_nxv2i64: ; RV32: # %bb.0: @@ -1830,8 +1650,6 @@ define signext i64 @vpreduce_xor_nxv2i64(i64 signext %s, %v, ret i64 %r } -declare i64 @llvm.vp.reduce.add.nxv4i64(i64, , , i32) - define signext i64 @vpreduce_add_nxv4i64(i64 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_add_nxv4i64: ; RV32: # %bb.0: @@ -1935,8 +1753,6 @@ define signext i64 @vpwreduce_uadd_nxv4i32(i64 signext %s, %v ret i64 %r } -declare i64 @llvm.vp.reduce.umax.nxv4i64(i64, , , i32) - define signext i64 @vpreduce_umax_nxv4i64(i64 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_umax_nxv4i64: ; RV32: # %bb.0: @@ -1970,8 +1786,6 @@ define signext i64 @vpreduce_umax_nxv4i64(i64 signext %s, %v, ret i64 %r } -declare i64 @llvm.vp.reduce.smax.nxv4i64(i64, , , i32) - define signext i64 @vpreduce_smax_nxv4i64(i64 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_smax_nxv4i64: ; RV32: # %bb.0: @@ -2005,8 +1819,6 @@ define signext i64 @vpreduce_smax_nxv4i64(i64 signext %s, %v, ret i64 %r } -declare i64 @llvm.vp.reduce.umin.nxv4i64(i64, , , i32) - define signext i64 @vpreduce_umin_nxv4i64(i64 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_umin_nxv4i64: ; RV32: # %bb.0: @@ -2040,8 +1852,6 @@ define signext i64 @vpreduce_umin_nxv4i64(i64 signext %s, %v, ret i64 %r } -declare i64 @llvm.vp.reduce.smin.nxv4i64(i64, , , i32) - define signext i64 @vpreduce_smin_nxv4i64(i64 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_smin_nxv4i64: ; RV32: # %bb.0: @@ -2075,8 +1885,6 @@ define signext i64 @vpreduce_smin_nxv4i64(i64 signext %s, %v, ret i64 %r } -declare i64 @llvm.vp.reduce.and.nxv4i64(i64, , , i32) - define signext i64 @vpreduce_and_nxv4i64(i64 signext %s, %v, %m, 
i32 zeroext %evl) { ; RV32-LABEL: vpreduce_and_nxv4i64: ; RV32: # %bb.0: @@ -2110,8 +1918,6 @@ define signext i64 @vpreduce_and_nxv4i64(i64 signext %s, %v, ret i64 %r } -declare i64 @llvm.vp.reduce.or.nxv4i64(i64, , , i32) - define signext i64 @vpreduce_or_nxv4i64(i64 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_or_nxv4i64: ; RV32: # %bb.0: @@ -2145,8 +1951,6 @@ define signext i64 @vpreduce_or_nxv4i64(i64 signext %s, %v, < ret i64 %r } -declare i64 @llvm.vp.reduce.xor.nxv4i64(i64, , , i32) - define signext i64 @vpreduce_xor_nxv4i64(i64 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_xor_nxv4i64: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-int.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-int.ll index fac5e31ecf94e..d575b6c69dc3b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vreductions-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-int.ll @@ -4,8 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare i8 @llvm.vector.reduce.add.nxv1i8() - define signext i8 @vreduce_add_nxv1i8( %v) { ; CHECK-LABEL: vreduce_add_nxv1i8: ; CHECK: # %bb.0: @@ -18,8 +16,6 @@ define signext i8 @vreduce_add_nxv1i8( %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.umax.nxv1i8() - define signext i8 @vreduce_umax_nxv1i8( %v) { ; CHECK-LABEL: vreduce_umax_nxv1i8: ; CHECK: # %bb.0: @@ -31,8 +27,6 @@ define signext i8 @vreduce_umax_nxv1i8( %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.smax.nxv1i8() - define signext i8 @vreduce_smax_nxv1i8( %v) { ; CHECK-LABEL: vreduce_smax_nxv1i8: ; CHECK: # %bb.0: @@ -44,8 +38,6 @@ define signext i8 @vreduce_smax_nxv1i8( %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.umin.nxv1i8() - define signext i8 @vreduce_umin_nxv1i8( %v) { ; CHECK-LABEL: vreduce_umin_nxv1i8: ; CHECK: # %bb.0: @@ -57,8 +49,6 @@ define signext i8 @vreduce_umin_nxv1i8( %v) { ret i8 %red } -declare i8 
@llvm.vector.reduce.smin.nxv1i8() - define signext i8 @vreduce_smin_nxv1i8( %v) { ; CHECK-LABEL: vreduce_smin_nxv1i8: ; CHECK: # %bb.0: @@ -70,8 +60,6 @@ define signext i8 @vreduce_smin_nxv1i8( %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.and.nxv1i8() - define signext i8 @vreduce_and_nxv1i8( %v) { ; CHECK-LABEL: vreduce_and_nxv1i8: ; CHECK: # %bb.0: @@ -83,8 +71,6 @@ define signext i8 @vreduce_and_nxv1i8( %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.or.nxv1i8() - define signext i8 @vreduce_or_nxv1i8( %v) { ; CHECK-LABEL: vreduce_or_nxv1i8: ; CHECK: # %bb.0: @@ -96,8 +82,6 @@ define signext i8 @vreduce_or_nxv1i8( %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.xor.nxv1i8() - define signext i8 @vreduce_xor_nxv1i8( %v) { ; CHECK-LABEL: vreduce_xor_nxv1i8: ; CHECK: # %bb.0: @@ -110,8 +94,6 @@ define signext i8 @vreduce_xor_nxv1i8( %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.add.nxv2i8() - define signext i8 @vreduce_add_nxv2i8( %v) { ; CHECK-LABEL: vreduce_add_nxv2i8: ; CHECK: # %bb.0: @@ -124,8 +106,6 @@ define signext i8 @vreduce_add_nxv2i8( %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.umax.nxv2i8() - define signext i8 @vreduce_umax_nxv2i8( %v) { ; CHECK-LABEL: vreduce_umax_nxv2i8: ; CHECK: # %bb.0: @@ -137,8 +117,6 @@ define signext i8 @vreduce_umax_nxv2i8( %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.smax.nxv2i8() - define signext i8 @vreduce_smax_nxv2i8( %v) { ; CHECK-LABEL: vreduce_smax_nxv2i8: ; CHECK: # %bb.0: @@ -150,8 +128,6 @@ define signext i8 @vreduce_smax_nxv2i8( %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.umin.nxv2i8() - define signext i8 @vreduce_umin_nxv2i8( %v) { ; CHECK-LABEL: vreduce_umin_nxv2i8: ; CHECK: # %bb.0: @@ -163,8 +139,6 @@ define signext i8 @vreduce_umin_nxv2i8( %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.smin.nxv2i8() - define signext i8 @vreduce_smin_nxv2i8( %v) { ; CHECK-LABEL: vreduce_smin_nxv2i8: ; CHECK: # %bb.0: @@ -176,8 +150,6 @@ define signext i8 @vreduce_smin_nxv2i8( %v) { 
ret i8 %red } -declare i8 @llvm.vector.reduce.and.nxv2i8() - define signext i8 @vreduce_and_nxv2i8( %v) { ; CHECK-LABEL: vreduce_and_nxv2i8: ; CHECK: # %bb.0: @@ -189,8 +161,6 @@ define signext i8 @vreduce_and_nxv2i8( %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.or.nxv2i8() - define signext i8 @vreduce_or_nxv2i8( %v) { ; CHECK-LABEL: vreduce_or_nxv2i8: ; CHECK: # %bb.0: @@ -202,8 +172,6 @@ define signext i8 @vreduce_or_nxv2i8( %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.xor.nxv2i8() - define signext i8 @vreduce_xor_nxv2i8( %v) { ; CHECK-LABEL: vreduce_xor_nxv2i8: ; CHECK: # %bb.0: @@ -216,8 +184,6 @@ define signext i8 @vreduce_xor_nxv2i8( %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.add.nxv4i8() - define signext i8 @vreduce_add_nxv4i8( %v) { ; CHECK-LABEL: vreduce_add_nxv4i8: ; CHECK: # %bb.0: @@ -230,8 +196,6 @@ define signext i8 @vreduce_add_nxv4i8( %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.umax.nxv4i8() - define signext i8 @vreduce_umax_nxv4i8( %v) { ; CHECK-LABEL: vreduce_umax_nxv4i8: ; CHECK: # %bb.0: @@ -243,8 +207,6 @@ define signext i8 @vreduce_umax_nxv4i8( %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.smax.nxv4i8() - define signext i8 @vreduce_smax_nxv4i8( %v) { ; CHECK-LABEL: vreduce_smax_nxv4i8: ; CHECK: # %bb.0: @@ -256,8 +218,6 @@ define signext i8 @vreduce_smax_nxv4i8( %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.umin.nxv4i8() - define signext i8 @vreduce_umin_nxv4i8( %v) { ; CHECK-LABEL: vreduce_umin_nxv4i8: ; CHECK: # %bb.0: @@ -269,8 +229,6 @@ define signext i8 @vreduce_umin_nxv4i8( %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.smin.nxv4i8() - define signext i8 @vreduce_smin_nxv4i8( %v) { ; CHECK-LABEL: vreduce_smin_nxv4i8: ; CHECK: # %bb.0: @@ -282,8 +240,6 @@ define signext i8 @vreduce_smin_nxv4i8( %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.and.nxv4i8() - define signext i8 @vreduce_and_nxv4i8( %v) { ; CHECK-LABEL: vreduce_and_nxv4i8: ; CHECK: # %bb.0: @@ -295,8 +251,6 @@ define signext 
i8 @vreduce_and_nxv4i8( %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.or.nxv4i8() - define signext i8 @vreduce_or_nxv4i8( %v) { ; CHECK-LABEL: vreduce_or_nxv4i8: ; CHECK: # %bb.0: @@ -308,8 +262,6 @@ define signext i8 @vreduce_or_nxv4i8( %v) { ret i8 %red } -declare i8 @llvm.vector.reduce.xor.nxv4i8() - define signext i8 @vreduce_xor_nxv4i8( %v) { ; CHECK-LABEL: vreduce_xor_nxv4i8: ; CHECK: # %bb.0: @@ -322,8 +274,6 @@ define signext i8 @vreduce_xor_nxv4i8( %v) { ret i8 %red } -declare i16 @llvm.vector.reduce.add.nxv1i16() - define signext i16 @vreduce_add_nxv1i16( %v) { ; CHECK-LABEL: vreduce_add_nxv1i16: ; CHECK: # %bb.0: @@ -366,8 +316,6 @@ define signext i16 @vwreduce_uadd_nxv1i8( %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.umax.nxv1i16() - define signext i16 @vreduce_umax_nxv1i16( %v) { ; CHECK-LABEL: vreduce_umax_nxv1i16: ; CHECK: # %bb.0: @@ -379,8 +327,6 @@ define signext i16 @vreduce_umax_nxv1i16( %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.smax.nxv1i16() - define signext i16 @vreduce_smax_nxv1i16( %v) { ; CHECK-LABEL: vreduce_smax_nxv1i16: ; CHECK: # %bb.0: @@ -392,8 +338,6 @@ define signext i16 @vreduce_smax_nxv1i16( %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.umin.nxv1i16() - define signext i16 @vreduce_umin_nxv1i16( %v) { ; CHECK-LABEL: vreduce_umin_nxv1i16: ; CHECK: # %bb.0: @@ -405,8 +349,6 @@ define signext i16 @vreduce_umin_nxv1i16( %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.smin.nxv1i16() - define signext i16 @vreduce_smin_nxv1i16( %v) { ; CHECK-LABEL: vreduce_smin_nxv1i16: ; CHECK: # %bb.0: @@ -418,8 +360,6 @@ define signext i16 @vreduce_smin_nxv1i16( %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.and.nxv1i16() - define signext i16 @vreduce_and_nxv1i16( %v) { ; CHECK-LABEL: vreduce_and_nxv1i16: ; CHECK: # %bb.0: @@ -431,8 +371,6 @@ define signext i16 @vreduce_and_nxv1i16( %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.or.nxv1i16() - define signext i16 @vreduce_or_nxv1i16( %v) { ; 
CHECK-LABEL: vreduce_or_nxv1i16: ; CHECK: # %bb.0: @@ -444,8 +382,6 @@ define signext i16 @vreduce_or_nxv1i16( %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.xor.nxv1i16() - define signext i16 @vreduce_xor_nxv1i16( %v) { ; CHECK-LABEL: vreduce_xor_nxv1i16: ; CHECK: # %bb.0: @@ -458,8 +394,6 @@ define signext i16 @vreduce_xor_nxv1i16( %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.add.nxv2i16() - define signext i16 @vreduce_add_nxv2i16( %v) { ; CHECK-LABEL: vreduce_add_nxv2i16: ; CHECK: # %bb.0: @@ -502,8 +436,6 @@ define signext i16 @vwreduce_uadd_nxv2i8( %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.umax.nxv2i16() - define signext i16 @vreduce_umax_nxv2i16( %v) { ; CHECK-LABEL: vreduce_umax_nxv2i16: ; CHECK: # %bb.0: @@ -515,8 +447,6 @@ define signext i16 @vreduce_umax_nxv2i16( %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.smax.nxv2i16() - define signext i16 @vreduce_smax_nxv2i16( %v) { ; CHECK-LABEL: vreduce_smax_nxv2i16: ; CHECK: # %bb.0: @@ -528,8 +458,6 @@ define signext i16 @vreduce_smax_nxv2i16( %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.umin.nxv2i16() - define signext i16 @vreduce_umin_nxv2i16( %v) { ; CHECK-LABEL: vreduce_umin_nxv2i16: ; CHECK: # %bb.0: @@ -541,8 +469,6 @@ define signext i16 @vreduce_umin_nxv2i16( %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.smin.nxv2i16() - define signext i16 @vreduce_smin_nxv2i16( %v) { ; CHECK-LABEL: vreduce_smin_nxv2i16: ; CHECK: # %bb.0: @@ -554,8 +480,6 @@ define signext i16 @vreduce_smin_nxv2i16( %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.and.nxv2i16() - define signext i16 @vreduce_and_nxv2i16( %v) { ; CHECK-LABEL: vreduce_and_nxv2i16: ; CHECK: # %bb.0: @@ -567,8 +491,6 @@ define signext i16 @vreduce_and_nxv2i16( %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.or.nxv2i16() - define signext i16 @vreduce_or_nxv2i16( %v) { ; CHECK-LABEL: vreduce_or_nxv2i16: ; CHECK: # %bb.0: @@ -580,8 +502,6 @@ define signext i16 @vreduce_or_nxv2i16( %v) { ret i16 %red 
} -declare i16 @llvm.vector.reduce.xor.nxv2i16() - define signext i16 @vreduce_xor_nxv2i16( %v) { ; CHECK-LABEL: vreduce_xor_nxv2i16: ; CHECK: # %bb.0: @@ -594,8 +514,6 @@ define signext i16 @vreduce_xor_nxv2i16( %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.add.nxv4i16() - define signext i16 @vreduce_add_nxv4i16( %v) { ; CHECK-LABEL: vreduce_add_nxv4i16: ; CHECK: # %bb.0: @@ -638,8 +556,6 @@ define signext i16 @vwreduce_uadd_nxv4i8( %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.umax.nxv4i16() - define signext i16 @vreduce_umax_nxv4i16( %v) { ; CHECK-LABEL: vreduce_umax_nxv4i16: ; CHECK: # %bb.0: @@ -651,8 +567,6 @@ define signext i16 @vreduce_umax_nxv4i16( %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.smax.nxv4i16() - define signext i16 @vreduce_smax_nxv4i16( %v) { ; CHECK-LABEL: vreduce_smax_nxv4i16: ; CHECK: # %bb.0: @@ -664,8 +578,6 @@ define signext i16 @vreduce_smax_nxv4i16( %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.umin.nxv4i16() - define signext i16 @vreduce_umin_nxv4i16( %v) { ; CHECK-LABEL: vreduce_umin_nxv4i16: ; CHECK: # %bb.0: @@ -677,8 +589,6 @@ define signext i16 @vreduce_umin_nxv4i16( %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.smin.nxv4i16() - define signext i16 @vreduce_smin_nxv4i16( %v) { ; CHECK-LABEL: vreduce_smin_nxv4i16: ; CHECK: # %bb.0: @@ -690,8 +600,6 @@ define signext i16 @vreduce_smin_nxv4i16( %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.and.nxv4i16() - define signext i16 @vreduce_and_nxv4i16( %v) { ; CHECK-LABEL: vreduce_and_nxv4i16: ; CHECK: # %bb.0: @@ -703,8 +611,6 @@ define signext i16 @vreduce_and_nxv4i16( %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.or.nxv4i16() - define signext i16 @vreduce_or_nxv4i16( %v) { ; CHECK-LABEL: vreduce_or_nxv4i16: ; CHECK: # %bb.0: @@ -716,8 +622,6 @@ define signext i16 @vreduce_or_nxv4i16( %v) { ret i16 %red } -declare i16 @llvm.vector.reduce.xor.nxv4i16() - define signext i16 @vreduce_xor_nxv4i16( %v) { ; CHECK-LABEL: 
vreduce_xor_nxv4i16: ; CHECK: # %bb.0: @@ -730,8 +634,6 @@ define signext i16 @vreduce_xor_nxv4i16( %v) { ret i16 %red } -declare i32 @llvm.vector.reduce.add.nxv1i32() - define signext i32 @vreduce_add_nxv1i32( %v) { ; CHECK-LABEL: vreduce_add_nxv1i32: ; CHECK: # %bb.0: @@ -774,8 +676,6 @@ define signext i32 @vwreduce_uadd_nxv1i16( %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.umax.nxv1i32() - define signext i32 @vreduce_umax_nxv1i32( %v) { ; CHECK-LABEL: vreduce_umax_nxv1i32: ; CHECK: # %bb.0: @@ -787,8 +687,6 @@ define signext i32 @vreduce_umax_nxv1i32( %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.smax.nxv1i32() - define signext i32 @vreduce_smax_nxv1i32( %v) { ; CHECK-LABEL: vreduce_smax_nxv1i32: ; CHECK: # %bb.0: @@ -800,8 +698,6 @@ define signext i32 @vreduce_smax_nxv1i32( %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.umin.nxv1i32() - define signext i32 @vreduce_umin_nxv1i32( %v) { ; CHECK-LABEL: vreduce_umin_nxv1i32: ; CHECK: # %bb.0: @@ -813,8 +709,6 @@ define signext i32 @vreduce_umin_nxv1i32( %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.smin.nxv1i32() - define signext i32 @vreduce_smin_nxv1i32( %v) { ; CHECK-LABEL: vreduce_smin_nxv1i32: ; CHECK: # %bb.0: @@ -826,8 +720,6 @@ define signext i32 @vreduce_smin_nxv1i32( %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.and.nxv1i32() - define signext i32 @vreduce_and_nxv1i32( %v) { ; CHECK-LABEL: vreduce_and_nxv1i32: ; CHECK: # %bb.0: @@ -839,8 +731,6 @@ define signext i32 @vreduce_and_nxv1i32( %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.or.nxv1i32() - define signext i32 @vreduce_or_nxv1i32( %v) { ; CHECK-LABEL: vreduce_or_nxv1i32: ; CHECK: # %bb.0: @@ -852,8 +742,6 @@ define signext i32 @vreduce_or_nxv1i32( %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.xor.nxv1i32() - define signext i32 @vreduce_xor_nxv1i32( %v) { ; CHECK-LABEL: vreduce_xor_nxv1i32: ; CHECK: # %bb.0: @@ -866,8 +754,6 @@ define signext i32 @vreduce_xor_nxv1i32( %v) { ret i32 %red } -declare 
i32 @llvm.vector.reduce.add.nxv2i32() - define signext i32 @vreduce_add_nxv2i32( %v) { ; CHECK-LABEL: vreduce_add_nxv2i32: ; CHECK: # %bb.0: @@ -910,8 +796,6 @@ define signext i32 @vwreduce_uadd_nxv2i16( %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.umax.nxv2i32() - define signext i32 @vreduce_umax_nxv2i32( %v) { ; CHECK-LABEL: vreduce_umax_nxv2i32: ; CHECK: # %bb.0: @@ -923,8 +807,6 @@ define signext i32 @vreduce_umax_nxv2i32( %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.smax.nxv2i32() - define signext i32 @vreduce_smax_nxv2i32( %v) { ; CHECK-LABEL: vreduce_smax_nxv2i32: ; CHECK: # %bb.0: @@ -936,8 +818,6 @@ define signext i32 @vreduce_smax_nxv2i32( %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.umin.nxv2i32() - define signext i32 @vreduce_umin_nxv2i32( %v) { ; CHECK-LABEL: vreduce_umin_nxv2i32: ; CHECK: # %bb.0: @@ -949,8 +829,6 @@ define signext i32 @vreduce_umin_nxv2i32( %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.smin.nxv2i32() - define signext i32 @vreduce_smin_nxv2i32( %v) { ; CHECK-LABEL: vreduce_smin_nxv2i32: ; CHECK: # %bb.0: @@ -962,8 +840,6 @@ define signext i32 @vreduce_smin_nxv2i32( %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.and.nxv2i32() - define signext i32 @vreduce_and_nxv2i32( %v) { ; CHECK-LABEL: vreduce_and_nxv2i32: ; CHECK: # %bb.0: @@ -975,8 +851,6 @@ define signext i32 @vreduce_and_nxv2i32( %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.or.nxv2i32() - define signext i32 @vreduce_or_nxv2i32( %v) { ; CHECK-LABEL: vreduce_or_nxv2i32: ; CHECK: # %bb.0: @@ -988,8 +862,6 @@ define signext i32 @vreduce_or_nxv2i32( %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.xor.nxv2i32() - define signext i32 @vreduce_xor_nxv2i32( %v) { ; CHECK-LABEL: vreduce_xor_nxv2i32: ; CHECK: # %bb.0: @@ -1002,8 +874,6 @@ define signext i32 @vreduce_xor_nxv2i32( %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.add.nxv4i32() - define signext i32 @vreduce_add_nxv4i32( %v) { ; CHECK-LABEL: vreduce_add_nxv4i32: ; 
CHECK: # %bb.0: @@ -1046,8 +916,6 @@ define signext i32 @vwreduce_uadd_nxv4i16( %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.umax.nxv4i32() - define signext i32 @vreduce_umax_nxv4i32( %v) { ; CHECK-LABEL: vreduce_umax_nxv4i32: ; CHECK: # %bb.0: @@ -1059,8 +927,6 @@ define signext i32 @vreduce_umax_nxv4i32( %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.smax.nxv4i32() - define signext i32 @vreduce_smax_nxv4i32( %v) { ; CHECK-LABEL: vreduce_smax_nxv4i32: ; CHECK: # %bb.0: @@ -1072,8 +938,6 @@ define signext i32 @vreduce_smax_nxv4i32( %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.umin.nxv4i32() - define signext i32 @vreduce_umin_nxv4i32( %v) { ; CHECK-LABEL: vreduce_umin_nxv4i32: ; CHECK: # %bb.0: @@ -1085,8 +949,6 @@ define signext i32 @vreduce_umin_nxv4i32( %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.smin.nxv4i32() - define signext i32 @vreduce_smin_nxv4i32( %v) { ; CHECK-LABEL: vreduce_smin_nxv4i32: ; CHECK: # %bb.0: @@ -1098,8 +960,6 @@ define signext i32 @vreduce_smin_nxv4i32( %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.and.nxv4i32() - define signext i32 @vreduce_and_nxv4i32( %v) { ; CHECK-LABEL: vreduce_and_nxv4i32: ; CHECK: # %bb.0: @@ -1111,8 +971,6 @@ define signext i32 @vreduce_and_nxv4i32( %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.or.nxv4i32() - define signext i32 @vreduce_or_nxv4i32( %v) { ; CHECK-LABEL: vreduce_or_nxv4i32: ; CHECK: # %bb.0: @@ -1124,8 +982,6 @@ define signext i32 @vreduce_or_nxv4i32( %v) { ret i32 %red } -declare i32 @llvm.vector.reduce.xor.nxv4i32() - define signext i32 @vreduce_xor_nxv4i32( %v) { ; CHECK-LABEL: vreduce_xor_nxv4i32: ; CHECK: # %bb.0: @@ -1138,8 +994,6 @@ define signext i32 @vreduce_xor_nxv4i32( %v) { ret i32 %red } -declare i64 @llvm.vector.reduce.add.nxv1i64() - define i64 @vreduce_add_nxv1i64( %v) { ; RV32-LABEL: vreduce_add_nxv1i64: ; RV32: # %bb.0: @@ -1220,8 +1074,6 @@ define i64 @vwreduce_uadd_nxv1i32( %v) { ret i64 %red } -declare i64 
@llvm.vector.reduce.umax.nxv1i64() - define i64 @vreduce_umax_nxv1i64( %v) { ; RV32-LABEL: vreduce_umax_nxv1i64: ; RV32: # %bb.0: @@ -1244,8 +1096,6 @@ define i64 @vreduce_umax_nxv1i64( %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.smax.nxv1i64() - define i64 @vreduce_smax_nxv1i64( %v) { ; RV32-LABEL: vreduce_smax_nxv1i64: ; RV32: # %bb.0: @@ -1268,8 +1118,6 @@ define i64 @vreduce_smax_nxv1i64( %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.umin.nxv1i64() - define i64 @vreduce_umin_nxv1i64( %v) { ; RV32-LABEL: vreduce_umin_nxv1i64: ; RV32: # %bb.0: @@ -1292,8 +1140,6 @@ define i64 @vreduce_umin_nxv1i64( %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.smin.nxv1i64() - define i64 @vreduce_smin_nxv1i64( %v) { ; RV32-LABEL: vreduce_smin_nxv1i64: ; RV32: # %bb.0: @@ -1316,8 +1162,6 @@ define i64 @vreduce_smin_nxv1i64( %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.and.nxv1i64() - define i64 @vreduce_and_nxv1i64( %v) { ; RV32-LABEL: vreduce_and_nxv1i64: ; RV32: # %bb.0: @@ -1340,8 +1184,6 @@ define i64 @vreduce_and_nxv1i64( %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.or.nxv1i64() - define i64 @vreduce_or_nxv1i64( %v) { ; RV32-LABEL: vreduce_or_nxv1i64: ; RV32: # %bb.0: @@ -1364,8 +1206,6 @@ define i64 @vreduce_or_nxv1i64( %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.xor.nxv1i64() - define i64 @vreduce_xor_nxv1i64( %v) { ; RV32-LABEL: vreduce_xor_nxv1i64: ; RV32: # %bb.0: @@ -1390,8 +1230,6 @@ define i64 @vreduce_xor_nxv1i64( %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.add.nxv2i64() - define i64 @vreduce_add_nxv2i64( %v) { ; RV32-LABEL: vreduce_add_nxv2i64: ; RV32: # %bb.0: @@ -1472,8 +1310,6 @@ define i64 @vwreduce_uadd_nxv2i32( %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.umax.nxv2i64() - define i64 @vreduce_umax_nxv2i64( %v) { ; RV32-LABEL: vreduce_umax_nxv2i64: ; RV32: # %bb.0: @@ -1496,8 +1332,6 @@ define i64 @vreduce_umax_nxv2i64( %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.smax.nxv2i64() - 
define i64 @vreduce_smax_nxv2i64( %v) { ; RV32-LABEL: vreduce_smax_nxv2i64: ; RV32: # %bb.0: @@ -1520,8 +1354,6 @@ define i64 @vreduce_smax_nxv2i64( %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.umin.nxv2i64() - define i64 @vreduce_umin_nxv2i64( %v) { ; RV32-LABEL: vreduce_umin_nxv2i64: ; RV32: # %bb.0: @@ -1544,8 +1376,6 @@ define i64 @vreduce_umin_nxv2i64( %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.smin.nxv2i64() - define i64 @vreduce_smin_nxv2i64( %v) { ; RV32-LABEL: vreduce_smin_nxv2i64: ; RV32: # %bb.0: @@ -1568,8 +1398,6 @@ define i64 @vreduce_smin_nxv2i64( %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.and.nxv2i64() - define i64 @vreduce_and_nxv2i64( %v) { ; RV32-LABEL: vreduce_and_nxv2i64: ; RV32: # %bb.0: @@ -1592,8 +1420,6 @@ define i64 @vreduce_and_nxv2i64( %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.or.nxv2i64() - define i64 @vreduce_or_nxv2i64( %v) { ; RV32-LABEL: vreduce_or_nxv2i64: ; RV32: # %bb.0: @@ -1616,8 +1442,6 @@ define i64 @vreduce_or_nxv2i64( %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.xor.nxv2i64() - define i64 @vreduce_xor_nxv2i64( %v) { ; RV32-LABEL: vreduce_xor_nxv2i64: ; RV32: # %bb.0: @@ -1642,8 +1466,6 @@ define i64 @vreduce_xor_nxv2i64( %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.add.nxv4i64() - define i64 @vreduce_add_nxv4i64( %v) { ; RV32-LABEL: vreduce_add_nxv4i64: ; RV32: # %bb.0: @@ -1724,8 +1546,6 @@ define i64 @vwreduce_uadd_nxv4i32( %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.umax.nxv4i64() - define i64 @vreduce_umax_nxv4i64( %v) { ; RV32-LABEL: vreduce_umax_nxv4i64: ; RV32: # %bb.0: @@ -1748,8 +1568,6 @@ define i64 @vreduce_umax_nxv4i64( %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.smax.nxv4i64() - define i64 @vreduce_smax_nxv4i64( %v) { ; RV32-LABEL: vreduce_smax_nxv4i64: ; RV32: # %bb.0: @@ -1772,8 +1590,6 @@ define i64 @vreduce_smax_nxv4i64( %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.umin.nxv4i64() - define i64 @vreduce_umin_nxv4i64( %v) 
{ ; RV32-LABEL: vreduce_umin_nxv4i64: ; RV32: # %bb.0: @@ -1796,8 +1612,6 @@ define i64 @vreduce_umin_nxv4i64( %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.smin.nxv4i64() - define i64 @vreduce_smin_nxv4i64( %v) { ; RV32-LABEL: vreduce_smin_nxv4i64: ; RV32: # %bb.0: @@ -1820,8 +1634,6 @@ define i64 @vreduce_smin_nxv4i64( %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.and.nxv4i64() - define i64 @vreduce_and_nxv4i64( %v) { ; RV32-LABEL: vreduce_and_nxv4i64: ; RV32: # %bb.0: @@ -1844,8 +1656,6 @@ define i64 @vreduce_and_nxv4i64( %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.or.nxv4i64() - define i64 @vreduce_or_nxv4i64( %v) { ; RV32-LABEL: vreduce_or_nxv4i64: ; RV32: # %bb.0: @@ -1868,8 +1678,6 @@ define i64 @vreduce_or_nxv4i64( %v) { ret i64 %red } -declare i64 @llvm.vector.reduce.xor.nxv4i64() - define i64 @vreduce_xor_nxv4i64( %v) { ; RV32-LABEL: vreduce_xor_nxv4i64: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll index 18d20f66987b2..1e629e9d20530 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s -declare i1 @llvm.vp.reduce.and.nxv1i1(i1, , , i32) - define zeroext i1 @vpreduce_and_nxv1i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_nxv1i1: ; CHECK: # %bb.0: @@ -18,8 +16,6 @@ define zeroext i1 @vpreduce_and_nxv1i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_or_nxv1i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv1i1: ; CHECK: # %bb.0: @@ -34,8 +30,6 @@ define zeroext i1 @vpreduce_or_nxv1i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_xor_nxv1i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_nxv1i1: ; CHECK: # 
%bb.0: @@ -50,8 +44,6 @@ define zeroext i1 @vpreduce_xor_nxv1i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_and_nxv2i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_nxv2i1: ; CHECK: # %bb.0: @@ -66,8 +58,6 @@ define zeroext i1 @vpreduce_and_nxv2i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_or_nxv2i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv2i1: ; CHECK: # %bb.0: @@ -82,8 +72,6 @@ define zeroext i1 @vpreduce_or_nxv2i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_xor_nxv2i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_nxv2i1: ; CHECK: # %bb.0: @@ -98,8 +86,6 @@ define zeroext i1 @vpreduce_xor_nxv2i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_and_nxv4i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_nxv4i1: ; CHECK: # %bb.0: @@ -114,8 +100,6 @@ define zeroext i1 @vpreduce_and_nxv4i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_or_nxv4i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv4i1: ; CHECK: # %bb.0: @@ -130,8 +114,6 @@ define zeroext i1 @vpreduce_or_nxv4i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_xor_nxv4i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_nxv4i1: ; CHECK: # %bb.0: @@ -146,8 +128,6 @@ define zeroext i1 @vpreduce_xor_nxv4i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_and_nxv8i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_nxv8i1: ; CHECK: # %bb.0: @@ -162,8 +142,6 @@ define zeroext i1 @vpreduce_and_nxv8i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_or_nxv8i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv8i1: ; CHECK: # %bb.0: @@ -178,8 +156,6 @@ define zeroext i1 @vpreduce_or_nxv8i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_xor_nxv8i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_nxv8i1: ; 
CHECK: # %bb.0: @@ -194,8 +170,6 @@ define zeroext i1 @vpreduce_xor_nxv8i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_and_nxv16i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_nxv16i1: ; CHECK: # %bb.0: @@ -210,8 +184,6 @@ define zeroext i1 @vpreduce_and_nxv16i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_or_nxv16i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv16i1: ; CHECK: # %bb.0: @@ -226,8 +198,6 @@ define zeroext i1 @vpreduce_or_nxv16i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_xor_nxv16i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_nxv16i1: ; CHECK: # %bb.0: @@ -242,8 +212,6 @@ define zeroext i1 @vpreduce_xor_nxv16i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_and_nxv32i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_nxv32i1: ; CHECK: # %bb.0: @@ -258,8 +226,6 @@ define zeroext i1 @vpreduce_and_nxv32i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_or_nxv32i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv32i1: ; CHECK: # %bb.0: @@ -274,8 +240,6 @@ define zeroext i1 @vpreduce_or_nxv32i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_xor_nxv32i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_nxv32i1: ; CHECK: # %bb.0: @@ -290,8 +254,6 @@ define zeroext i1 @vpreduce_xor_nxv32i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_or_nxv40i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv40i1: ; CHECK: # %bb.0: @@ -306,8 +268,6 @@ define zeroext i1 @vpreduce_or_nxv40i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_and_nxv64i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_nxv64i1: ; CHECK: # %bb.0: @@ -322,8 +282,6 @@ define zeroext i1 @vpreduce_and_nxv64i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_or_nxv64i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) 
{ ; CHECK-LABEL: vpreduce_or_nxv64i1: ; CHECK: # %bb.0: @@ -338,8 +296,6 @@ define zeroext i1 @vpreduce_or_nxv64i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_xor_nxv64i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_nxv64i1: ; CHECK: # %bb.0: @@ -354,8 +310,6 @@ define zeroext i1 @vpreduce_xor_nxv64i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_or_nxv128i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv128i1: ; CHECK: # %bb.0: @@ -386,8 +340,6 @@ define zeroext i1 @vpreduce_or_nxv128i1(i1 zeroext %s, %v, < ret i1 %r } -declare i1 @llvm.vp.reduce.add.nxv1i1(i1, , , i32) - define zeroext i1 @vpreduce_add_nxv1i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_nxv1i1: ; CHECK: # %bb.0: @@ -402,8 +354,6 @@ define zeroext i1 @vpreduce_add_nxv1i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_add_nxv2i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_nxv2i1: ; CHECK: # %bb.0: @@ -418,8 +368,6 @@ define zeroext i1 @vpreduce_add_nxv2i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_add_nxv4i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_nxv4i1: ; CHECK: # %bb.0: @@ -434,8 +382,6 @@ define zeroext i1 @vpreduce_add_nxv4i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_add_nxv8i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_nxv8i1: ; CHECK: # %bb.0: @@ -450,8 +396,6 @@ define zeroext i1 @vpreduce_add_nxv8i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_add_nxv16i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_nxv16i1: ; CHECK: # %bb.0: @@ -466,8 +410,6 @@ define zeroext i1 @vpreduce_add_nxv16i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_add_nxv32i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_nxv32i1: ; CHECK: # %bb.0: @@ -482,8 +424,6 @@ define zeroext i1 @vpreduce_add_nxv32i1(i1 zeroext %s, 
%v, , , i32) - define zeroext i1 @vpreduce_add_nxv64i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_nxv64i1: ; CHECK: # %bb.0: @@ -498,9 +438,6 @@ define zeroext i1 @vpreduce_add_nxv64i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_smax_nxv1i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv1i1: ; CHECK: # %bb.0: @@ -515,8 +452,6 @@ define zeroext i1 @vpreduce_smax_nxv1i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_smax_nxv2i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv2i1: ; CHECK: # %bb.0: @@ -531,8 +466,6 @@ define zeroext i1 @vpreduce_smax_nxv2i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_smax_nxv4i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv4i1: ; CHECK: # %bb.0: @@ -547,8 +480,6 @@ define zeroext i1 @vpreduce_smax_nxv4i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_smax_nxv8i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv8i1: ; CHECK: # %bb.0: @@ -563,8 +494,6 @@ define zeroext i1 @vpreduce_smax_nxv8i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_smax_nxv16i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv16i1: ; CHECK: # %bb.0: @@ -579,8 +508,6 @@ define zeroext i1 @vpreduce_smax_nxv16i1(i1 zeroext %s, %v, < ret i1 %r } -declare i1 @llvm.vp.reduce.smax.nxv32i1(i1, , , i32) - define zeroext i1 @vpreduce_smax_nxv32i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv32i1: ; CHECK: # %bb.0: @@ -595,8 +522,6 @@ define zeroext i1 @vpreduce_smax_nxv32i1(i1 zeroext %s, %v, < ret i1 %r } -declare i1 @llvm.vp.reduce.smax.nxv64i1(i1, , , i32) - define zeroext i1 @vpreduce_smax_nxv64i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv64i1: ; CHECK: # %bb.0: @@ -611,8 +536,6 @@ define zeroext i1 @vpreduce_smax_nxv64i1(i1 zeroext %s, %v, < ret i1 %r } -declare i1 
@llvm.vp.reduce.smin.nxv1i1(i1, , , i32) - define zeroext i1 @vpreduce_smin_nxv1i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_nxv1i1: ; CHECK: # %bb.0: @@ -627,8 +550,6 @@ define zeroext i1 @vpreduce_smin_nxv1i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_smin_nxv2i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_nxv2i1: ; CHECK: # %bb.0: @@ -643,8 +564,6 @@ define zeroext i1 @vpreduce_smin_nxv2i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_smin_nxv4i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_nxv4i1: ; CHECK: # %bb.0: @@ -659,8 +578,6 @@ define zeroext i1 @vpreduce_smin_nxv4i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_smin_nxv8i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_nxv8i1: ; CHECK: # %bb.0: @@ -675,8 +592,6 @@ define zeroext i1 @vpreduce_smin_nxv8i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_smin_nxv16i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_nxv16i1: ; CHECK: # %bb.0: @@ -691,8 +606,6 @@ define zeroext i1 @vpreduce_smin_nxv16i1(i1 zeroext %s, %v, < ret i1 %r } -declare i1 @llvm.vp.reduce.smin.nxv32i1(i1, , , i32) - define zeroext i1 @vpreduce_smin_nxv32i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_nxv32i1: ; CHECK: # %bb.0: @@ -707,8 +620,6 @@ define zeroext i1 @vpreduce_smin_nxv32i1(i1 zeroext %s, %v, < ret i1 %r } -declare i1 @llvm.vp.reduce.smin.nxv64i1(i1, , , i32) - define zeroext i1 @vpreduce_smin_nxv64i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_nxv64i1: ; CHECK: # %bb.0: @@ -723,8 +634,6 @@ define zeroext i1 @vpreduce_smin_nxv64i1(i1 zeroext %s, %v, < ret i1 %r } -declare i1 @llvm.vp.reduce.umax.nxv1i1(i1, , , i32) - define zeroext i1 @vpreduce_umax_nxv1i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_nxv1i1: ; CHECK: # %bb.0: @@ -739,8 +648,6 @@ define zeroext i1 
@vpreduce_umax_nxv1i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_umax_nxv2i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_nxv2i1: ; CHECK: # %bb.0: @@ -755,8 +662,6 @@ define zeroext i1 @vpreduce_umax_nxv2i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_umax_nxv4i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_nxv4i1: ; CHECK: # %bb.0: @@ -771,8 +676,6 @@ define zeroext i1 @vpreduce_umax_nxv4i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_umax_nxv8i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_nxv8i1: ; CHECK: # %bb.0: @@ -787,8 +690,6 @@ define zeroext i1 @vpreduce_umax_nxv8i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_umax_nxv16i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_nxv16i1: ; CHECK: # %bb.0: @@ -803,8 +704,6 @@ define zeroext i1 @vpreduce_umax_nxv16i1(i1 zeroext %s, %v, < ret i1 %r } -declare i1 @llvm.vp.reduce.umax.nxv32i1(i1, , , i32) - define zeroext i1 @vpreduce_umax_nxv32i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_nxv32i1: ; CHECK: # %bb.0: @@ -819,8 +718,6 @@ define zeroext i1 @vpreduce_umax_nxv32i1(i1 zeroext %s, %v, < ret i1 %r } -declare i1 @llvm.vp.reduce.umax.nxv64i1(i1, , , i32) - define zeroext i1 @vpreduce_umax_nxv64i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umax_nxv64i1: ; CHECK: # %bb.0: @@ -835,8 +732,6 @@ define zeroext i1 @vpreduce_umax_nxv64i1(i1 zeroext %s, %v, < ret i1 %r } -declare i1 @llvm.vp.reduce.umin.nxv1i1(i1, , , i32) - define zeroext i1 @vpreduce_umin_nxv1i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_nxv1i1: ; CHECK: # %bb.0: @@ -851,8 +746,6 @@ define zeroext i1 @vpreduce_umin_nxv1i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_umin_nxv2i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_nxv2i1: ; CHECK: # %bb.0: @@ -867,8 +760,6 @@ define 
zeroext i1 @vpreduce_umin_nxv2i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_umin_nxv4i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_nxv4i1: ; CHECK: # %bb.0: @@ -883,8 +774,6 @@ define zeroext i1 @vpreduce_umin_nxv4i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_umin_nxv8i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_nxv8i1: ; CHECK: # %bb.0: @@ -899,8 +788,6 @@ define zeroext i1 @vpreduce_umin_nxv8i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_umin_nxv16i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_nxv16i1: ; CHECK: # %bb.0: @@ -915,8 +802,6 @@ define zeroext i1 @vpreduce_umin_nxv16i1(i1 zeroext %s, %v, < ret i1 %r } -declare i1 @llvm.vp.reduce.umin.nxv32i1(i1, , , i32) - define zeroext i1 @vpreduce_umin_nxv32i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_nxv32i1: ; CHECK: # %bb.0: @@ -931,8 +816,6 @@ define zeroext i1 @vpreduce_umin_nxv32i1(i1 zeroext %s, %v, < ret i1 %r } -declare i1 @llvm.vp.reduce.umin.nxv64i1(i1, , , i32) - define zeroext i1 @vpreduce_umin_nxv64i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_nxv64i1: ; CHECK: # %bb.0: @@ -947,8 +830,6 @@ define zeroext i1 @vpreduce_umin_nxv64i1(i1 zeroext %s, %v, < ret i1 %r } -declare i1 @llvm.vp.reduce.mul.nxv1i1(i1, , , i32) - define zeroext i1 @vpreduce_mul_nxv1i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_mul_nxv1i1: ; CHECK: # %bb.0: @@ -963,8 +844,6 @@ define zeroext i1 @vpreduce_mul_nxv1i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_mul_nxv2i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_mul_nxv2i1: ; CHECK: # %bb.0: @@ -979,8 +858,6 @@ define zeroext i1 @vpreduce_mul_nxv2i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_mul_nxv4i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_mul_nxv4i1: ; CHECK: # %bb.0: @@ -995,8 +872,6 @@ define 
zeroext i1 @vpreduce_mul_nxv4i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_mul_nxv8i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_mul_nxv8i1: ; CHECK: # %bb.0: @@ -1011,8 +886,6 @@ define zeroext i1 @vpreduce_mul_nxv8i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_mul_nxv16i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_mul_nxv16i1: ; CHECK: # %bb.0: @@ -1027,8 +900,6 @@ define zeroext i1 @vpreduce_mul_nxv16i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_mul_nxv32i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_mul_nxv32i1: ; CHECK: # %bb.0: @@ -1043,8 +914,6 @@ define zeroext i1 @vpreduce_mul_nxv32i1(i1 zeroext %s, %v, , , i32) - define zeroext i1 @vpreduce_mul_nxv64i1(i1 zeroext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_mul_nxv64i1: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-mask.ll index ce9d6c5ab91a8..cc829b32e12e9 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vreductions-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-mask.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s -declare i1 @llvm.vector.reduce.or.nxv1i1() - define zeroext i1 @vreduce_or_nxv1i1( %v) { ; CHECK-LABEL: vreduce_or_nxv1i1: ; CHECK: # %bb.0: @@ -15,8 +13,6 @@ define zeroext i1 @vreduce_or_nxv1i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.xor.nxv1i1() - define zeroext i1 @vreduce_xor_nxv1i1( %v) { ; CHECK-LABEL: vreduce_xor_nxv1i1: ; CHECK: # %bb.0: @@ -28,8 +24,6 @@ define zeroext i1 @vreduce_xor_nxv1i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.and.nxv1i1() - define zeroext i1 @vreduce_and_nxv1i1( %v) { ; CHECK-LABEL: vreduce_and_nxv1i1: ; CHECK: # %bb.0: @@ -42,8 +36,6 @@ define zeroext i1 @vreduce_and_nxv1i1( %v) { ret i1 %red } -declare 
i1 @llvm.vector.reduce.umax.nxv1i1() - define zeroext i1 @vreduce_umax_nxv1i1( %v) { ; CHECK-LABEL: vreduce_umax_nxv1i1: ; CHECK: # %bb.0: @@ -55,8 +47,6 @@ define zeroext i1 @vreduce_umax_nxv1i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smax.nxv1i1() - define zeroext i1 @vreduce_smax_nxv1i1( %v) { ; CHECK-LABEL: vreduce_smax_nxv1i1: ; CHECK: # %bb.0: @@ -69,8 +59,6 @@ define zeroext i1 @vreduce_smax_nxv1i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umin.nxv1i1() - define zeroext i1 @vreduce_umin_nxv1i1( %v) { ; CHECK-LABEL: vreduce_umin_nxv1i1: ; CHECK: # %bb.0: @@ -83,8 +71,6 @@ define zeroext i1 @vreduce_umin_nxv1i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smin.nxv1i1() - define zeroext i1 @vreduce_smin_nxv1i1( %v) { ; CHECK-LABEL: vreduce_smin_nxv1i1: ; CHECK: # %bb.0: @@ -96,8 +82,6 @@ define zeroext i1 @vreduce_smin_nxv1i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.or.nxv2i1() - define zeroext i1 @vreduce_or_nxv2i1( %v) { ; CHECK-LABEL: vreduce_or_nxv2i1: ; CHECK: # %bb.0: @@ -109,8 +93,6 @@ define zeroext i1 @vreduce_or_nxv2i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.xor.nxv2i1() - define zeroext i1 @vreduce_xor_nxv2i1( %v) { ; CHECK-LABEL: vreduce_xor_nxv2i1: ; CHECK: # %bb.0: @@ -122,8 +104,6 @@ define zeroext i1 @vreduce_xor_nxv2i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.and.nxv2i1() - define zeroext i1 @vreduce_and_nxv2i1( %v) { ; CHECK-LABEL: vreduce_and_nxv2i1: ; CHECK: # %bb.0: @@ -136,8 +116,6 @@ define zeroext i1 @vreduce_and_nxv2i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umax.nxv2i1() - define zeroext i1 @vreduce_umax_nxv2i1( %v) { ; CHECK-LABEL: vreduce_umax_nxv2i1: ; CHECK: # %bb.0: @@ -149,8 +127,6 @@ define zeroext i1 @vreduce_umax_nxv2i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smax.nxv2i1() - define zeroext i1 @vreduce_smax_nxv2i1( %v) { ; CHECK-LABEL: vreduce_smax_nxv2i1: ; CHECK: # %bb.0: @@ -163,8 +139,6 @@ define zeroext i1 @vreduce_smax_nxv2i1( 
%v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umin.nxv2i1() - define zeroext i1 @vreduce_umin_nxv2i1( %v) { ; CHECK-LABEL: vreduce_umin_nxv2i1: ; CHECK: # %bb.0: @@ -177,8 +151,6 @@ define zeroext i1 @vreduce_umin_nxv2i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smin.nxv2i1() - define zeroext i1 @vreduce_smin_nxv2i1( %v) { ; CHECK-LABEL: vreduce_smin_nxv2i1: ; CHECK: # %bb.0: @@ -190,8 +162,6 @@ define zeroext i1 @vreduce_smin_nxv2i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.or.nxv4i1() - define zeroext i1 @vreduce_or_nxv4i1( %v) { ; CHECK-LABEL: vreduce_or_nxv4i1: ; CHECK: # %bb.0: @@ -203,8 +173,6 @@ define zeroext i1 @vreduce_or_nxv4i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.xor.nxv4i1() - define zeroext i1 @vreduce_xor_nxv4i1( %v) { ; CHECK-LABEL: vreduce_xor_nxv4i1: ; CHECK: # %bb.0: @@ -216,8 +184,6 @@ define zeroext i1 @vreduce_xor_nxv4i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.and.nxv4i1() - define zeroext i1 @vreduce_and_nxv4i1( %v) { ; CHECK-LABEL: vreduce_and_nxv4i1: ; CHECK: # %bb.0: @@ -230,8 +196,6 @@ define zeroext i1 @vreduce_and_nxv4i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umax.nxv4i1() - define zeroext i1 @vreduce_umax_nxv4i1( %v) { ; CHECK-LABEL: vreduce_umax_nxv4i1: ; CHECK: # %bb.0: @@ -243,8 +207,6 @@ define zeroext i1 @vreduce_umax_nxv4i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smax.nxv4i1() - define zeroext i1 @vreduce_smax_nxv4i1( %v) { ; CHECK-LABEL: vreduce_smax_nxv4i1: ; CHECK: # %bb.0: @@ -257,8 +219,6 @@ define zeroext i1 @vreduce_smax_nxv4i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umin.nxv4i1() - define zeroext i1 @vreduce_umin_nxv4i1( %v) { ; CHECK-LABEL: vreduce_umin_nxv4i1: ; CHECK: # %bb.0: @@ -271,8 +231,6 @@ define zeroext i1 @vreduce_umin_nxv4i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smin.nxv4i1() - define zeroext i1 @vreduce_smin_nxv4i1( %v) { ; CHECK-LABEL: vreduce_smin_nxv4i1: ; CHECK: # %bb.0: @@ -284,8 +242,6 @@ 
define zeroext i1 @vreduce_smin_nxv4i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.or.nxv8i1() - define zeroext i1 @vreduce_or_nxv8i1( %v) { ; CHECK-LABEL: vreduce_or_nxv8i1: ; CHECK: # %bb.0: @@ -297,8 +253,6 @@ define zeroext i1 @vreduce_or_nxv8i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.xor.nxv8i1() - define zeroext i1 @vreduce_xor_nxv8i1( %v) { ; CHECK-LABEL: vreduce_xor_nxv8i1: ; CHECK: # %bb.0: @@ -310,8 +264,6 @@ define zeroext i1 @vreduce_xor_nxv8i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.and.nxv8i1() - define zeroext i1 @vreduce_and_nxv8i1( %v) { ; CHECK-LABEL: vreduce_and_nxv8i1: ; CHECK: # %bb.0: @@ -324,8 +276,6 @@ define zeroext i1 @vreduce_and_nxv8i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umax.nxv8i1() - define zeroext i1 @vreduce_umax_nxv8i1( %v) { ; CHECK-LABEL: vreduce_umax_nxv8i1: ; CHECK: # %bb.0: @@ -337,8 +287,6 @@ define zeroext i1 @vreduce_umax_nxv8i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smax.nxv8i1() - define zeroext i1 @vreduce_smax_nxv8i1( %v) { ; CHECK-LABEL: vreduce_smax_nxv8i1: ; CHECK: # %bb.0: @@ -351,8 +299,6 @@ define zeroext i1 @vreduce_smax_nxv8i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umin.nxv8i1() - define zeroext i1 @vreduce_umin_nxv8i1( %v) { ; CHECK-LABEL: vreduce_umin_nxv8i1: ; CHECK: # %bb.0: @@ -365,8 +311,6 @@ define zeroext i1 @vreduce_umin_nxv8i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smin.nxv8i1() - define zeroext i1 @vreduce_smin_nxv8i1( %v) { ; CHECK-LABEL: vreduce_smin_nxv8i1: ; CHECK: # %bb.0: @@ -378,8 +322,6 @@ define zeroext i1 @vreduce_smin_nxv8i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.or.nxv16i1() - define zeroext i1 @vreduce_or_nxv16i1( %v) { ; CHECK-LABEL: vreduce_or_nxv16i1: ; CHECK: # %bb.0: @@ -391,8 +333,6 @@ define zeroext i1 @vreduce_or_nxv16i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.xor.nxv16i1() - define zeroext i1 @vreduce_xor_nxv16i1( %v) { ; CHECK-LABEL: vreduce_xor_nxv16i1: ; 
CHECK: # %bb.0: @@ -404,8 +344,6 @@ define zeroext i1 @vreduce_xor_nxv16i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.and.nxv16i1() - define zeroext i1 @vreduce_and_nxv16i1( %v) { ; CHECK-LABEL: vreduce_and_nxv16i1: ; CHECK: # %bb.0: @@ -418,8 +356,6 @@ define zeroext i1 @vreduce_and_nxv16i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umax.nxv16i1() - define zeroext i1 @vreduce_umax_nxv16i1( %v) { ; CHECK-LABEL: vreduce_umax_nxv16i1: ; CHECK: # %bb.0: @@ -431,8 +367,6 @@ define zeroext i1 @vreduce_umax_nxv16i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smax.nxv16i1() - define zeroext i1 @vreduce_smax_nxv16i1( %v) { ; CHECK-LABEL: vreduce_smax_nxv16i1: ; CHECK: # %bb.0: @@ -445,8 +379,6 @@ define zeroext i1 @vreduce_smax_nxv16i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umin.nxv16i1() - define zeroext i1 @vreduce_umin_nxv16i1( %v) { ; CHECK-LABEL: vreduce_umin_nxv16i1: ; CHECK: # %bb.0: @@ -459,8 +391,6 @@ define zeroext i1 @vreduce_umin_nxv16i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smin.nxv16i1() - define zeroext i1 @vreduce_smin_nxv16i1( %v) { ; CHECK-LABEL: vreduce_smin_nxv16i1: ; CHECK: # %bb.0: @@ -472,8 +402,6 @@ define zeroext i1 @vreduce_smin_nxv16i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.or.nxv32i1() - define zeroext i1 @vreduce_or_nxv32i1( %v) { ; CHECK-LABEL: vreduce_or_nxv32i1: ; CHECK: # %bb.0: @@ -485,8 +413,6 @@ define zeroext i1 @vreduce_or_nxv32i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.xor.nxv32i1() - define zeroext i1 @vreduce_xor_nxv32i1( %v) { ; CHECK-LABEL: vreduce_xor_nxv32i1: ; CHECK: # %bb.0: @@ -498,8 +424,6 @@ define zeroext i1 @vreduce_xor_nxv32i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.and.nxv32i1() - define zeroext i1 @vreduce_and_nxv32i1( %v) { ; CHECK-LABEL: vreduce_and_nxv32i1: ; CHECK: # %bb.0: @@ -512,8 +436,6 @@ define zeroext i1 @vreduce_and_nxv32i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umax.nxv32i1() - define zeroext i1 
@vreduce_umax_nxv32i1( %v) { ; CHECK-LABEL: vreduce_umax_nxv32i1: ; CHECK: # %bb.0: @@ -525,8 +447,6 @@ define zeroext i1 @vreduce_umax_nxv32i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smax.nxv32i1() - define zeroext i1 @vreduce_smax_nxv32i1( %v) { ; CHECK-LABEL: vreduce_smax_nxv32i1: ; CHECK: # %bb.0: @@ -539,8 +459,6 @@ define zeroext i1 @vreduce_smax_nxv32i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umin.nxv32i1() - define zeroext i1 @vreduce_umin_nxv32i1( %v) { ; CHECK-LABEL: vreduce_umin_nxv32i1: ; CHECK: # %bb.0: @@ -553,8 +471,6 @@ define zeroext i1 @vreduce_umin_nxv32i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smin.nxv32i1() - define zeroext i1 @vreduce_smin_nxv32i1( %v) { ; CHECK-LABEL: vreduce_smin_nxv32i1: ; CHECK: # %bb.0: @@ -566,8 +482,6 @@ define zeroext i1 @vreduce_smin_nxv32i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.or.nxv64i1() - define zeroext i1 @vreduce_or_nxv64i1( %v) { ; CHECK-LABEL: vreduce_or_nxv64i1: ; CHECK: # %bb.0: @@ -579,8 +493,6 @@ define zeroext i1 @vreduce_or_nxv64i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.xor.nxv64i1() - define zeroext i1 @vreduce_xor_nxv64i1( %v) { ; CHECK-LABEL: vreduce_xor_nxv64i1: ; CHECK: # %bb.0: @@ -592,8 +504,6 @@ define zeroext i1 @vreduce_xor_nxv64i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.and.nxv64i1() - define zeroext i1 @vreduce_and_nxv64i1( %v) { ; CHECK-LABEL: vreduce_and_nxv64i1: ; CHECK: # %bb.0: @@ -606,8 +516,6 @@ define zeroext i1 @vreduce_and_nxv64i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umax.nxv64i1() - define zeroext i1 @vreduce_umax_nxv64i1( %v) { ; CHECK-LABEL: vreduce_umax_nxv64i1: ; CHECK: # %bb.0: @@ -619,8 +527,6 @@ define zeroext i1 @vreduce_umax_nxv64i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smax.nxv64i1() - define zeroext i1 @vreduce_smax_nxv64i1( %v) { ; CHECK-LABEL: vreduce_smax_nxv64i1: ; CHECK: # %bb.0: @@ -633,8 +539,6 @@ define zeroext i1 @vreduce_smax_nxv64i1( %v) { ret i1 
%red } -declare i1 @llvm.vector.reduce.umin.nxv64i1() - define zeroext i1 @vreduce_umin_nxv64i1( %v) { ; CHECK-LABEL: vreduce_umin_nxv64i1: ; CHECK: # %bb.0: @@ -647,8 +551,6 @@ define zeroext i1 @vreduce_umin_nxv64i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smin.nxv64i1() - define zeroext i1 @vreduce_smin_nxv64i1( %v) { ; CHECK-LABEL: vreduce_smin_nxv64i1: ; CHECK: # %bb.0: @@ -660,8 +562,6 @@ define zeroext i1 @vreduce_smin_nxv64i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.add.nxv1i1() - define zeroext i1 @vreduce_add_nxv1i1( %v) { ; CHECK-LABEL: vreduce_add_nxv1i1: ; CHECK: # %bb.0: @@ -673,8 +573,6 @@ define zeroext i1 @vreduce_add_nxv1i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.add.nxv2i1() - define zeroext i1 @vreduce_add_nxv2i1( %v) { ; CHECK-LABEL: vreduce_add_nxv2i1: ; CHECK: # %bb.0: @@ -686,8 +584,6 @@ define zeroext i1 @vreduce_add_nxv2i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.add.nxv4i1() - define zeroext i1 @vreduce_add_nxv4i1( %v) { ; CHECK-LABEL: vreduce_add_nxv4i1: ; CHECK: # %bb.0: @@ -699,8 +595,6 @@ define zeroext i1 @vreduce_add_nxv4i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.add.nxv8i1() - define zeroext i1 @vreduce_add_nxv8i1( %v) { ; CHECK-LABEL: vreduce_add_nxv8i1: ; CHECK: # %bb.0: @@ -712,8 +606,6 @@ define zeroext i1 @vreduce_add_nxv8i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.add.nxv16i1() - define zeroext i1 @vreduce_add_nxv16i1( %v) { ; CHECK-LABEL: vreduce_add_nxv16i1: ; CHECK: # %bb.0: @@ -725,8 +617,6 @@ define zeroext i1 @vreduce_add_nxv16i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.add.nxv32i1() - define zeroext i1 @vreduce_add_nxv32i1( %v) { ; CHECK-LABEL: vreduce_add_nxv32i1: ; CHECK: # %bb.0: @@ -738,8 +628,6 @@ define zeroext i1 @vreduce_add_nxv32i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.add.nxv64i1() - define zeroext i1 @vreduce_add_nxv64i1( %v) { ; CHECK-LABEL: vreduce_add_nxv64i1: ; CHECK: # %bb.0: @@ -751,8 +639,6 @@ define 
zeroext i1 @vreduce_add_nxv64i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.or.nxv128i1() - define zeroext i1 @vreduce_or_nxv128i1( %v) { ; CHECK-LABEL: vreduce_or_nxv128i1: ; CHECK: # %bb.0: @@ -765,8 +651,6 @@ define zeroext i1 @vreduce_or_nxv128i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.xor.nxv128i1() - define zeroext i1 @vreduce_xor_nxv128i1( %v) { ; CHECK-LABEL: vreduce_xor_nxv128i1: ; CHECK: # %bb.0: @@ -779,8 +663,6 @@ define zeroext i1 @vreduce_xor_nxv128i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.and.nxv128i1() - define zeroext i1 @vreduce_and_nxv128i1( %v) { ; CHECK-LABEL: vreduce_and_nxv128i1: ; CHECK: # %bb.0: @@ -793,8 +675,6 @@ define zeroext i1 @vreduce_and_nxv128i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umax.nxv128i1() - define zeroext i1 @vreduce_umax_nxv128i1( %v) { ; CHECK-LABEL: vreduce_umax_nxv128i1: ; CHECK: # %bb.0: @@ -807,8 +687,6 @@ define zeroext i1 @vreduce_umax_nxv128i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smax.nxv128i1() - define zeroext i1 @vreduce_smax_nxv128i1( %v) { ; CHECK-LABEL: vreduce_smax_nxv128i1: ; CHECK: # %bb.0: @@ -821,8 +699,6 @@ define zeroext i1 @vreduce_smax_nxv128i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umin.nxv128i1() - define zeroext i1 @vreduce_umin_nxv128i1( %v) { ; CHECK-LABEL: vreduce_umin_nxv128i1: ; CHECK: # %bb.0: @@ -835,8 +711,6 @@ define zeroext i1 @vreduce_umin_nxv128i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smin.nxv128i1() - define zeroext i1 @vreduce_smin_nxv128i1( %v) { ; CHECK-LABEL: vreduce_smin_nxv128i1: ; CHECK: # %bb.0: @@ -849,8 +723,6 @@ define zeroext i1 @vreduce_smin_nxv128i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.or.nxv256i1() - define zeroext i1 @vreduce_or_nxv256i1( %v) { ; CHECK-LABEL: vreduce_or_nxv256i1: ; CHECK: # %bb.0: @@ -865,8 +737,6 @@ define zeroext i1 @vreduce_or_nxv256i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.xor.nxv256i1() - define zeroext i1 
@vreduce_xor_nxv256i1( %v) { ; CHECK-LABEL: vreduce_xor_nxv256i1: ; CHECK: # %bb.0: @@ -881,8 +751,6 @@ define zeroext i1 @vreduce_xor_nxv256i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.and.nxv256i1() - define zeroext i1 @vreduce_and_nxv256i1( %v) { ; CHECK-LABEL: vreduce_and_nxv256i1: ; CHECK: # %bb.0: @@ -897,8 +765,6 @@ define zeroext i1 @vreduce_and_nxv256i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umax.nxv256i1() - define zeroext i1 @vreduce_umax_nxv256i1( %v) { ; CHECK-LABEL: vreduce_umax_nxv256i1: ; CHECK: # %bb.0: @@ -913,8 +779,6 @@ define zeroext i1 @vreduce_umax_nxv256i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smax.nxv256i1() - define zeroext i1 @vreduce_smax_nxv256i1( %v) { ; CHECK-LABEL: vreduce_smax_nxv256i1: ; CHECK: # %bb.0: @@ -929,8 +793,6 @@ define zeroext i1 @vreduce_smax_nxv256i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umin.nxv256i1() - define zeroext i1 @vreduce_umin_nxv256i1( %v) { ; CHECK-LABEL: vreduce_umin_nxv256i1: ; CHECK: # %bb.0: @@ -945,8 +807,6 @@ define zeroext i1 @vreduce_umin_nxv256i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smin.nxv256i1() - define zeroext i1 @vreduce_smin_nxv256i1( %v) { ; CHECK-LABEL: vreduce_smin_nxv256i1: ; CHECK: # %bb.0: @@ -961,8 +821,6 @@ define zeroext i1 @vreduce_smin_nxv256i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.or.nxv512i1() - define zeroext i1 @vreduce_or_nxv512i1( %v) { ; CHECK-LABEL: vreduce_or_nxv512i1: ; CHECK: # %bb.0: @@ -981,8 +839,6 @@ define zeroext i1 @vreduce_or_nxv512i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.xor.nxv512i1() - define zeroext i1 @vreduce_xor_nxv512i1( %v) { ; CHECK-LABEL: vreduce_xor_nxv512i1: ; CHECK: # %bb.0: @@ -1001,8 +857,6 @@ define zeroext i1 @vreduce_xor_nxv512i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.and.nxv512i1() - define zeroext i1 @vreduce_and_nxv512i1( %v) { ; CHECK-LABEL: vreduce_and_nxv512i1: ; CHECK: # %bb.0: @@ -1021,8 +875,6 @@ define zeroext i1 
@vreduce_and_nxv512i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umax.nxv512i1() - define zeroext i1 @vreduce_umax_nxv512i1( %v) { ; CHECK-LABEL: vreduce_umax_nxv512i1: ; CHECK: # %bb.0: @@ -1041,8 +893,6 @@ define zeroext i1 @vreduce_umax_nxv512i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smax.nxv512i1() - define zeroext i1 @vreduce_smax_nxv512i1( %v) { ; CHECK-LABEL: vreduce_smax_nxv512i1: ; CHECK: # %bb.0: @@ -1061,8 +911,6 @@ define zeroext i1 @vreduce_smax_nxv512i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umin.nxv512i1() - define zeroext i1 @vreduce_umin_nxv512i1( %v) { ; CHECK-LABEL: vreduce_umin_nxv512i1: ; CHECK: # %bb.0: @@ -1081,8 +929,6 @@ define zeroext i1 @vreduce_umin_nxv512i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smin.nxv512i1() - define zeroext i1 @vreduce_smin_nxv512i1( %v) { ; CHECK-LABEL: vreduce_smin_nxv512i1: ; CHECK: # %bb.0: @@ -1101,8 +947,6 @@ define zeroext i1 @vreduce_smin_nxv512i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.or.nxv1024i1() - define zeroext i1 @vreduce_or_nxv1024i1( %v) { ; CHECK-LABEL: vreduce_or_nxv1024i1: ; CHECK: # %bb.0: @@ -1129,8 +973,6 @@ define zeroext i1 @vreduce_or_nxv1024i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.xor.nxv1024i1() - define zeroext i1 @vreduce_xor_nxv1024i1( %v) { ; CHECK-LABEL: vreduce_xor_nxv1024i1: ; CHECK: # %bb.0: @@ -1157,8 +999,6 @@ define zeroext i1 @vreduce_xor_nxv1024i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.and.nxv1024i1() - define zeroext i1 @vreduce_and_nxv1024i1( %v) { ; CHECK-LABEL: vreduce_and_nxv1024i1: ; CHECK: # %bb.0: @@ -1185,8 +1025,6 @@ define zeroext i1 @vreduce_and_nxv1024i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umax.nxv1024i1() - define zeroext i1 @vreduce_umax_nxv1024i1( %v) { ; CHECK-LABEL: vreduce_umax_nxv1024i1: ; CHECK: # %bb.0: @@ -1213,8 +1051,6 @@ define zeroext i1 @vreduce_umax_nxv1024i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smax.nxv1024i1() - define 
zeroext i1 @vreduce_smax_nxv1024i1( %v) { ; CHECK-LABEL: vreduce_smax_nxv1024i1: ; CHECK: # %bb.0: @@ -1241,8 +1077,6 @@ define zeroext i1 @vreduce_smax_nxv1024i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.umin.nxv1024i1() - define zeroext i1 @vreduce_umin_nxv1024i1( %v) { ; CHECK-LABEL: vreduce_umin_nxv1024i1: ; CHECK: # %bb.0: @@ -1269,8 +1103,6 @@ define zeroext i1 @vreduce_umin_nxv1024i1( %v) { ret i1 %red } -declare i1 @llvm.vector.reduce.smin.nxv1024i1() - define zeroext i1 @vreduce_smin_nxv1024i1( %v) { ; CHECK-LABEL: vreduce_smin_nxv1024i1: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vredxor.ll b/llvm/test/CodeGen/RISCV/rvv/vredxor.ll index 31436cc1d0def..a4a9be617017c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vredxor.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vredxor.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vredxor.nxv8i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vredxor_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8( - , - , - , - , - iXLen); - define @intrinsic_vredxor_mask_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.nxv8i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vredxor_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8( - , - , - , - , - iXLen); - define @intrinsic_vredxor_mask_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vredxor_mask_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.nxv8i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vredxor_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8( - , - , - , - , - iXLen); - define @intrinsic_vredxor_mask_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vredxor_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen); - define @intrinsic_vredxor_mask_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.nxv8i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vredxor_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8( - , - , - , - , - iXLen); - define @intrinsic_vredxor_mask_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.nxv8i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vredxor_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vredxor_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8( - , - , - , - , - iXLen); - define @intrinsic_vredxor_mask_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -280,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.nxv4i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vredxor_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -302,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16( - , - , - , - , - iXLen); - define @intrinsic_vredxor_mask_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -326,12 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.nxv4i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vredxor_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -348,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16( - , - , - , - , - iXLen); - define @intrinsic_vredxor_mask_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -372,12 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vredxor_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -394,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen); - define 
@intrinsic_vredxor_mask_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -418,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.nxv4i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vredxor_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -440,13 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16( - , - , - , - , - iXLen); - define @intrinsic_vredxor_mask_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -464,12 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.nxv4i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vredxor_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -486,13 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16( - , - , - , - , - iXLen); - define @intrinsic_vredxor_mask_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -510,12 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.nxv4i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vredxor_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -532,13 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16( - , - , - , - , - iXLen); - define @intrinsic_vredxor_mask_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -556,12 +400,6 @@ entry: ret %a } 
-declare @llvm.riscv.vredxor.nxv2i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vredxor_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -578,13 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32( - , - , - , - , - iXLen); - define @intrinsic_vredxor_mask_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -602,12 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vredxor_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -624,13 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vredxor_mask_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -648,12 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.nxv2i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vredxor_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -670,13 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32( - , - , - , - , - iXLen); - define @intrinsic_vredxor_mask_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -694,12 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.nxv2i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vredxor_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # 
%entry @@ -716,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32( - , - , - , - , - iXLen); - define @intrinsic_vredxor_mask_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -740,12 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.nxv2i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vredxor_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -762,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32( - , - , - , - , - iXLen); - define @intrinsic_vredxor_mask_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -786,12 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vredxor_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -808,13 +581,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vredxor_mask_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -832,12 +598,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.nxv1i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vredxor_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -854,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64( - , - , - , - , - iXLen); - define @intrinsic_vredxor_mask_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -878,12 +631,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.nxv1i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vredxor_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,13 +647,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64( - , - , - , - , - iXLen); - define @intrinsic_vredxor_mask_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -924,12 +664,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.nxv1i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vredxor_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -946,13 +680,6 @@ entry: ret %a } -declare @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64( - , - , - , - , - iXLen); - define @intrinsic_vredxor_mask_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll index 66ba2697fe5f6..b65663d30672f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.srem.nxv8i7(, , , i32) - define @vrem_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_nxv8i7: ; CHECK: # %bb.0: @@ -23,8 +21,6 @@ define @vrem_vx_nxv8i7( %a, i7 signext %b, %v } -declare @llvm.vp.srem.nxv1i8(, , , i32) - define @vrem_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv1i8: ; CHECK: # %bb.0: 
@@ -69,8 +65,6 @@ define @vrem_vx_nxv1i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.srem.nxv2i8(, , , i32) - define @vrem_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv2i8: ; CHECK: # %bb.0: @@ -115,8 +109,6 @@ define @vrem_vx_nxv2i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.srem.nxv3i8(, , , i32) - define @vrem_vv_nxv3i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv3i8: ; CHECK: # %bb.0: @@ -127,8 +119,6 @@ define @vrem_vv_nxv3i8( %va, %v } -declare @llvm.vp.srem.nxv4i8(, , , i32) - define @vrem_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv4i8: ; CHECK: # %bb.0: @@ -173,8 +163,6 @@ define @vrem_vx_nxv4i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.srem.nxv8i8(, , , i32) - define @vrem_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv8i8: ; CHECK: # %bb.0: @@ -219,8 +207,6 @@ define @vrem_vx_nxv8i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.srem.nxv16i8(, , , i32) - define @vrem_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv16i8: ; CHECK: # %bb.0: @@ -265,8 +251,6 @@ define @vrem_vx_nxv16i8_unmasked( %va, i8 % ret %v } -declare @llvm.vp.srem.nxv32i8(, , , i32) - define @vrem_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv32i8: ; CHECK: # %bb.0: @@ -311,8 +295,6 @@ define @vrem_vx_nxv32i8_unmasked( %va, i8 % ret %v } -declare @llvm.vp.srem.nxv64i8(, , , i32) - define @vrem_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv64i8: ; CHECK: # %bb.0: @@ -357,8 +339,6 @@ define @vrem_vx_nxv64i8_unmasked( %va, i8 % ret %v } -declare @llvm.vp.srem.nxv1i16(, , , i32) - define @vrem_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv1i16: ; CHECK: # %bb.0: @@ -403,8 +383,6 @@ define @vrem_vx_nxv1i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.srem.nxv2i16(, , , i32) - define @vrem_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv2i16: ; CHECK: # %bb.0: @@ 
-449,8 +427,6 @@ define @vrem_vx_nxv2i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.srem.nxv4i16(, , , i32) - define @vrem_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv4i16: ; CHECK: # %bb.0: @@ -495,8 +471,6 @@ define @vrem_vx_nxv4i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.srem.nxv8i16(, , , i32) - define @vrem_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv8i16: ; CHECK: # %bb.0: @@ -541,8 +515,6 @@ define @vrem_vx_nxv8i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.srem.nxv16i16(, , , i32) - define @vrem_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv16i16: ; CHECK: # %bb.0: @@ -587,8 +559,6 @@ define @vrem_vx_nxv16i16_unmasked( %va, i ret %v } -declare @llvm.vp.srem.nxv32i16(, , , i32) - define @vrem_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv32i16: ; CHECK: # %bb.0: @@ -633,8 +603,6 @@ define @vrem_vx_nxv32i16_unmasked( %va, i ret %v } -declare @llvm.vp.srem.nxv1i32(, , , i32) - define @vrem_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv1i32: ; CHECK: # %bb.0: @@ -679,8 +647,6 @@ define @vrem_vx_nxv1i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.srem.nxv2i32(, , , i32) - define @vrem_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv2i32: ; CHECK: # %bb.0: @@ -725,8 +691,6 @@ define @vrem_vx_nxv2i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.srem.nxv4i32(, , , i32) - define @vrem_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv4i32: ; CHECK: # %bb.0: @@ -771,8 +735,6 @@ define @vrem_vx_nxv4i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.srem.nxv8i32(, , , i32) - define @vrem_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv8i32: ; CHECK: # %bb.0: @@ -817,8 +779,6 @@ define @vrem_vx_nxv8i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.srem.nxv16i32(, , , i32) - define @vrem_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vrem_vv_nxv16i32: ; CHECK: # %bb.0: @@ -863,8 +823,6 @@ define @vrem_vx_nxv16i32_unmasked( %va, i ret %v } -declare @llvm.vp.srem.nxv1i64(, , , i32) - define @vrem_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv1i64: ; CHECK: # %bb.0: @@ -937,8 +895,6 @@ define @vrem_vx_nxv1i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.srem.nxv2i64(, , , i32) - define @vrem_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1011,8 +967,6 @@ define @vrem_vx_nxv2i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.srem.nxv4i64(, , , i32) - define @vrem_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1085,8 +1039,6 @@ define @vrem_vx_nxv4i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.srem.nxv8i64(, , , i32) - define @vrem_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem.ll b/llvm/test/CodeGen/RISCV/rvv/vrem.ll index d18b939823a23..a9b1cef3984e8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vrem.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrem.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vrem.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vrem_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vrem_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vrem_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vrem_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vrem_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vrem_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vrem_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv64i8.nxv64i8( - , - , - , - iXLen); - define @intrinsic_vrem_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vrem_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vrem_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vrem_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vrem_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vrem_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vrem_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -544,14 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, 
iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -569,12 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv32i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vrem_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -591,14 +417,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vrem_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vrem_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -711,12 +501,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv4i32.nxv4i32( - , - , - , - iXLen); - define 
@intrinsic_vrem_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -733,14 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -758,12 +534,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vrem_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -780,14 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -805,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv16i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vrem_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -827,14 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vrem_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv1i64.nxv1i64( - , - , - , - , - 
iXLen, - iXLen); - define @intrinsic_vrem_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv2i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vrem_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv4i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vrem_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv8i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vrem_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1042,12 +734,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv1i8.i8( 
- , - , - i8, - iXLen); - define @intrinsic_vrem_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1064,14 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1089,12 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vrem_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1111,14 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1136,12 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vrem_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1158,14 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1183,12 +833,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vrem_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1205,14 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 
%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1230,12 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vrem_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1252,14 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1277,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vrem_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1299,14 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1324,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv64i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vrem_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1346,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1371,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vrem_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vrem_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1393,14 +981,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1418,12 +998,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vrem_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1440,14 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1465,12 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vrem_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1487,14 +1047,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1512,12 +1064,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vrem_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1534,14 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vrem_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1559,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vrem_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1581,14 +1113,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1606,12 +1130,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv32i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vrem_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1628,14 +1146,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1653,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vrem_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1675,14 +1179,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1700,12 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vrem_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vrem_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1722,14 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1747,12 +1229,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vrem_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1769,14 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1794,12 +1262,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vrem_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1816,14 +1278,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1841,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv16i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vrem_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1863,14 +1311,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vrem_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1888,12 +1328,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv1i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vrem_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vrem_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1922,14 +1356,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vrem_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1959,12 +1385,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv2i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vrem_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vrem_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1993,14 +1413,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vrem_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2030,12 +1442,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv4i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vrem_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vrem_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2064,14 +1470,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vrem_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2101,12 +1499,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.nxv8i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vrem_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: 
intrinsic_vrem_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2135,14 +1527,6 @@ entry: ret %a } -declare @llvm.riscv.vrem.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vrem_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vrem_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll index 4608661eb5df3..51448f2050b6b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.urem.nxv8i7(, , , i32) - define @vremu_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_nxv8i7: ; CHECK: # %bb.0: @@ -22,8 +20,6 @@ define @vremu_vx_nxv8i7( %a, i7 signext %b, < ret %v } -declare @llvm.vp.urem.nxv1i8(, , , i32) - define @vremu_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv1i8: ; CHECK: # %bb.0: @@ -68,8 +64,6 @@ define @vremu_vx_nxv1i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.urem.nxv2i8(, , , i32) - define @vremu_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv2i8: ; CHECK: # %bb.0: @@ -114,8 +108,6 @@ define @vremu_vx_nxv2i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.urem.nxv3i8(, , , i32) - define @vremu_vv_nxv3i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv3i8: ; CHECK: # %bb.0: @@ -126,8 +118,6 @@ define @vremu_vv_nxv3i8( %va, %v } -declare @llvm.vp.urem.nxv4i8(, , , i32) - define @vremu_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv4i8: ; CHECK: # %bb.0: @@ -172,8 +162,6 @@ define @vremu_vx_nxv4i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.urem.nxv8i8(, , , i32) - define @vremu_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv8i8: ; CHECK: # %bb.0: @@ 
-218,8 +206,6 @@ define @vremu_vx_nxv8i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.urem.nxv16i8(, , , i32) - define @vremu_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv16i8: ; CHECK: # %bb.0: @@ -264,8 +250,6 @@ define @vremu_vx_nxv16i8_unmasked( %va, i8 ret %v } -declare @llvm.vp.urem.nxv32i8(, , , i32) - define @vremu_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv32i8: ; CHECK: # %bb.0: @@ -310,8 +294,6 @@ define @vremu_vx_nxv32i8_unmasked( %va, i8 ret %v } -declare @llvm.vp.urem.nxv64i8(, , , i32) - define @vremu_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv64i8: ; CHECK: # %bb.0: @@ -356,8 +338,6 @@ define @vremu_vx_nxv64i8_unmasked( %va, i8 ret %v } -declare @llvm.vp.urem.nxv1i16(, , , i32) - define @vremu_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv1i16: ; CHECK: # %bb.0: @@ -402,8 +382,6 @@ define @vremu_vx_nxv1i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.urem.nxv2i16(, , , i32) - define @vremu_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv2i16: ; CHECK: # %bb.0: @@ -448,8 +426,6 @@ define @vremu_vx_nxv2i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.urem.nxv4i16(, , , i32) - define @vremu_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv4i16: ; CHECK: # %bb.0: @@ -494,8 +470,6 @@ define @vremu_vx_nxv4i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.urem.nxv8i16(, , , i32) - define @vremu_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv8i16: ; CHECK: # %bb.0: @@ -540,8 +514,6 @@ define @vremu_vx_nxv8i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.urem.nxv16i16(, , , i32) - define @vremu_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv16i16: ; CHECK: # %bb.0: @@ -586,8 +558,6 @@ define @vremu_vx_nxv16i16_unmasked( %va, ret %v } -declare @llvm.vp.urem.nxv32i16(, , , i32) - define @vremu_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) { ; 
CHECK-LABEL: vremu_vv_nxv32i16: ; CHECK: # %bb.0: @@ -632,8 +602,6 @@ define @vremu_vx_nxv32i16_unmasked( %va, ret %v } -declare @llvm.vp.urem.nxv1i32(, , , i32) - define @vremu_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv1i32: ; CHECK: # %bb.0: @@ -678,8 +646,6 @@ define @vremu_vx_nxv1i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.urem.nxv2i32(, , , i32) - define @vremu_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv2i32: ; CHECK: # %bb.0: @@ -724,8 +690,6 @@ define @vremu_vx_nxv2i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.urem.nxv4i32(, , , i32) - define @vremu_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv4i32: ; CHECK: # %bb.0: @@ -770,8 +734,6 @@ define @vremu_vx_nxv4i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.urem.nxv8i32(, , , i32) - define @vremu_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv8i32: ; CHECK: # %bb.0: @@ -816,8 +778,6 @@ define @vremu_vx_nxv8i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.urem.nxv16i32(, , , i32) - define @vremu_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv16i32: ; CHECK: # %bb.0: @@ -862,8 +822,6 @@ define @vremu_vx_nxv16i32_unmasked( %va, ret %v } -declare @llvm.vp.urem.nxv1i64(, , , i32) - define @vremu_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv1i64: ; CHECK: # %bb.0: @@ -936,8 +894,6 @@ define @vremu_vx_nxv1i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.urem.nxv2i64(, , , i32) - define @vremu_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1010,8 +966,6 @@ define @vremu_vx_nxv2i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.urem.nxv4i64(, , , i32) - define @vremu_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1084,8 +1038,6 @@ define @vremu_vx_nxv4i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.urem.nxv8i64(, , , i32) - define 
@vremu_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu.ll b/llvm/test/CodeGen/RISCV/rvv/vremu.ll index 138232c103da0..c1a49f45e5d5b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vremu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vremu.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vremu.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vremu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vremu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vremu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, 
%1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vremu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vremu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vremu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv64i8.nxv64i8( - , - , - , - iXLen); - define 
@intrinsic_vremu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vremu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vremu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vremu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv4i16.nxv4i16( - , - , - , - 
, - iXLen, - iXLen); - define @intrinsic_vremu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vremu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vremu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -544,14 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -569,12 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv32i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vremu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -591,14 +417,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 
@@ entry: ret %a } -declare @llvm.riscv.vremu.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vremu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vremu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -711,12 +501,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vremu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -733,14 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -758,12 +534,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vremu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ 
-780,14 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -805,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv16i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vremu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -827,14 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vremu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv2i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vremu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vremu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv4i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vremu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv8i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vremu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1042,12 +734,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv1i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vremu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1064,14 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1089,12 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vremu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vremu_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1111,14 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1136,12 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vremu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1158,14 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1183,12 +833,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vremu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1205,14 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1230,12 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vremu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1252,14 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv16i8_nxv16i8_i8: ; 
CHECK: # %bb.0: # %entry @@ -1277,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vremu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1299,14 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1324,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv64i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vremu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1346,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1371,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vremu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1393,14 +981,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1418,12 +998,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vremu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ 
-1440,14 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1465,12 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vremu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1487,14 +1047,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1512,12 +1064,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vremu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1534,14 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1559,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vremu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1581,14 +1113,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vremu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1606,12 +1130,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv32i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vremu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1628,14 +1146,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1653,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vremu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1675,14 +1179,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1700,12 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vremu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1722,14 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1747,12 +1229,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vremu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vremu_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1769,14 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1794,12 +1262,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vremu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1816,14 +1278,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1841,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv16i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vremu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1863,14 +1311,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1888,12 +1328,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv1i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vremu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vremu_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1922,14 +1356,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 
%2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vremu_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1959,12 +1385,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv2i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vremu_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vremu_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1993,14 +1413,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vremu_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2030,12 +1442,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv4i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vremu_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vremu_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2064,14 +1470,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vremu_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2101,12 +1499,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.nxv8i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vremu_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vremu_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2135,14 +1527,6 @@ entry: ret %a } -declare @llvm.riscv.vremu.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vremu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vremu_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vrev8.ll b/llvm/test/CodeGen/RISCV/rvv/vrev8.ll index b6588bceceb37..b94b3490dc835 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vrev8.ll 
+++ b/llvm/test/CodeGen/RISCV/rvv/vrev8.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvkb \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vrev8.nxv1i8( - , - , - iXLen); - define @intrinsic_vrev8_vs_nxv1i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrev8_vs_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -24,13 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.mask.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vrev8_mask_vs_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -47,11 +35,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.nxv2i8( - , - , - iXLen); - define @intrinsic_vrev8_vs_nxv2i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrev8_vs_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -67,13 +50,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.mask.nxv2i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vrev8_mask_vs_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -90,11 +66,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.nxv4i8( - , - , - iXLen); - define @intrinsic_vrev8_vs_nxv4i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrev8_vs_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -110,13 +81,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.mask.nxv4i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vrev8_mask_vs_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -133,11 +97,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.nxv8i8( - , - , - iXLen); - define @intrinsic_vrev8_vs_nxv8i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrev8_vs_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -153,13 +112,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.mask.nxv8i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vrev8_mask_vs_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; 
CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -176,11 +128,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.nxv16i8( - , - , - iXLen); - define @intrinsic_vrev8_vs_nxv16i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrev8_vs_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -196,13 +143,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.mask.nxv16i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vrev8_mask_vs_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -219,11 +159,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.nxv32i8( - , - , - iXLen); - define @intrinsic_vrev8_vs_nxv32i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrev8_vs_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -239,13 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.mask.nxv32i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vrev8_mask_vs_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -262,11 +190,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.nxv64i8( - , - , - iXLen); - define @intrinsic_vrev8_vs_nxv64i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrev8_vs_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -282,13 +205,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.mask.nxv64i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vrev8_mask_vs_nxv64i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -305,11 +221,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.nxv1i16( - , - , - iXLen); - define @intrinsic_vrev8_vs_nxv1i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrev8_vs_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -325,13 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.mask.nxv1i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vrev8_mask_vs_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv1i16: ; CHECK: # %bb.0: 
# %entry @@ -348,11 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.nxv2i16( - , - , - iXLen); - define @intrinsic_vrev8_vs_nxv2i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrev8_vs_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -368,13 +267,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.mask.nxv2i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vrev8_mask_vs_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -391,11 +283,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.nxv4i16( - , - , - iXLen); - define @intrinsic_vrev8_vs_nxv4i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrev8_vs_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -411,13 +298,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.mask.nxv4i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vrev8_mask_vs_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -434,11 +314,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.nxv8i16( - , - , - iXLen); - define @intrinsic_vrev8_vs_nxv8i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrev8_vs_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -454,13 +329,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.mask.nxv8i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vrev8_mask_vs_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -477,11 +345,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.nxv16i16( - , - , - iXLen); - define @intrinsic_vrev8_vs_nxv16i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrev8_vs_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -497,13 +360,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.mask.nxv16i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vrev8_mask_vs_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -520,11 +376,6 @@ entry: ret %a } -declare 
@llvm.riscv.vrev8.nxv32i16( - , - , - iXLen); - define @intrinsic_vrev8_vs_nxv32i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrev8_vs_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -540,13 +391,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.mask.nxv32i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vrev8_mask_vs_nxv32i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -563,11 +407,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.nxv1i32( - , - , - iXLen); - define @intrinsic_vrev8_vs_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrev8_vs_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -583,13 +422,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.mask.nxv1i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vrev8_mask_vs_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -606,11 +438,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.nxv2i32( - , - , - iXLen); - define @intrinsic_vrev8_vs_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrev8_vs_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -626,13 +453,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.mask.nxv2i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vrev8_mask_vs_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -649,11 +469,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.nxv4i32( - , - , - iXLen); - define @intrinsic_vrev8_vs_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrev8_vs_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -669,13 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.mask.nxv4i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vrev8_mask_vs_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -692,11 +500,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.nxv8i32( - , - , - iXLen); - define 
@intrinsic_vrev8_vs_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrev8_vs_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -712,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.mask.nxv8i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vrev8_mask_vs_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -735,11 +531,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.nxv16i32( - , - , - iXLen); - define @intrinsic_vrev8_vs_nxv16i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrev8_vs_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -755,13 +546,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.mask.nxv16i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vrev8_mask_vs_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -778,11 +562,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.nxv1i64( - , - , - iXLen); - define @intrinsic_vrev8_vs_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrev8_vs_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -798,13 +577,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.mask.nxv1i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vrev8_mask_vs_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -821,11 +593,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.nxv2i64( - , - , - iXLen); - define @intrinsic_vrev8_vs_nxv2i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrev8_vs_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -841,13 +608,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.mask.nxv2i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vrev8_mask_vs_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -864,11 +624,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.nxv4i64( - , - , - iXLen); - define @intrinsic_vrev8_vs_nxv4i64( %0, iXLen %1) nounwind { ; 
CHECK-LABEL: intrinsic_vrev8_vs_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -884,13 +639,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.mask.nxv4i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vrev8_mask_vs_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -907,11 +655,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.nxv8i64( - , - , - iXLen); - define @intrinsic_vrev8_vs_nxv8i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrev8_vs_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -927,13 +670,6 @@ entry: ret %a } -declare @llvm.riscv.vrev8.mask.nxv8i64( - , - , - , - iXLen, - iXLen); - define @intrinsic_vrev8_mask_vs_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgather.ll b/llvm/test/CodeGen/RISCV/rvv/vrgather.ll index 91b95a96050d2..9813e4a7533b3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vrgather.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrgather.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+d,+zfhmin,+zvfh,+zvfbfmin \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vrgather.vv.nxv1i8.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,14 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv1i8.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -52,12 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv2i8.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # 
%bb.0: # %entry @@ -75,14 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv2i8.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -100,12 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv4i8.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -123,14 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv4i8.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -148,12 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv8i8.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -171,14 +123,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv8i8.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -196,12 +140,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv16i8.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -219,14 +157,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv16i8.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -244,12 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv32i8.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -267,14 +191,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv32i8.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -292,12 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv64i8.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -315,14 +225,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv64i8.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -341,12 +243,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv1i16.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -364,14 +260,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv1i16.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -389,12 +277,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv2i16.iXLen( - , - , - , - iXLen) - 
define @intrinsic_vrgather_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -412,14 +294,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv2i16.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -437,12 +311,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv4i16.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -460,14 +328,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv4i16.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -485,12 +345,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv8i16.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -508,14 +362,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv8i16.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -533,12 +379,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv16i16.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -556,14 +396,6 @@ 
entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv16i16.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -581,12 +413,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv32i16.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -604,14 +430,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv32i16.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -630,12 +448,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv1i32.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -653,14 +465,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv1i32.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -678,12 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv2i32.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -701,14 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv2i32.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, 
iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -726,12 +516,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv4i32.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -749,14 +533,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv4i32.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -774,12 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv8i32.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -797,14 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv8i32.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -822,12 +584,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv16i32.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -845,14 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv16i32.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -871,12 +619,6 @@ entry: ret %a } -declare 
@llvm.riscv.vrgather.vv.nxv1i64.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -894,14 +636,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv1i64.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -919,12 +653,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv2i64.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -942,14 +670,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv2i64.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -967,12 +687,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv4i64.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -990,14 +704,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv4i64.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1015,12 +721,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv8i64.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vrgather_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1038,14 +738,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv8i64.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1064,12 +756,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv1f16.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv1f16_nxv1f16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1087,14 +773,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv1f16.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv1f16_nxv1f16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1112,12 +790,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv2f16.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv2f16_nxv2f16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1135,14 +807,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv2f16.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv2f16_nxv2f16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1160,12 +824,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv4f16.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv4f16_nxv4f16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1183,14 +841,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv4f16.iXLen( - , - , - , - , - iXLen, - iXLen) 
- define @intrinsic_vrgather_mask_vv_nxv4f16_nxv4f16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1208,12 +858,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv8f16.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv8f16_nxv8f16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv8f16_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1231,14 +875,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv8f16.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv8f16_nxv8f16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8f16_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1256,12 +892,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv16f16.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv16f16_nxv16f16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv16f16_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1279,14 +909,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv16f16.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv16f16_nxv16f16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16f16_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1304,12 +926,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv32f16.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv32f16_nxv32f16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1327,14 +943,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv32f16.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv32f16_nxv32f16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32f16_nxv32f16_nxv32i16: ; 
CHECK: # %bb.0: # %entry @@ -1353,12 +961,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv1f32.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv1f32_nxv1f32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1376,14 +978,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv1f32.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv1f32_nxv1f32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1401,12 +995,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv2f32.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv2f32_nxv2f32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1424,14 +1012,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv2f32.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv2f32_nxv2f32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1449,12 +1029,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv4f32.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv4f32_nxv4f32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1472,14 +1046,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv4f32.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv4f32_nxv4f32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1497,12 +1063,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv8f32.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv8f32_nxv8f32_nxv8i32( %0, 
%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv8f32_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1520,14 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv8f32.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv8f32_nxv8f32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8f32_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1545,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv16f32.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv16f32_nxv16f32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1568,14 +1114,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv16f32.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv16f32_nxv16f32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1594,12 +1132,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv1f64.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv1f64_nxv1f64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1617,14 +1149,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv1f64.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv1f64_nxv1f64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1642,12 +1166,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv2f64.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv2f64_nxv2f64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1665,14 +1183,6 @@ entry: ret %a } -declare 
@llvm.riscv.vrgather.vv.mask.nxv2f64.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv2f64_nxv2f64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1690,12 +1200,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv4f64.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv4f64_nxv4f64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv4f64_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1713,14 +1217,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv4f64.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv4f64_nxv4f64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4f64_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1738,12 +1234,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv8f64.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv8f64_nxv8f64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1761,14 +1251,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv8f64.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv8f64_nxv8f64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1787,12 +1269,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv1i8.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv1i8_nxv1i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1810,14 +1286,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv1i8.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv1i8_nxv1i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vrgather_mask_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1835,12 +1303,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv2i8.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv2i8_nxv2i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1858,14 +1320,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv2i8.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv2i8_nxv2i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1883,12 +1337,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv4i8.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv4i8_nxv4i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1906,14 +1354,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv4i8.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv4i8_nxv4i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1931,12 +1371,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv8i8.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv8i8_nxv8i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1954,14 +1388,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv8i8.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv8i8_nxv8i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1979,12 +1405,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv16i8.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv16i8_nxv16i8( %0, iXLen %1, iXLen 
%2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -2002,14 +1422,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv16i8.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv16i8_nxv16i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -2027,12 +1439,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv32i8.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv32i8_nxv32i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -2050,14 +1456,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv32i8.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv32i8_nxv32i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -2075,12 +1473,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv64i8.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv64i8_nxv64i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -2098,14 +1490,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv64i8.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv64i8_nxv64i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -2123,12 +1507,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv1i16.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv1i16_nxv1i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2146,14 +1524,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv1i16.iXLen( - , - , - iXLen, - , - iXLen, - 
iXLen) - define @intrinsic_vrgather_mask_vx_nxv1i16_nxv1i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2171,12 +1541,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv2i16.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv2i16_nxv2i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2194,14 +1558,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv2i16.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv2i16_nxv2i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2219,12 +1575,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv4i16.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv4i16_nxv4i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2242,14 +1592,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv4i16.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv4i16_nxv4i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2267,12 +1609,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv8i16.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv8i16_nxv8i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2290,14 +1626,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv8i16.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv8i16_nxv8i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2315,12 +1643,6 @@ entry: ret 
%a } -declare @llvm.riscv.vrgather.vx.nxv16i16.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv16i16_nxv16i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2338,14 +1660,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv16i16.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv16i16_nxv16i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2363,12 +1677,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv32i16.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv32i16_nxv32i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2386,14 +1694,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv32i16.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv32i16_nxv32i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2411,12 +1711,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv1i32.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv1i32_nxv1i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2434,14 +1728,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv1i32.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv1i32_nxv1i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2459,12 +1745,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv2i32.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv2i32_nxv2i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vrgather_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -2482,14 +1762,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv2i32.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv2i32_nxv2i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -2507,12 +1779,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv4i32.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv4i32_nxv4i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -2530,14 +1796,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv4i32.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv4i32_nxv4i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -2555,12 +1813,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv8i32.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv8i32_nxv8i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -2578,14 +1830,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv8i32.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv8i32_nxv8i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -2603,12 +1847,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv16i32.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv16i32_nxv16i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -2626,14 +1864,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv16i32.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define 
@intrinsic_vrgather_mask_vx_nxv16i32_nxv16i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -2651,12 +1881,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv1i64.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv1i64_nxv1i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -2674,14 +1898,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv1i64.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv1i64_nxv1i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -2699,12 +1915,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv2i64.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv2i64_nxv2i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -2722,14 +1932,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv2i64.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv2i64_nxv2i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -2747,12 +1949,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv4i64.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv4i64_nxv4i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -2770,14 +1966,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv4i64.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv4i64_nxv4i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -2795,12 +1983,6 @@ entry: ret %a } 
-declare @llvm.riscv.vrgather.vx.nxv8i64.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv8i64_nxv8i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -2818,14 +2000,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv8i64.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv8i64_nxv8i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -2843,12 +2017,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv1f16.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv1f16_nxv1f16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -2866,14 +2034,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv1f16.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv1f16_nxv1f16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -2891,12 +2051,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv2f16.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv2f16_nxv2f16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -2914,14 +2068,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv2f16.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv2f16_nxv2f16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -2939,12 +2085,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv4f16.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv4f16_nxv4f16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv4f16_nxv4f16: ; CHECK: # 
%bb.0: # %entry @@ -2962,14 +2102,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv4f16.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv4f16_nxv4f16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -2987,12 +2119,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv8f16.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv8f16_nxv8f16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -3010,14 +2136,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv8f16.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv8f16_nxv8f16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -3035,12 +2153,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv16f16.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv16f16_nxv16f16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -3058,14 +2170,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv16f16.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv16f16_nxv16f16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -3083,12 +2187,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv32f16.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv32f16_nxv32f16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -3106,14 +2204,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv32f16.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv32f16_nxv32f16( %0, 
%1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -3131,12 +2221,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv1f32.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv1f32_nxv1f32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -3154,14 +2238,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv1f32.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv1f32_nxv1f32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -3179,12 +2255,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv2f32.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv2f32_nxv2f32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -3202,14 +2272,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv2f32.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv2f32_nxv2f32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -3227,12 +2289,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv4f32.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv4f32_nxv4f32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -3250,14 +2306,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv4f32.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv4f32_nxv4f32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -3275,12 +2323,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv8f32.iXLen( - , - , - 
iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv8f32_nxv8f32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -3298,14 +2340,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv8f32.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv8f32_nxv8f32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -3323,12 +2357,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv16f32.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv16f32_nxv16f32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -3346,14 +2374,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv16f32.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv16f32_nxv16f32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -3371,12 +2391,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv1f64.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv1f64_nxv1f64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -3394,14 +2408,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv1f64.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv1f64_nxv1f64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -3419,12 +2425,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv2f64.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv2f64_nxv2f64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -3442,14 +2442,6 @@ entry: 
ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv2f64.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv2f64_nxv2f64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -3467,12 +2459,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv4f64.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv4f64_nxv4f64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -3490,14 +2476,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv4f64.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv4f64_nxv4f64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -3515,12 +2493,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv8f64.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv8f64_nxv8f64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -3538,14 +2510,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv8f64.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv8f64_nxv8f64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -4821,12 +3785,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv1bf16.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv1bf16_nxv1bf16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv1bf16_nxv1bf16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4844,14 +3802,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv1bf16.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv1bf16_nxv1bf16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vrgather_mask_vv_nxv1bf16_nxv1bf16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4869,12 +3819,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv2bf16.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv2bf16_nxv2bf16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv2bf16_nxv2bf16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4892,14 +3836,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv2bf16.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv2bf16_nxv2bf16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2bf16_nxv2bf16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4917,12 +3853,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv4bf16.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv4bf16_nxv4bf16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv4bf16_nxv4bf16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4940,14 +3870,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv4bf16.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv4bf16_nxv4bf16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4bf16_nxv4bf16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4965,12 +3887,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv8bf16.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv8bf16_nxv8bf16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv8bf16_nxv8bf16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -4988,14 +3904,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv8bf16.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv8bf16_nxv8bf16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8bf16_nxv8bf16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -5013,12 +3921,6 @@ entry: ret %a } -declare 
@llvm.riscv.vrgather.vv.nxv16bf16.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv16bf16_nxv16bf16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv16bf16_nxv16bf16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -5036,14 +3938,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv16bf16.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv16bf16_nxv16bf16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16bf16_nxv16bf16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -5061,12 +3955,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.nxv32bf16.iXLen( - , - , - , - iXLen) - define @intrinsic_vrgather_vv_nxv32bf16_nxv32bf16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv32bf16_nxv32bf16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -5084,14 +3972,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vv.mask.nxv32bf16.iXLen( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vv_nxv32bf16_nxv32bf16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32bf16_nxv32bf16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -5110,12 +3990,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv1bf16.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv1bf16_nxv1bf16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -5133,14 +4007,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv1bf16.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv1bf16_nxv1bf16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -5158,12 +4024,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv2bf16.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv2bf16_nxv2bf16( %0, iXLen %1, iXLen %2) 
nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -5181,14 +4041,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv2bf16.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv2bf16_nxv2bf16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -5206,12 +4058,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv4bf16.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv4bf16_nxv4bf16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -5229,14 +4075,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv4bf16.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv4bf16_nxv4bf16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -5254,12 +4092,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv8bf16.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv8bf16_nxv8bf16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -5277,14 +4109,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv8bf16.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv8bf16_nxv8bf16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -5302,12 +4126,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv16bf16.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv16bf16_nxv16bf16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -5325,14 +4143,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv16bf16.iXLen( 
- , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv16bf16_nxv16bf16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -5350,12 +4160,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.nxv32bf16.iXLen( - , - , - iXLen, - iXLen) - define @intrinsic_vrgather_vx_nxv32bf16_nxv32bf16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -5373,14 +4177,6 @@ entry: ret %a } -declare @llvm.riscv.vrgather.vx.mask.nxv32bf16.iXLen( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrgather_mask_vx_nxv32bf16_nxv32bf16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll index 7b460f2c058f8..737140783480b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll @@ -62,11 +62,3 @@ loopIR3.i.i: ; preds = %loopIR3.i.i, %loopI br label %loopIR3.i.i } -; Function Attrs: nocallback nofree nosync nounwind readnone willreturn -declare @llvm.vector.insert.nxv8i16.nxv1i16(, , i64 immarg) #0 - -; Function Attrs: nounwind readnone -declare @llvm.riscv.vrgatherei16.vv.nxv8i8.i64(, , , i64) #1 - -attributes #0 = { nocallback nofree nosync nounwind readnone willreturn } -attributes #1 = { nounwind readnone } diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16.ll b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16.ll index b3f36e4420a6c..1b08999f8e4bc 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+d,+zvfhmin,+zvfbfmin \ ; RUN: -verify-machineinstrs | FileCheck %s -declare 
@llvm.riscv.vrgatherei16.vv.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,14 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -52,12 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,14 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -100,12 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -123,14 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -148,12 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -171,14 +123,6 @@ entry: ret %a } -declare 
@llvm.riscv.vrgatherei16.vv.mask.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -196,12 +140,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -219,14 +157,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -244,12 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -267,14 +191,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -292,12 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -315,14 +225,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ 
-340,12 +242,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -363,14 +259,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -388,12 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -411,14 +293,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -436,12 +310,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -459,14 +327,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -484,12 +344,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv16i16_nxv16i16: ; CHECK: # 
%bb.0: # %entry @@ -507,14 +361,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -532,12 +378,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -555,14 +395,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv32i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -581,12 +413,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -604,14 +430,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -629,12 +447,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -652,14 +464,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -677,12 +481,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -700,14 +498,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -725,12 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -748,14 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv16i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -774,12 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -797,14 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv4i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -822,12 +584,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv8i64_nxv8i64( %0, 
%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -845,14 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv8i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -871,12 +619,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv1f16( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -894,14 +636,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv1f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -919,12 +653,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv2f16( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -942,14 +670,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv2f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -967,12 +687,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv4f16( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -990,14 +704,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv4f16( - , - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vrgatherei16_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -1015,12 +721,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv8f16( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -1038,14 +738,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv8f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -1063,12 +755,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv16f16( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -1086,14 +772,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv16f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -1111,12 +789,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv32f16( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -1134,14 +806,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv32f16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv32f16_nxv32f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -1160,12 +824,6 @@ entry: ret %a } -declare 
@llvm.riscv.vrgatherei16.vv.nxv1f32( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -1183,14 +841,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv1f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -1208,12 +858,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv4f32( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -1231,14 +875,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv4f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -1256,12 +892,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv8f32( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -1279,14 +909,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv8f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -1304,12 +926,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv16f32( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -1327,14 +943,6 @@ 
entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv16f32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv16f32_nxv16f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -1353,12 +961,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv4f64( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -1376,14 +978,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv4f64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -1401,12 +995,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.nxv8f64( - , - , - , - iXLen); - define @intrinsic_vrgatherei16_vv_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -1424,14 +1012,6 @@ entry: ret %a } -declare @llvm.riscv.vrgatherei16.vv.mask.nxv8f64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vrgatherei16_mask_vv_nxv8f64_nxv8f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vrol-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vrol-sdnode.ll index eb129da2697b6..ade68af6cd3a5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vrol-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrol-sdnode.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v,+zvkb -verify-machineinstrs < %s | FileCheck %s --check-prefix=CHECK-ZVKB ; RUN: llc -mtriple=riscv64 -mattr=+v,+zvkb -verify-machineinstrs < %s | FileCheck %s --check-prefix=CHECK-ZVKB -declare @llvm.fshl.nxv1i8(, , ) - define @vrol_vv_nxv1i8( 
%a, %b) { ; CHECK-LABEL: vrol_vv_nxv1i8: ; CHECK: # %bb.0: @@ -51,8 +49,6 @@ define @vrol_vx_nxv1i8( %a, i8 %b) { ret %x } -declare @llvm.fshl.nxv2i8(, , ) - define @vrol_vv_nxv2i8( %a, %b) { ; CHECK-LABEL: vrol_vv_nxv2i8: ; CHECK: # %bb.0: @@ -98,8 +94,6 @@ define @vrol_vx_nxv2i8( %a, i8 %b) { ret %x } -declare @llvm.fshl.nxv4i8(, , ) - define @vrol_vv_nxv4i8( %a, %b) { ; CHECK-LABEL: vrol_vv_nxv4i8: ; CHECK: # %bb.0: @@ -145,8 +139,6 @@ define @vrol_vx_nxv4i8( %a, i8 %b) { ret %x } -declare @llvm.fshl.nxv8i8(, , ) - define @vrol_vv_nxv8i8( %a, %b) { ; CHECK-LABEL: vrol_vv_nxv8i8: ; CHECK: # %bb.0: @@ -192,8 +184,6 @@ define @vrol_vx_nxv8i8( %a, i8 %b) { ret %x } -declare @llvm.fshl.nxv16i8(, , ) - define @vrol_vv_nxv16i8( %a, %b) { ; CHECK-LABEL: vrol_vv_nxv16i8: ; CHECK: # %bb.0: @@ -239,8 +229,6 @@ define @vrol_vx_nxv16i8( %a, i8 %b) { ret %x } -declare @llvm.fshl.nxv32i8(, , ) - define @vrol_vv_nxv32i8( %a, %b) { ; CHECK-LABEL: vrol_vv_nxv32i8: ; CHECK: # %bb.0: @@ -286,8 +274,6 @@ define @vrol_vx_nxv32i8( %a, i8 %b) { ret %x } -declare @llvm.fshl.nxv64i8(, , ) - define @vrol_vv_nxv64i8( %a, %b) { ; CHECK-LABEL: vrol_vv_nxv64i8: ; CHECK: # %bb.0: @@ -333,8 +319,6 @@ define @vrol_vx_nxv64i8( %a, i8 %b) { ret %x } -declare @llvm.fshl.nxv1i16(, , ) - define @vrol_vv_nxv1i16( %a, %b) { ; CHECK-LABEL: vrol_vv_nxv1i16: ; CHECK: # %bb.0: @@ -380,8 +364,6 @@ define @vrol_vx_nxv1i16( %a, i16 %b) { ret %x } -declare @llvm.fshl.nxv2i16(, , ) - define @vrol_vv_nxv2i16( %a, %b) { ; CHECK-LABEL: vrol_vv_nxv2i16: ; CHECK: # %bb.0: @@ -427,8 +409,6 @@ define @vrol_vx_nxv2i16( %a, i16 %b) { ret %x } -declare @llvm.fshl.nxv4i16(, , ) - define @vrol_vv_nxv4i16( %a, %b) { ; CHECK-LABEL: vrol_vv_nxv4i16: ; CHECK: # %bb.0: @@ -474,8 +454,6 @@ define @vrol_vx_nxv4i16( %a, i16 %b) { ret %x } -declare @llvm.fshl.nxv8i16(, , ) - define @vrol_vv_nxv8i16( %a, %b) { ; CHECK-LABEL: vrol_vv_nxv8i16: ; CHECK: # %bb.0: @@ -521,8 +499,6 @@ define @vrol_vx_nxv8i16( %a, i16 %b) { ret %x } 
-declare @llvm.fshl.nxv16i16(, , ) - define @vrol_vv_nxv16i16( %a, %b) { ; CHECK-LABEL: vrol_vv_nxv16i16: ; CHECK: # %bb.0: @@ -568,8 +544,6 @@ define @vrol_vx_nxv16i16( %a, i16 %b) { ret %x } -declare @llvm.fshl.nxv32i16(, , ) - define @vrol_vv_nxv32i16( %a, %b) { ; CHECK-LABEL: vrol_vv_nxv32i16: ; CHECK: # %bb.0: @@ -615,8 +589,6 @@ define @vrol_vx_nxv32i16( %a, i16 %b) { ret %x } -declare @llvm.fshl.nxv1i32(, , ) - define @vrol_vv_nxv1i32( %a, %b) { ; CHECK-LABEL: vrol_vv_nxv1i32: ; CHECK: # %bb.0: @@ -675,8 +647,6 @@ define @vrol_vx_nxv1i32( %a, i32 %b) { ret %x } -declare @llvm.fshl.nxv2i32(, , ) - define @vrol_vv_nxv2i32( %a, %b) { ; CHECK-LABEL: vrol_vv_nxv2i32: ; CHECK: # %bb.0: @@ -735,8 +705,6 @@ define @vrol_vx_nxv2i32( %a, i32 %b) { ret %x } -declare @llvm.fshl.nxv4i32(, , ) - define @vrol_vv_nxv4i32( %a, %b) { ; CHECK-LABEL: vrol_vv_nxv4i32: ; CHECK: # %bb.0: @@ -795,8 +763,6 @@ define @vrol_vx_nxv4i32( %a, i32 %b) { ret %x } -declare @llvm.fshl.nxv8i32(, , ) - define @vrol_vv_nxv8i32( %a, %b) { ; CHECK-LABEL: vrol_vv_nxv8i32: ; CHECK: # %bb.0: @@ -855,8 +821,6 @@ define @vrol_vx_nxv8i32( %a, i32 %b) { ret %x } -declare @llvm.fshl.nxv16i32(, , ) - define @vrol_vv_nxv16i32( %a, %b) { ; CHECK-LABEL: vrol_vv_nxv16i32: ; CHECK: # %bb.0: @@ -915,8 +879,6 @@ define @vrol_vx_nxv16i32( %a, i32 %b) { ret %x } -declare @llvm.fshl.nxv1i64(, , ) - define @vrol_vv_nxv1i64( %a, %b) { ; CHECK-LABEL: vrol_vv_nxv1i64: ; CHECK: # %bb.0: @@ -978,8 +940,6 @@ define @vrol_vx_nxv1i64( %a, i64 %b) { ret %x } -declare @llvm.fshl.nxv2i64(, , ) - define @vrol_vv_nxv2i64( %a, %b) { ; CHECK-LABEL: vrol_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1041,8 +1001,6 @@ define @vrol_vx_nxv2i64( %a, i64 %b) { ret %x } -declare @llvm.fshl.nxv4i64(, , ) - define @vrol_vv_nxv4i64( %a, %b) { ; CHECK-LABEL: vrol_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1104,8 +1062,6 @@ define @vrol_vx_nxv4i64( %a, i64 %b) { ret %x } -declare @llvm.fshl.nxv8i64(, , ) - define @vrol_vv_nxv8i64( %a, %b) { ; CHECK-LABEL: 
vrol_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vrol.ll b/llvm/test/CodeGen/RISCV/rvv/vrol.ll index 5d3ac576eb4ed..959bb96781621 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vrol.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrol.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvkb \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vrol.nxv1i8.nxv1i8( - , - , - , - iXLen) - define @intrinsic_vrol_vv_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv2i8.nxv2i8( - , - , - , - iXLen) - define @intrinsic_vrol_vv_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv4i8.nxv4i8( - , - , - , - iXLen) - define @intrinsic_vrol_vv_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare 
@llvm.riscv.vrol.nxv8i8.nxv8i8( - , - , - , - iXLen) - define @intrinsic_vrol_vv_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv16i8.nxv16i8( - , - , - , - iXLen) - define @intrinsic_vrol_vv_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv32i8.nxv32i8( - , - , - , - iXLen) - define @intrinsic_vrol_vv_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv64i8.nxv64i8( - , - , - , - iXLen) - define @intrinsic_vrol_vv_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vv_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vv_nxv64i8_nxv64i8( %0, %1, %2, %3, 
iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv1i16.nxv1i16( - , - , - , - iXLen) - define @intrinsic_vrol_vv_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv2i16.nxv2i16( - , - , - , - iXLen) - define @intrinsic_vrol_vv_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv4i16.nxv4i16( - , - , - , - iXLen) - define @intrinsic_vrol_vv_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv8i16.nxv8i16( - , - , - , - iXLen) - define @intrinsic_vrol_vv_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ 
entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv16i16.nxv16i16( - , - , - , - iXLen) - define @intrinsic_vrol_vv_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -544,14 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -569,12 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv32i16.nxv32i16( - , - , - , - iXLen) - define @intrinsic_vrol_vv_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vv_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -591,14 +417,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vv_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv1i32.nxv1i32( - , - , - , - iXLen) - define @intrinsic_vrol_vv_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv2i32.nxv2i32( - , - , 
- , - iXLen) - define @intrinsic_vrol_vv_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -711,12 +501,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv4i32.nxv4i32( - , - , - , - iXLen) - define @intrinsic_vrol_vv_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -733,14 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -758,12 +534,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv8i32.nxv8i32( - , - , - , - iXLen) - define @intrinsic_vrol_vv_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -780,14 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -805,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv16i32.nxv16i32( - , - , - , - iXLen) - define @intrinsic_vrol_vv_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vv_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -827,14 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vv_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { 
; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv1i64.nxv1i64( - , - , - , - iXLen) - define @intrinsic_vrol_vv_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv2i64.nxv2i64( - , - , - , - iXLen) - define @intrinsic_vrol_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv4i64.nxv4i64( - , - , - , - iXLen) - define @intrinsic_vrol_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv8i64.nxv8i64( - , - , - , - iXLen) - define @intrinsic_vrol_vv_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vv_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } 
-declare @llvm.riscv.vrol.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vv_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1042,12 +734,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv1i8( - , - , - iXLen, - iXLen) - define @intrinsic_vrol_vx_nxv1i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vx_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1064,14 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv1i8( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vx_nxv1i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1089,12 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv2i8( - , - , - iXLen, - iXLen) - define @intrinsic_vrol_vx_nxv2i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vx_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1111,14 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv2i8( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vx_nxv2i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1136,12 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv4i8( - , - , - iXLen, - iXLen) - define @intrinsic_vrol_vx_nxv4i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vx_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1158,14 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv4i8( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vx_nxv4i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1183,12 +833,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv8i8( - , - , - iXLen, - iXLen) - define @intrinsic_vrol_vx_nxv8i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vx_nxv8i8: ; CHECK: 
# %bb.0: # %entry @@ -1205,14 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv8i8( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vx_nxv8i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1230,12 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv16i8( - , - , - iXLen, - iXLen) - define @intrinsic_vrol_vx_nxv16i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vx_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1252,14 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv16i8( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vx_nxv16i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1277,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv32i8( - , - , - iXLen, - iXLen) - define @intrinsic_vrol_vx_nxv32i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vx_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1299,14 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv32i8( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vx_nxv32i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1324,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv64i8( - , - , - iXLen, - iXLen) - define @intrinsic_vrol_vx_nxv64i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vx_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -1346,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv64i8( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vx_nxv64i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -1371,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv1i16( - , - , - iXLen, - iXLen) - define @intrinsic_vrol_vx_nxv1i16( %0, iXLen %1, iXLen %2) 
nounwind { ; CHECK-LABEL: intrinsic_vrol_vx_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1393,14 +981,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv1i16( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vx_nxv1i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1418,12 +998,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv2i16( - , - , - iXLen, - iXLen) - define @intrinsic_vrol_vx_nxv2i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vx_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1440,14 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv2i16( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vx_nxv2i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1465,12 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv4i16( - , - , - iXLen, - iXLen) - define @intrinsic_vrol_vx_nxv4i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vx_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1487,14 +1047,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv4i16( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vx_nxv4i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1512,12 +1064,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv8i16( - , - , - iXLen, - iXLen) - define @intrinsic_vrol_vx_nxv8i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vx_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1534,14 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv8i16( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vx_nxv8i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1559,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv16i16( - , - , - 
iXLen, - iXLen) - define @intrinsic_vrol_vx_nxv16i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vx_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1581,14 +1113,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv16i16( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vx_nxv16i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1606,12 +1130,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv32i16( - , - , - iXLen, - iXLen) - define @intrinsic_vrol_vx_nxv32i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vx_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1628,14 +1146,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv32i16( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vx_nxv32i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1653,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv1i32( - , - , - iXLen, - iXLen) - define @intrinsic_vrol_vx_nxv1i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vx_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1675,14 +1179,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv1i32( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vx_nxv1i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1700,12 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv2i32( - , - , - iXLen, - iXLen) - define @intrinsic_vrol_vx_nxv2i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vx_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1722,14 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv2i32( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vx_nxv2i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv2i32: ; CHECK: # %bb.0: # %entry 
@@ -1747,12 +1229,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv4i32( - , - , - iXLen, - iXLen) - define @intrinsic_vrol_vx_nxv4i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vx_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1769,14 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv4i32( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vx_nxv4i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1794,12 +1262,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv8i32( - , - , - iXLen, - iXLen) - define @intrinsic_vrol_vx_nxv8i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vx_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1816,14 +1278,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv8i32( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vx_nxv8i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1841,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv16i32( - , - , - iXLen, - iXLen) - define @intrinsic_vrol_vx_nxv16i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vx_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1863,14 +1311,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv16i32( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vx_nxv16i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1888,12 +1328,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv1i64( - , - , - iXLen, - iXLen) - define @intrinsic_vrol_vx_nxv1i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vx_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1910,14 +1344,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv1i64( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vx_nxv1i64( %0, %1, iXLen %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1935,12 +1361,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv2i64( - , - , - iXLen, - iXLen) - define @intrinsic_vrol_vx_nxv2i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vx_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1957,14 +1377,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv2i64( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vx_nxv2i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1982,12 +1394,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv4i64( - , - , - iXLen, - iXLen) - define @intrinsic_vrol_vx_nxv4i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vx_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -2004,14 +1410,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv4i64( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vx_nxv4i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -2029,12 +1427,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.nxv8i64( - , - , - iXLen, - iXLen) - define @intrinsic_vrol_vx_nxv8i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrol_vx_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -2051,14 +1443,6 @@ entry: ret %a } -declare @llvm.riscv.vrol.mask.nxv8i64( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vrol_mask_vx_nxv8i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vror-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vror-sdnode.ll index 97524ac61b96e..ddf6c530e3f2f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vror-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vror-sdnode.ll @@ -4,9 +4,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v,+zvkb -verify-machineinstrs < %s | FileCheck %s 
--check-prefix=CHECK-ZVKB ; RUN: llc -mtriple=riscv64 -mattr=+v,+zvkb -verify-machineinstrs < %s | FileCheck %s --check-prefix=CHECK-ZVKB -declare @llvm.fshr.nxv1i8(, , ) -declare @llvm.fshl.nxv1i8(, , ) - define @vror_vv_nxv1i8( %a, %b) { ; CHECK-LABEL: vror_vv_nxv1i8: ; CHECK: # %bb.0: @@ -88,9 +85,6 @@ define @vror_vi_rotl_nxv1i8( %a) { ret %x } -declare @llvm.fshr.nxv2i8(, , ) -declare @llvm.fshl.nxv2i8(, , ) - define @vror_vv_nxv2i8( %a, %b) { ; CHECK-LABEL: vror_vv_nxv2i8: ; CHECK: # %bb.0: @@ -172,9 +166,6 @@ define @vror_vi_rotl_nxv2i8( %a) { ret %x } -declare @llvm.fshr.nxv4i8(, , ) -declare @llvm.fshl.nxv4i8(, , ) - define @vror_vv_nxv4i8( %a, %b) { ; CHECK-LABEL: vror_vv_nxv4i8: ; CHECK: # %bb.0: @@ -256,9 +247,6 @@ define @vror_vi_rotl_nxv4i8( %a) { ret %x } -declare @llvm.fshr.nxv8i8(, , ) -declare @llvm.fshl.nxv8i8(, , ) - define @vror_vv_nxv8i8( %a, %b) { ; CHECK-LABEL: vror_vv_nxv8i8: ; CHECK: # %bb.0: @@ -340,9 +328,6 @@ define @vror_vi_rotl_nxv8i8( %a) { ret %x } -declare @llvm.fshr.nxv16i8(, , ) -declare @llvm.fshl.nxv16i8(, , ) - define @vror_vv_nxv16i8( %a, %b) { ; CHECK-LABEL: vror_vv_nxv16i8: ; CHECK: # %bb.0: @@ -424,9 +409,6 @@ define @vror_vi_rotl_nxv16i8( %a) { ret %x } -declare @llvm.fshr.nxv32i8(, , ) -declare @llvm.fshl.nxv32i8(, , ) - define @vror_vv_nxv32i8( %a, %b) { ; CHECK-LABEL: vror_vv_nxv32i8: ; CHECK: # %bb.0: @@ -508,9 +490,6 @@ define @vror_vi_rotl_nxv32i8( %a) { ret %x } -declare @llvm.fshr.nxv64i8(, , ) -declare @llvm.fshl.nxv64i8(, , ) - define @vror_vv_nxv64i8( %a, %b) { ; CHECK-LABEL: vror_vv_nxv64i8: ; CHECK: # %bb.0: @@ -592,9 +571,6 @@ define @vror_vi_rotl_nxv64i8( %a) { ret %x } -declare @llvm.fshr.nxv1i16(, , ) -declare @llvm.fshl.nxv1i16(, , ) - define @vror_vv_nxv1i16( %a, %b) { ; CHECK-LABEL: vror_vv_nxv1i16: ; CHECK: # %bb.0: @@ -676,9 +652,6 @@ define @vror_vi_rotl_nxv1i16( %a) { ret %x } -declare @llvm.fshr.nxv2i16(, , ) -declare @llvm.fshl.nxv2i16(, , ) - define @vror_vv_nxv2i16( %a, %b) { ; CHECK-LABEL: 
vror_vv_nxv2i16: ; CHECK: # %bb.0: @@ -760,9 +733,6 @@ define @vror_vi_rotl_nxv2i16( %a) { ret %x } -declare @llvm.fshr.nxv4i16(, , ) -declare @llvm.fshl.nxv4i16(, , ) - define @vror_vv_nxv4i16( %a, %b) { ; CHECK-LABEL: vror_vv_nxv4i16: ; CHECK: # %bb.0: @@ -844,9 +814,6 @@ define @vror_vi_rotl_nxv4i16( %a) { ret %x } -declare @llvm.fshr.nxv8i16(, , ) -declare @llvm.fshl.nxv8i16(, , ) - define @vror_vv_nxv8i16( %a, %b) { ; CHECK-LABEL: vror_vv_nxv8i16: ; CHECK: # %bb.0: @@ -928,9 +895,6 @@ define @vror_vi_rotl_nxv8i16( %a) { ret %x } -declare @llvm.fshr.nxv16i16(, , ) -declare @llvm.fshl.nxv16i16(, , ) - define @vror_vv_nxv16i16( %a, %b) { ; CHECK-LABEL: vror_vv_nxv16i16: ; CHECK: # %bb.0: @@ -1012,9 +976,6 @@ define @vror_vi_rotl_nxv16i16( %a) { ret %x } -declare @llvm.fshr.nxv32i16(, , ) -declare @llvm.fshl.nxv32i16(, , ) - define @vror_vv_nxv32i16( %a, %b) { ; CHECK-LABEL: vror_vv_nxv32i16: ; CHECK: # %bb.0: @@ -1096,9 +1057,6 @@ define @vror_vi_rotl_nxv32i16( %a) { ret %x } -declare @llvm.fshr.nxv1i32(, , ) -declare @llvm.fshl.nxv1i32(, , ) - define @vror_vv_nxv1i32( %a, %b) { ; CHECK-LABEL: vror_vv_nxv1i32: ; CHECK: # %bb.0: @@ -1193,9 +1151,6 @@ define @vror_vi_rotl_nxv1i32( %a) { ret %x } -declare @llvm.fshr.nxv2i32(, , ) -declare @llvm.fshl.nxv2i32(, , ) - define @vror_vv_nxv2i32( %a, %b) { ; CHECK-LABEL: vror_vv_nxv2i32: ; CHECK: # %bb.0: @@ -1290,9 +1245,6 @@ define @vror_vi_rotl_nxv2i32( %a) { ret %x } -declare @llvm.fshr.nxv4i32(, , ) -declare @llvm.fshl.nxv4i32(, , ) - define @vror_vv_nxv4i32( %a, %b) { ; CHECK-LABEL: vror_vv_nxv4i32: ; CHECK: # %bb.0: @@ -1387,9 +1339,6 @@ define @vror_vi_rotl_nxv4i32( %a) { ret %x } -declare @llvm.fshr.nxv8i32(, , ) -declare @llvm.fshl.nxv8i32(, , ) - define @vror_vv_nxv8i32( %a, %b) { ; CHECK-LABEL: vror_vv_nxv8i32: ; CHECK: # %bb.0: @@ -1484,9 +1433,6 @@ define @vror_vi_rotl_nxv8i32( %a) { ret %x } -declare @llvm.fshr.nxv16i32(, , ) -declare @llvm.fshl.nxv16i32(, , ) - define @vror_vv_nxv16i32( %a, %b) { ; 
CHECK-LABEL: vror_vv_nxv16i32: ; CHECK: # %bb.0: @@ -1581,9 +1527,6 @@ define @vror_vi_rotl_nxv16i32( %a) { ret %x } -declare @llvm.fshr.nxv1i64(, , ) -declare @llvm.fshl.nxv1i64(, , ) - define @vror_vv_nxv1i64( %a, %b) { ; CHECK-LABEL: vror_vv_nxv1i64: ; CHECK: # %bb.0: @@ -1683,9 +1626,6 @@ define @vror_vi_rotl_nxv1i64( %a) { ret %x } -declare @llvm.fshr.nxv2i64(, , ) -declare @llvm.fshl.nxv2i64(, , ) - define @vror_vv_nxv2i64( %a, %b) { ; CHECK-LABEL: vror_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1785,9 +1725,6 @@ define @vror_vi_rotl_nxv2i64( %a) { ret %x } -declare @llvm.fshr.nxv4i64(, , ) -declare @llvm.fshl.nxv4i64(, , ) - define @vror_vv_nxv4i64( %a, %b) { ; CHECK-LABEL: vror_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1887,9 +1824,6 @@ define @vror_vi_rotl_nxv4i64( %a) { ret %x } -declare @llvm.fshr.nxv8i64(, , ) -declare @llvm.fshl.nxv8i64(, , ) - define @vror_vv_nxv8i64( %a, %b) { ; CHECK-LABEL: vror_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vror.ll b/llvm/test/CodeGen/RISCV/rvv/vror.ll index 4e5734310daef..1f3114ab152e1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vror.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vror.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvkb \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vror.nxv1i8.nxv1i8( - , - , - , - iXLen) - define @intrinsic_vror_vv_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv2i8.nxv2i8( - , - , - , - iXLen) - define @intrinsic_vror_vv_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vror_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv4i8.nxv4i8( - , - , - , - iXLen) - define @intrinsic_vror_vv_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv8i8.nxv8i8( - , - , - , - iXLen) - define @intrinsic_vror_vv_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv16i8.nxv16i8( - , - , - , - iXLen) - define @intrinsic_vror_vv_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare 
@llvm.riscv.vror.nxv32i8.nxv32i8( - , - , - , - iXLen) - define @intrinsic_vror_vv_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv64i8.nxv64i8( - , - , - , - iXLen) - define @intrinsic_vror_vv_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vv_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vv_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vv_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv1i16.nxv1i16( - , - , - , - iXLen) - define @intrinsic_vror_vv_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv2i16.nxv2i16( - , - , - , - iXLen) - define @intrinsic_vror_vv_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vv_nxv2i16_nxv2i16( %0, 
%1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv4i16.nxv4i16( - , - , - , - iXLen) - define @intrinsic_vror_vv_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv8i16.nxv8i16( - , - , - , - iXLen) - define @intrinsic_vror_vv_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv16i16.nxv16i16( - , - , - , - iXLen) - define @intrinsic_vror_vv_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -544,14 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -569,12 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv32i16.nxv32i16( - , - , - , - iXLen) - define @intrinsic_vror_vv_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vv_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # 
%entry @@ -591,14 +417,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vv_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vv_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv1i32.nxv1i32( - , - , - , - iXLen) - define @intrinsic_vror_vv_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv2i32.nxv2i32( - , - , - , - iXLen) - define @intrinsic_vror_vv_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -711,12 +501,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv4i32.nxv4i32( - , - , - , - iXLen) - define @intrinsic_vror_vv_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -733,14 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -758,12 +534,6 @@ entry: ret %a } -declare 
@llvm.riscv.vror.nxv8i32.nxv8i32( - , - , - , - iXLen) - define @intrinsic_vror_vv_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -780,14 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -805,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv16i32.nxv16i32( - , - , - , - iXLen) - define @intrinsic_vror_vv_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vv_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -827,14 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vv_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vv_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv1i64.nxv1i64( - , - , - , - iXLen) - define @intrinsic_vror_vv_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv2i64.nxv2i64( - , - , - , - iXLen) - define @intrinsic_vror_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, - iXLen) - define 
@intrinsic_vror_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv4i64.nxv4i64( - , - , - , - iXLen) - define @intrinsic_vror_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv8i64.nxv8i64( - , - , - , - iXLen) - define @intrinsic_vror_vv_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vv_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vv_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vv_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1042,12 +734,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv1i8( - , - , - iXLen, - iXLen) - define @intrinsic_vror_vx_nxv1i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vx_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1064,14 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv1i8( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vx_nxv1i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vx_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1089,12 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv2i8( - , - , - iXLen, - iXLen) - define @intrinsic_vror_vx_nxv2i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vx_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1111,14 
+783,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv2i8( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vx_nxv2i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vx_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1136,12 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv4i8( - , - , - iXLen, - iXLen) - define @intrinsic_vror_vx_nxv4i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vx_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1158,14 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv4i8( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vx_nxv4i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vx_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1183,12 +833,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv8i8( - , - , - iXLen, - iXLen) - define @intrinsic_vror_vx_nxv8i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vx_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1205,14 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv8i8( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vx_nxv8i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vx_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1230,12 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv16i8( - , - , - iXLen, - iXLen) - define @intrinsic_vror_vx_nxv16i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vx_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1252,14 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv16i8( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vx_nxv16i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vx_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1277,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv32i8( - , - , - iXLen, - iXLen) - define @intrinsic_vror_vx_nxv32i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vror_vx_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1299,14 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv32i8( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vx_nxv32i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vx_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1324,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv64i8( - , - , - iXLen, - iXLen) - define @intrinsic_vror_vx_nxv64i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vx_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -1346,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv64i8( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vx_nxv64i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vx_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -1371,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv1i16( - , - , - iXLen, - iXLen) - define @intrinsic_vror_vx_nxv1i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vx_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1393,14 +981,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv1i16( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vx_nxv1i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vx_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1418,12 +998,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv2i16( - , - , - iXLen, - iXLen) - define @intrinsic_vror_vx_nxv2i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vx_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1440,14 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv2i16( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vx_nxv2i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vx_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1465,12 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv4i16( - , - , - iXLen, - iXLen) - define 
@intrinsic_vror_vx_nxv4i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vx_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1487,14 +1047,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv4i16( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vx_nxv4i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vx_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1512,12 +1064,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv8i16( - , - , - iXLen, - iXLen) - define @intrinsic_vror_vx_nxv8i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vx_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1534,14 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv8i16( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vx_nxv8i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vx_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1559,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv16i16( - , - , - iXLen, - iXLen) - define @intrinsic_vror_vx_nxv16i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vx_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1581,14 +1113,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv16i16( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vx_nxv16i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vx_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1606,12 +1130,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv32i16( - , - , - iXLen, - iXLen) - define @intrinsic_vror_vx_nxv32i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vx_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1628,14 +1146,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv32i16( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vx_nxv32i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vx_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1653,12 +1163,6 @@ 
entry: ret %a } -declare @llvm.riscv.vror.nxv1i32( - , - , - iXLen, - iXLen) - define @intrinsic_vror_vx_nxv1i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vx_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1675,14 +1179,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv1i32( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vx_nxv1i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vx_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1700,12 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv2i32( - , - , - iXLen, - iXLen) - define @intrinsic_vror_vx_nxv2i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vx_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1722,14 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv2i32( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vx_nxv2i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vx_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1747,12 +1229,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv4i32( - , - , - iXLen, - iXLen) - define @intrinsic_vror_vx_nxv4i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vx_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1769,14 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv4i32( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vx_nxv4i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vx_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1794,12 +1262,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv8i32( - , - , - iXLen, - iXLen) - define @intrinsic_vror_vx_nxv8i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vx_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1816,14 +1278,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv8i32( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vx_nxv8i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vror_mask_vx_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1841,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv16i32( - , - , - iXLen, - iXLen) - define @intrinsic_vror_vx_nxv16i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vx_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1863,14 +1311,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv16i32( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vx_nxv16i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vx_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1888,12 +1328,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv1i64( - , - , - iXLen, - iXLen) - define @intrinsic_vror_vx_nxv1i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vx_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1910,14 +1344,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv1i64( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vx_nxv1i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vx_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1935,12 +1361,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv2i64( - , - , - iXLen, - iXLen) - define @intrinsic_vror_vx_nxv2i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vx_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1957,14 +1377,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv2i64( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vx_nxv2i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vx_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1982,12 +1394,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv4i64( - , - , - iXLen, - iXLen) - define @intrinsic_vror_vx_nxv4i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vx_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -2004,14 +1410,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv4i64( - , - , - iXLen, - , - iXLen, - iXLen) - define 
@intrinsic_vror_mask_vx_nxv4i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vx_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -2029,12 +1427,6 @@ entry: ret %a } -declare @llvm.riscv.vror.nxv8i64( - , - , - iXLen, - iXLen) - define @intrinsic_vror_vx_nxv8i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vror_vx_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -2051,14 +1443,6 @@ entry: ret %a } -declare @llvm.riscv.vror.mask.nxv8i64( - , - , - iXLen, - , - iXLen, - iXLen) - define @intrinsic_vror_mask_vx_nxv8i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vror_mask_vx_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vrsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vrsub-vp.ll index c41139c64eb08..451a9b47a89f7 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vrsub-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrsub-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.sub.nxv1i8(, , , i32) - define @vrsub_vx_nxv1i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv1i8: ; CHECK: # %bb.0: @@ -50,8 +48,6 @@ define @vrsub_vi_nxv1i8_unmasked( %va, i32 ze ret %v } -declare @llvm.vp.sub.nxv2i8(, , , i32) - define @vrsub_vx_nxv2i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv2i8: ; CHECK: # %bb.0: @@ -96,8 +92,6 @@ define @vrsub_vi_nxv2i8_unmasked( %va, i32 ze ret %v } -declare @llvm.vp.sub.nxv4i8(, , , i32) - define @vrsub_vx_nxv4i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv4i8: ; CHECK: # %bb.0: @@ -142,8 +136,6 @@ define @vrsub_vi_nxv4i8_unmasked( %va, i32 ze ret %v } -declare @llvm.vp.sub.nxv8i8(, , , i32) - define @vrsub_vx_nxv8i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv8i8: ; CHECK: # %bb.0: @@ -188,8 +180,6 @@ define @vrsub_vi_nxv8i8_unmasked( %va, i32 ze ret %v } -declare @llvm.vp.sub.nxv16i8(, , , i32) - define 
@vrsub_vx_nxv16i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv16i8: ; CHECK: # %bb.0: @@ -234,8 +224,6 @@ define @vrsub_vi_nxv16i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.sub.nxv32i8(, , , i32) - define @vrsub_vx_nxv32i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv32i8: ; CHECK: # %bb.0: @@ -280,8 +268,6 @@ define @vrsub_vi_nxv32i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.sub.nxv64i8(, , , i32) - define @vrsub_vx_nxv64i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv64i8: ; CHECK: # %bb.0: @@ -326,8 +312,6 @@ define @vrsub_vi_nxv64i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.sub.nxv1i16(, , , i32) - define @vrsub_vx_nxv1i16( %va, i16 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv1i16: ; CHECK: # %bb.0: @@ -372,8 +356,6 @@ define @vrsub_vi_nxv1i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.sub.nxv2i16(, , , i32) - define @vrsub_vx_nxv2i16( %va, i16 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv2i16: ; CHECK: # %bb.0: @@ -418,8 +400,6 @@ define @vrsub_vi_nxv2i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.sub.nxv4i16(, , , i32) - define @vrsub_vx_nxv4i16( %va, i16 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv4i16: ; CHECK: # %bb.0: @@ -464,8 +444,6 @@ define @vrsub_vi_nxv4i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.sub.nxv8i16(, , , i32) - define @vrsub_vx_nxv8i16( %va, i16 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv8i16: ; CHECK: # %bb.0: @@ -510,8 +488,6 @@ define @vrsub_vi_nxv8i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.sub.nxv16i16(, , , i32) - define @vrsub_vx_nxv16i16( %va, i16 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv16i16: ; CHECK: # %bb.0: @@ -556,8 +532,6 @@ define @vrsub_vi_nxv16i16_unmasked( %va, ret %v } -declare @llvm.vp.sub.nxv32i16(, , , i32) - define @vrsub_vx_nxv32i16( %va, i16 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv32i16: ; CHECK: # %bb.0: @@ -602,8 +576,6 @@ define 
@vrsub_vi_nxv32i16_unmasked( %va, ret %v } -declare @llvm.vp.sub.nxv1i32(, , , i32) - define @vrsub_vx_nxv1i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv1i32: ; CHECK: # %bb.0: @@ -648,8 +620,6 @@ define @vrsub_vi_nxv1i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.sub.nxv2i32(, , , i32) - define @vrsub_vx_nxv2i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv2i32: ; CHECK: # %bb.0: @@ -694,8 +664,6 @@ define @vrsub_vi_nxv2i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.sub.nxv4i32(, , , i32) - define @vrsub_vx_nxv4i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv4i32: ; CHECK: # %bb.0: @@ -740,8 +708,6 @@ define @vrsub_vi_nxv4i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.sub.nxv8i32(, , , i32) - define @vrsub_vx_nxv8i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv8i32: ; CHECK: # %bb.0: @@ -786,8 +752,6 @@ define @vrsub_vi_nxv8i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.sub.nxv16i32(, , , i32) - define @vrsub_vx_nxv16i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv16i32: ; CHECK: # %bb.0: @@ -832,8 +796,6 @@ define @vrsub_vi_nxv16i32_unmasked( %va, ret %v } -declare @llvm.vp.sub.nxv1i64(, , , i32) - define @vrsub_vx_nxv1i64( %va, i64 %b, %m, i32 zeroext %evl) { ; RV32-LABEL: vrsub_vx_nxv1i64: ; RV32: # %bb.0: @@ -906,8 +868,6 @@ define @vrsub_vi_nxv1i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.sub.nxv2i64(, , , i32) - define @vrsub_vx_nxv2i64( %va, i64 %b, %m, i32 zeroext %evl) { ; RV32-LABEL: vrsub_vx_nxv2i64: ; RV32: # %bb.0: @@ -980,8 +940,6 @@ define @vrsub_vi_nxv2i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.sub.nxv4i64(, , , i32) - define @vrsub_vx_nxv4i64( %va, i64 %b, %m, i32 zeroext %evl) { ; RV32-LABEL: vrsub_vx_nxv4i64: ; RV32: # %bb.0: @@ -1054,8 +1012,6 @@ define @vrsub_vi_nxv4i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.sub.nxv8i64(, , , i32) - define @vrsub_vx_nxv8i64( %va, i64 %b, %m, i32 zeroext %evl) { ; RV32-LABEL: 
vrsub_vx_nxv8i64: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vrsub.ll b/llvm/test/CodeGen/RISCV/rvv/vrsub.ll index bab0e8fa0bff3..d3f2c9a24b420 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vrsub.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrsub.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vrsub.nxv1i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vrsub_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vrsub_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vrsub_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vrsub_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vrsub_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # 
%entry @@ -142,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vrsub_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vrsub_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vrsub_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vrsub_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vrsub_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vrsub_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -280,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.nxv64i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vrsub_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -302,13 +218,6 @@ entry: ret %a } -declare 
@llvm.riscv.vrsub.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vrsub_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -326,12 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vrsub_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -348,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vrsub_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -372,12 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vrsub_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -394,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vrsub_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -418,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vrsub_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -440,13 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vrsub_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -464,12 +334,6 @@ entry: ret %a } -declare 
@llvm.riscv.vrsub.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vrsub_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -486,13 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vrsub_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -510,12 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vrsub_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -532,13 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vrsub_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -556,12 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.nxv32i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vrsub_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -578,13 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vrsub_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -602,12 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vrsub_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -624,13 +449,6 @@ entry: ret %a } -declare 
@llvm.riscv.vrsub.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vrsub_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -648,12 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vrsub_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -670,13 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vrsub_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -694,12 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vrsub_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -716,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vrsub_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -740,12 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vrsub_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -762,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vrsub_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -786,12 +565,6 @@ entry: ret %a } 
-declare @llvm.riscv.vrsub.nxv16i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vrsub_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -808,13 +581,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vrsub_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -832,12 +598,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.nxv1i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vrsub_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vrsub_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -866,13 +626,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vrsub_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vrsub_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -902,12 +655,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.nxv2i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vrsub_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vrsub_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -936,13 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vrsub_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vrsub_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -972,12 +712,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.nxv4i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vrsub_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vrsub_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -1006,13 +740,6 @@ entry: ret %a } -declare 
@llvm.riscv.vrsub.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vrsub_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vrsub_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -1042,12 +769,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.nxv8i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vrsub_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vrsub_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -1076,13 +797,6 @@ entry: ret %a } -declare @llvm.riscv.vrsub.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vrsub_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vrsub_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-sdnode.ll index 15af5c418b413..88b358c66b504 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsadd-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-sdnode.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.sadd.sat.nxv1i8(, ) - define @sadd_nxv1i8_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv1i8_vv: ; CHECK: # %bb.0: @@ -38,8 +36,6 @@ define @sadd_nxv1i8_vi( %va) { ret %v } -declare @llvm.sadd.sat.nxv2i8(, ) - define @sadd_nxv2i8_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv2i8_vv: ; CHECK: # %bb.0: @@ -72,8 +68,6 @@ define @sadd_nxv2i8_vi( %va) { ret %v } -declare @llvm.sadd.sat.nxv4i8(, ) - define @sadd_nxv4i8_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv4i8_vv: ; CHECK: # %bb.0: @@ -106,8 +100,6 @@ define @sadd_nxv4i8_vi( %va) { ret %v } -declare @llvm.sadd.sat.nxv8i8(, ) - define @sadd_nxv8i8_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv8i8_vv: ; CHECK: # %bb.0: @@ -140,8 +132,6 @@ define @sadd_nxv8i8_vi( %va) { ret %v } -declare @llvm.sadd.sat.nxv16i8(, ) - define @sadd_nxv16i8_vv( %va, %b) { ; 
CHECK-LABEL: sadd_nxv16i8_vv: ; CHECK: # %bb.0: @@ -174,8 +164,6 @@ define @sadd_nxv16i8_vi( %va) { ret %v } -declare @llvm.sadd.sat.nxv32i8(, ) - define @sadd_nxv32i8_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv32i8_vv: ; CHECK: # %bb.0: @@ -208,8 +196,6 @@ define @sadd_nxv32i8_vi( %va) { ret %v } -declare @llvm.sadd.sat.nxv64i8(, ) - define @sadd_nxv64i8_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv64i8_vv: ; CHECK: # %bb.0: @@ -242,8 +228,6 @@ define @sadd_nxv64i8_vi( %va) { ret %v } -declare @llvm.sadd.sat.nxv1i16(, ) - define @sadd_nxv1i16_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv1i16_vv: ; CHECK: # %bb.0: @@ -276,8 +260,6 @@ define @sadd_nxv1i16_vi( %va) { ret %v } -declare @llvm.sadd.sat.nxv2i16(, ) - define @sadd_nxv2i16_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv2i16_vv: ; CHECK: # %bb.0: @@ -310,8 +292,6 @@ define @sadd_nxv2i16_vi( %va) { ret %v } -declare @llvm.sadd.sat.nxv4i16(, ) - define @sadd_nxv4i16_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv4i16_vv: ; CHECK: # %bb.0: @@ -344,8 +324,6 @@ define @sadd_nxv4i16_vi( %va) { ret %v } -declare @llvm.sadd.sat.nxv8i16(, ) - define @sadd_nxv8i16_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv8i16_vv: ; CHECK: # %bb.0: @@ -378,8 +356,6 @@ define @sadd_nxv8i16_vi( %va) { ret %v } -declare @llvm.sadd.sat.nxv16i16(, ) - define @sadd_nxv16i16_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv16i16_vv: ; CHECK: # %bb.0: @@ -412,8 +388,6 @@ define @sadd_nxv16i16_vi( %va) { ret %v } -declare @llvm.sadd.sat.nxv32i16(, ) - define @sadd_nxv32i16_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv32i16_vv: ; CHECK: # %bb.0: @@ -446,8 +420,6 @@ define @sadd_nxv32i16_vi( %va) { ret %v } -declare @llvm.sadd.sat.nxv1i32(, ) - define @sadd_nxv1i32_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv1i32_vv: ; CHECK: # %bb.0: @@ -480,8 +452,6 @@ define @sadd_nxv1i32_vi( %va) { ret %v } -declare @llvm.sadd.sat.nxv2i32(, ) - define @sadd_nxv2i32_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv2i32_vv: ; CHECK: # %bb.0: @@ -514,8 +484,6 @@ define @sadd_nxv2i32_vi( %va) { ret %v } -declare 
@llvm.sadd.sat.nxv4i32(, ) - define @sadd_nxv4i32_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv4i32_vv: ; CHECK: # %bb.0: @@ -548,8 +516,6 @@ define @sadd_nxv4i32_vi( %va) { ret %v } -declare @llvm.sadd.sat.nxv8i32(, ) - define @sadd_nxv8i32_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv8i32_vv: ; CHECK: # %bb.0: @@ -582,8 +548,6 @@ define @sadd_nxv8i32_vi( %va) { ret %v } -declare @llvm.sadd.sat.nxv16i32(, ) - define @sadd_nxv16i32_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv16i32_vv: ; CHECK: # %bb.0: @@ -616,8 +580,6 @@ define @sadd_nxv16i32_vi( %va) { ret %v } -declare @llvm.sadd.sat.nxv1i64(, ) - define @sadd_nxv1i64_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv1i64_vv: ; CHECK: # %bb.0: @@ -664,8 +626,6 @@ define @sadd_nxv1i64_vi( %va) { ret %v } -declare @llvm.sadd.sat.nxv2i64(, ) - define @sadd_nxv2i64_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv2i64_vv: ; CHECK: # %bb.0: @@ -712,8 +672,6 @@ define @sadd_nxv2i64_vi( %va) { ret %v } -declare @llvm.sadd.sat.nxv4i64(, ) - define @sadd_nxv4i64_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv4i64_vv: ; CHECK: # %bb.0: @@ -760,8 +718,6 @@ define @sadd_nxv4i64_vi( %va) { ret %v } -declare @llvm.sadd.sat.nxv8i64(, ) - define @sadd_nxv8i64_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv8i64_vv: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll index e471f4b2e92b5..98634fe55de41 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.sadd.sat.nxv8i7(, , , i32) - define @vsadd_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vx_nxv8i7: ; CHECK: # %bb.0: @@ -24,8 +22,6 @@ define @vsadd_vx_nxv8i7( %a, i7 signext %b, < ret %v } -declare @llvm.vp.sadd.sat.nxv1i8(, , , i32) - define @vsadd_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_nxv1i8: ; CHECK: # %bb.0: @@ -102,8 
+98,6 @@ define @vsadd_vi_nxv1i8_unmasked( %va, i32 ze ret %v } -declare @llvm.vp.sadd.sat.nxv2i8(, , , i32) - define @vsadd_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_nxv2i8: ; CHECK: # %bb.0: @@ -168,8 +162,6 @@ define @vsadd_vi_nxv2i8_unmasked( %va, i32 ze ret %v } -declare @llvm.vp.sadd.sat.nxv3i8(, , , i32) - define @vsadd_vv_nxv3i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_nxv3i8: ; CHECK: # %bb.0: @@ -234,8 +226,6 @@ define @vsadd_vi_nxv3i8_unmasked( %va, i32 ze ret %v } -declare @llvm.vp.sadd.sat.nxv4i8(, , , i32) - define @vsadd_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_nxv4i8: ; CHECK: # %bb.0: @@ -300,8 +290,6 @@ define @vsadd_vi_nxv4i8_unmasked( %va, i32 ze ret %v } -declare @llvm.vp.sadd.sat.nxv8i8(, , , i32) - define @vsadd_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_nxv8i8: ; CHECK: # %bb.0: @@ -366,8 +354,6 @@ define @vsadd_vi_nxv8i8_unmasked( %va, i32 ze ret %v } -declare @llvm.vp.sadd.sat.nxv16i8(, , , i32) - define @vsadd_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_nxv16i8: ; CHECK: # %bb.0: @@ -432,8 +418,6 @@ define @vsadd_vi_nxv16i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.sadd.sat.nxv32i8(, , , i32) - define @vsadd_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_nxv32i8: ; CHECK: # %bb.0: @@ -498,8 +482,6 @@ define @vsadd_vi_nxv32i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.sadd.sat.nxv64i8(, , , i32) - define @vsadd_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_nxv64i8: ; CHECK: # %bb.0: @@ -566,8 +548,6 @@ define @vsadd_vi_nxv64i8_unmasked( %va, i32 ; Test that split-legalization works when the mask itself needs splitting. 
-declare @llvm.vp.sadd.sat.nxv128i8(, , , i32) - define @vsadd_vi_nxv128i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vi_nxv128i8: ; CHECK: # %bb.0: @@ -616,8 +596,6 @@ define @vsadd_vi_nxv128i8_unmasked( %va, ret %v } -declare @llvm.vp.sadd.sat.nxv1i16(, , , i32) - define @vsadd_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_nxv1i16: ; CHECK: # %bb.0: @@ -682,8 +660,6 @@ define @vsadd_vi_nxv1i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.sadd.sat.nxv2i16(, , , i32) - define @vsadd_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_nxv2i16: ; CHECK: # %bb.0: @@ -748,8 +724,6 @@ define @vsadd_vi_nxv2i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.sadd.sat.nxv4i16(, , , i32) - define @vsadd_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_nxv4i16: ; CHECK: # %bb.0: @@ -814,8 +788,6 @@ define @vsadd_vi_nxv4i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.sadd.sat.nxv8i16(, , , i32) - define @vsadd_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_nxv8i16: ; CHECK: # %bb.0: @@ -880,8 +852,6 @@ define @vsadd_vi_nxv8i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.sadd.sat.nxv16i16(, , , i32) - define @vsadd_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_nxv16i16: ; CHECK: # %bb.0: @@ -946,8 +916,6 @@ define @vsadd_vi_nxv16i16_unmasked( %va, ret %v } -declare @llvm.vp.sadd.sat.nxv32i16(, , , i32) - define @vsadd_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_nxv32i16: ; CHECK: # %bb.0: @@ -1012,8 +980,6 @@ define @vsadd_vi_nxv32i16_unmasked( %va, ret %v } -declare @llvm.vp.sadd.sat.nxv1i32(, , , i32) - define @vsadd_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_nxv1i32: ; CHECK: # %bb.0: @@ -1078,8 +1044,6 @@ define @vsadd_vi_nxv1i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.sadd.sat.nxv2i32(, , , i32) - define @vsadd_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_nxv2i32: ; CHECK: # 
%bb.0: @@ -1144,8 +1108,6 @@ define @vsadd_vi_nxv2i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.sadd.sat.nxv4i32(, , , i32) - define @vsadd_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_nxv4i32: ; CHECK: # %bb.0: @@ -1210,8 +1172,6 @@ define @vsadd_vi_nxv4i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.sadd.sat.nxv8i32(, , , i32) - define @vsadd_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_nxv8i32: ; CHECK: # %bb.0: @@ -1276,8 +1236,6 @@ define @vsadd_vi_nxv8i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.sadd.sat.nxv16i32(, , , i32) - define @vsadd_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_nxv16i32: ; CHECK: # %bb.0: @@ -1344,8 +1302,6 @@ define @vsadd_vi_nxv16i32_unmasked( %va, ; Test that split-legalization works then the mask needs manual splitting. -declare @llvm.vp.sadd.sat.nxv32i32(, , , i32) - define @vsadd_vi_nxv32i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vi_nxv32i32: ; CHECK: # %bb.0: @@ -1395,8 +1351,6 @@ define @vsadd_vi_nxv32i32_unmasked( %va, ret %v } -declare @llvm.vp.sadd.sat.nxv1i64(, , , i32) - define @vsadd_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_nxv1i64: ; CHECK: # %bb.0: @@ -1489,8 +1443,6 @@ define @vsadd_vi_nxv1i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.sadd.sat.nxv2i64(, , , i32) - define @vsadd_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1583,8 +1535,6 @@ define @vsadd_vi_nxv2i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.sadd.sat.nxv4i64(, , , i32) - define @vsadd_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1677,8 +1627,6 @@ define @vsadd_vi_nxv4i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.sadd.sat.nxv8i64(, , , i32) - define @vsadd_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsadd_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd.ll 
b/llvm/test/CodeGen/RISCV/rvv/vsadd.ll index 7729e7f00f5c4..4d16bb467fa46 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsadd.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsadd.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vsadd.nxv1i8.nxv1i8( - , - , - , - iXLen) - define @intrinsic_vsadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv2i8.nxv2i8( - , - , - , - iXLen) - define @intrinsic_vsadd_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv4i8.nxv4i8( - , - , - , - iXLen) - define @intrinsic_vsadd_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } 
-declare @llvm.riscv.vsadd.nxv8i8.nxv8i8( - , - , - , - iXLen) - define @intrinsic_vsadd_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv16i8.nxv16i8( - , - , - , - iXLen) - define @intrinsic_vsadd_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv32i8.nxv32i8( - , - , - , - iXLen) - define @intrinsic_vsadd_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv64i8.nxv64i8( - , - , - , - iXLen) - define @intrinsic_vsadd_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a } -declare 
@llvm.riscv.vsadd.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv1i16.nxv1i16( - , - , - , - iXLen) - define @intrinsic_vsadd_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv2i16.nxv2i16( - , - , - , - iXLen) - define @intrinsic_vsadd_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv4i16.nxv4i16( - , - , - , - iXLen) - define @intrinsic_vsadd_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ 
-475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv8i16.nxv8i16( - , - , - , - iXLen) - define @intrinsic_vsadd_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv16i16.nxv16i16( - , - , - , - iXLen) - define @intrinsic_vsadd_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -544,14 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -569,12 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv32i16.nxv32i16( - , - , - , - iXLen) - define @intrinsic_vsadd_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -591,14 +417,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv1i32.nxv1i32( - , - , - , - iXLen) - define @intrinsic_vsadd_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vsadd_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv2i32.nxv2i32( - , - , - , - iXLen) - define @intrinsic_vsadd_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -711,12 +501,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv4i32.nxv4i32( - , - , - , - iXLen) - define @intrinsic_vsadd_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -733,14 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -758,12 +534,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv8i32.nxv8i32( - , - , - , - iXLen) - define @intrinsic_vsadd_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -780,14 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, 
iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -805,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv16i32.nxv16i32( - , - , - , - iXLen) - define @intrinsic_vsadd_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -827,14 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv1i64.nxv1i64( - , - , - , - iXLen) - define @intrinsic_vsadd_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv2i64.nxv2i64( - , - , - , - iXLen) - define @intrinsic_vsadd_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv4i64.nxv4i64( - , - , - , - iXLen) - define 
@intrinsic_vsadd_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv8i64.nxv8i64( - , - , - , - iXLen) - define @intrinsic_vsadd_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1042,12 +734,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv1i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vsadd_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1064,14 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1089,12 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv2i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vsadd_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1111,14 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define 
@intrinsic_vsadd_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1136,12 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv4i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vsadd_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1158,14 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1183,12 +833,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv8i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vsadd_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1205,14 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1230,12 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv16i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vsadd_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1252,14 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1277,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv32i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vsadd_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) 
nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1299,14 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1324,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv64i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vsadd_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1346,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1371,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv1i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vsadd_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1393,14 +981,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1418,12 +998,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv2i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vsadd_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1440,14 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1465,12 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv4i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vsadd_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1487,14 +1047,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1512,12 +1064,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv8i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vsadd_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1534,14 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1559,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv16i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vsadd_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1581,14 +1113,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1606,12 +1130,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv32i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vsadd_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen 
%2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1628,14 +1146,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1653,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv1i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vsadd_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1675,14 +1179,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1700,12 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv2i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vsadd_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1722,14 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1747,12 +1229,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv4i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vsadd_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1769,14 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vx_nxv4i32_nxv4i32_i32( %0, 
%1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1794,12 +1262,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv8i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vsadd_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1816,14 +1278,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1841,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv16i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vsadd_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1863,14 +1311,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1888,12 +1328,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv1i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1922,14 +1356,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vsadd_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1959,12 +1385,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv2i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vsadd_vx_nxv2i64_nxv2i64_i64( 
%0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vsadd_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1993,14 +1413,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vsadd_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2030,12 +1442,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv4i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vsadd_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vsadd_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2064,14 +1470,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vsadd_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2101,12 +1499,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.nxv8i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vsadd_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vsadd_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2135,14 +1527,6 @@ entry: ret %a } -declare @llvm.riscv.vsadd.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vsadd_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vsadd_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-sdnode.ll index c146f61fbf976..1328f5964e903 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-sdnode.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.uadd.sat.nxv1i8(, ) - define @uadd_nxv1i8_vv( %va, 
%b) { ; CHECK-LABEL: uadd_nxv1i8_vv: ; CHECK: # %bb.0: @@ -38,8 +36,6 @@ define @uadd_nxv1i8_vi( %va) { ret %v } -declare @llvm.uadd.sat.nxv2i8(, ) - define @uadd_nxv2i8_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv2i8_vv: ; CHECK: # %bb.0: @@ -72,8 +68,6 @@ define @uadd_nxv2i8_vi( %va) { ret %v } -declare @llvm.uadd.sat.nxv4i8(, ) - define @uadd_nxv4i8_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv4i8_vv: ; CHECK: # %bb.0: @@ -106,8 +100,6 @@ define @uadd_nxv4i8_vi( %va) { ret %v } -declare @llvm.uadd.sat.nxv8i8(, ) - define @uadd_nxv8i8_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv8i8_vv: ; CHECK: # %bb.0: @@ -140,8 +132,6 @@ define @uadd_nxv8i8_vi( %va) { ret %v } -declare @llvm.uadd.sat.nxv16i8(, ) - define @uadd_nxv16i8_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv16i8_vv: ; CHECK: # %bb.0: @@ -174,8 +164,6 @@ define @uadd_nxv16i8_vi( %va) { ret %v } -declare @llvm.uadd.sat.nxv32i8(, ) - define @uadd_nxv32i8_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv32i8_vv: ; CHECK: # %bb.0: @@ -208,8 +196,6 @@ define @uadd_nxv32i8_vi( %va) { ret %v } -declare @llvm.uadd.sat.nxv64i8(, ) - define @uadd_nxv64i8_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv64i8_vv: ; CHECK: # %bb.0: @@ -242,8 +228,6 @@ define @uadd_nxv64i8_vi( %va) { ret %v } -declare @llvm.uadd.sat.nxv1i16(, ) - define @uadd_nxv1i16_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv1i16_vv: ; CHECK: # %bb.0: @@ -276,8 +260,6 @@ define @uadd_nxv1i16_vi( %va) { ret %v } -declare @llvm.uadd.sat.nxv2i16(, ) - define @uadd_nxv2i16_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv2i16_vv: ; CHECK: # %bb.0: @@ -310,8 +292,6 @@ define @uadd_nxv2i16_vi( %va) { ret %v } -declare @llvm.uadd.sat.nxv4i16(, ) - define @uadd_nxv4i16_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv4i16_vv: ; CHECK: # %bb.0: @@ -344,8 +324,6 @@ define @uadd_nxv4i16_vi( %va) { ret %v } -declare @llvm.uadd.sat.nxv8i16(, ) - define @uadd_nxv8i16_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv8i16_vv: ; CHECK: # %bb.0: @@ -378,8 +356,6 @@ define @uadd_nxv8i16_vi( %va) { ret %v } -declare @llvm.uadd.sat.nxv16i16(, ) - define 
@uadd_nxv16i16_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv16i16_vv: ; CHECK: # %bb.0: @@ -412,8 +388,6 @@ define @uadd_nxv16i16_vi( %va) { ret %v } -declare @llvm.uadd.sat.nxv32i16(, ) - define @uadd_nxv32i16_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv32i16_vv: ; CHECK: # %bb.0: @@ -446,8 +420,6 @@ define @uadd_nxv32i16_vi( %va) { ret %v } -declare @llvm.uadd.sat.nxv1i32(, ) - define @uadd_nxv1i32_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv1i32_vv: ; CHECK: # %bb.0: @@ -480,8 +452,6 @@ define @uadd_nxv1i32_vi( %va) { ret %v } -declare @llvm.uadd.sat.nxv2i32(, ) - define @uadd_nxv2i32_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv2i32_vv: ; CHECK: # %bb.0: @@ -514,8 +484,6 @@ define @uadd_nxv2i32_vi( %va) { ret %v } -declare @llvm.uadd.sat.nxv4i32(, ) - define @uadd_nxv4i32_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv4i32_vv: ; CHECK: # %bb.0: @@ -548,8 +516,6 @@ define @uadd_nxv4i32_vi( %va) { ret %v } -declare @llvm.uadd.sat.nxv8i32(, ) - define @uadd_nxv8i32_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv8i32_vv: ; CHECK: # %bb.0: @@ -582,8 +548,6 @@ define @uadd_nxv8i32_vi( %va) { ret %v } -declare @llvm.uadd.sat.nxv16i32(, ) - define @uadd_nxv16i32_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv16i32_vv: ; CHECK: # %bb.0: @@ -616,8 +580,6 @@ define @uadd_nxv16i32_vi( %va) { ret %v } -declare @llvm.uadd.sat.nxv1i64(, ) - define @uadd_nxv1i64_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv1i64_vv: ; CHECK: # %bb.0: @@ -664,8 +626,6 @@ define @uadd_nxv1i64_vi( %va) { ret %v } -declare @llvm.uadd.sat.nxv2i64(, ) - define @uadd_nxv2i64_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv2i64_vv: ; CHECK: # %bb.0: @@ -712,8 +672,6 @@ define @uadd_nxv2i64_vi( %va) { ret %v } -declare @llvm.uadd.sat.nxv4i64(, ) - define @uadd_nxv4i64_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv4i64_vv: ; CHECK: # %bb.0: @@ -760,8 +718,6 @@ define @uadd_nxv4i64_vi( %va) { ret %v } -declare @llvm.uadd.sat.nxv8i64(, ) - define @uadd_nxv8i64_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv8i64_vv: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll 
b/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll index f76a2b4b78bca..a7d304261f87f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.uadd.sat.nxv8i7(, , , i32) - define @vsaddu_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vx_nxv8i7: ; CHECK: # %bb.0: @@ -23,8 +21,6 @@ define @vsaddu_vx_nxv8i7( %a, i7 signext %b, ret %v } -declare @llvm.vp.uadd.sat.nxv1i8(, , , i32) - define @vsaddu_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_nxv1i8: ; CHECK: # %bb.0: @@ -101,8 +97,6 @@ define @vsaddu_vi_nxv1i8_unmasked( %va, i32 z ret %v } -declare @llvm.vp.uadd.sat.nxv2i8(, , , i32) - define @vsaddu_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_nxv2i8: ; CHECK: # %bb.0: @@ -167,8 +161,6 @@ define @vsaddu_vi_nxv2i8_unmasked( %va, i32 z ret %v } -declare @llvm.vp.uadd.sat.nxv3i8(, , , i32) - define @vsaddu_vv_nxv3i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_nxv3i8: ; CHECK: # %bb.0: @@ -233,8 +225,6 @@ define @vsaddu_vi_nxv3i8_unmasked( %va, i32 z ret %v } -declare @llvm.vp.uadd.sat.nxv4i8(, , , i32) - define @vsaddu_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_nxv4i8: ; CHECK: # %bb.0: @@ -299,8 +289,6 @@ define @vsaddu_vi_nxv4i8_unmasked( %va, i32 z ret %v } -declare @llvm.vp.uadd.sat.nxv8i8(, , , i32) - define @vsaddu_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_nxv8i8: ; CHECK: # %bb.0: @@ -365,8 +353,6 @@ define @vsaddu_vi_nxv8i8_unmasked( %va, i32 z ret %v } -declare @llvm.vp.uadd.sat.nxv16i8(, , , i32) - define @vsaddu_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_nxv16i8: ; CHECK: # %bb.0: @@ -431,8 +417,6 @@ define @vsaddu_vi_nxv16i8_unmasked( %va, i3 ret %v } -declare @llvm.vp.uadd.sat.nxv32i8(, , , i32) - 
define @vsaddu_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_nxv32i8: ; CHECK: # %bb.0: @@ -497,8 +481,6 @@ define @vsaddu_vi_nxv32i8_unmasked( %va, i3 ret %v } -declare @llvm.vp.uadd.sat.nxv64i8(, , , i32) - define @vsaddu_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_nxv64i8: ; CHECK: # %bb.0: @@ -565,8 +547,6 @@ define @vsaddu_vi_nxv64i8_unmasked( %va, i3 ; Test that split-legalization works when the mask itself needs splitting. -declare @llvm.vp.uadd.sat.nxv128i8(, , , i32) - define @vsaddu_vi_nxv128i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vi_nxv128i8: ; CHECK: # %bb.0: @@ -615,8 +595,6 @@ define @vsaddu_vi_nxv128i8_unmasked( %va, ret %v } -declare @llvm.vp.uadd.sat.nxv1i16(, , , i32) - define @vsaddu_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_nxv1i16: ; CHECK: # %bb.0: @@ -681,8 +659,6 @@ define @vsaddu_vi_nxv1i16_unmasked( %va, i3 ret %v } -declare @llvm.vp.uadd.sat.nxv2i16(, , , i32) - define @vsaddu_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_nxv2i16: ; CHECK: # %bb.0: @@ -747,8 +723,6 @@ define @vsaddu_vi_nxv2i16_unmasked( %va, i3 ret %v } -declare @llvm.vp.uadd.sat.nxv4i16(, , , i32) - define @vsaddu_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_nxv4i16: ; CHECK: # %bb.0: @@ -813,8 +787,6 @@ define @vsaddu_vi_nxv4i16_unmasked( %va, i3 ret %v } -declare @llvm.vp.uadd.sat.nxv8i16(, , , i32) - define @vsaddu_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_nxv8i16: ; CHECK: # %bb.0: @@ -879,8 +851,6 @@ define @vsaddu_vi_nxv8i16_unmasked( %va, i3 ret %v } -declare @llvm.vp.uadd.sat.nxv16i16(, , , i32) - define @vsaddu_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_nxv16i16: ; CHECK: # %bb.0: @@ -945,8 +915,6 @@ define @vsaddu_vi_nxv16i16_unmasked( %va, ret %v } -declare @llvm.vp.uadd.sat.nxv32i16(, , , i32) - define @vsaddu_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) { ; 
CHECK-LABEL: vsaddu_vv_nxv32i16: ; CHECK: # %bb.0: @@ -1011,8 +979,6 @@ define @vsaddu_vi_nxv32i16_unmasked( %va, ret %v } -declare @llvm.vp.uadd.sat.nxv1i32(, , , i32) - define @vsaddu_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_nxv1i32: ; CHECK: # %bb.0: @@ -1077,8 +1043,6 @@ define @vsaddu_vi_nxv1i32_unmasked( %va, i3 ret %v } -declare @llvm.vp.uadd.sat.nxv2i32(, , , i32) - define @vsaddu_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_nxv2i32: ; CHECK: # %bb.0: @@ -1143,8 +1107,6 @@ define @vsaddu_vi_nxv2i32_unmasked( %va, i3 ret %v } -declare @llvm.vp.uadd.sat.nxv4i32(, , , i32) - define @vsaddu_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_nxv4i32: ; CHECK: # %bb.0: @@ -1209,8 +1171,6 @@ define @vsaddu_vi_nxv4i32_unmasked( %va, i3 ret %v } -declare @llvm.vp.uadd.sat.nxv8i32(, , , i32) - define @vsaddu_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_nxv8i32: ; CHECK: # %bb.0: @@ -1275,8 +1235,6 @@ define @vsaddu_vi_nxv8i32_unmasked( %va, i3 ret %v } -declare @llvm.vp.uadd.sat.nxv16i32(, , , i32) - define @vsaddu_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_nxv16i32: ; CHECK: # %bb.0: @@ -1343,8 +1301,6 @@ define @vsaddu_vi_nxv16i32_unmasked( %va, ; Test that split-legalization works then the mask needs manual splitting. 
-declare @llvm.vp.uadd.sat.nxv32i32(, , , i32) - define @vsaddu_vi_nxv32i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vi_nxv32i32: ; CHECK: # %bb.0: @@ -1394,8 +1350,6 @@ define @vsaddu_vi_nxv32i32_unmasked( %va, ret %v } -declare @llvm.vp.uadd.sat.nxv1i64(, , , i32) - define @vsaddu_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_nxv1i64: ; CHECK: # %bb.0: @@ -1488,8 +1442,6 @@ define @vsaddu_vi_nxv1i64_unmasked( %va, i3 ret %v } -declare @llvm.vp.uadd.sat.nxv2i64(, , , i32) - define @vsaddu_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1582,8 +1534,6 @@ define @vsaddu_vi_nxv2i64_unmasked( %va, i3 ret %v } -declare @llvm.vp.uadd.sat.nxv4i64(, , , i32) - define @vsaddu_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1676,8 +1626,6 @@ define @vsaddu_vi_nxv4i64_unmasked( %va, i3 ret %v } -declare @llvm.vp.uadd.sat.nxv8i64(, , , i32) - define @vsaddu_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsaddu_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu.ll index 0526a5f4b5500..032c9057aa0c8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsaddu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vsaddu.nxv1i8.nxv1i8( - , - , - , - iXLen) - define @intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # 
%entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv2i8.nxv2i8( - , - , - , - iXLen) - define @intrinsic_vsaddu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv4i8.nxv4i8( - , - , - , - iXLen) - define @intrinsic_vsaddu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv8i8.nxv8i8( - , - , - , - iXLen) - define @intrinsic_vsaddu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv16i8.nxv16i8( - , - , - , - iXLen) - define @intrinsic_vsaddu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ 
entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv32i8.nxv32i8( - , - , - , - iXLen) - define @intrinsic_vsaddu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv64i8.nxv64i8( - , - , - , - iXLen) - define @intrinsic_vsaddu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv1i16.nxv1i16( - , - , - , - iXLen) - define @intrinsic_vsaddu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv2i16.nxv2i16( - , - , - , - iXLen) - define @intrinsic_vsaddu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv4i16.nxv4i16( - , - , - , - iXLen) - define @intrinsic_vsaddu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv8i16.nxv8i16( - , - , - , - iXLen) - define @intrinsic_vsaddu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv16i16.nxv16i16( - , - , - , - iXLen) - define @intrinsic_vsaddu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, 
iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -544,14 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -569,12 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv32i16.nxv32i16( - , - , - , - iXLen) - define @intrinsic_vsaddu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -591,14 +417,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv1i32.nxv1i32( - , - , - , - iXLen) - define @intrinsic_vsaddu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv2i32.nxv2i32( - , - , - , - iXLen) - define @intrinsic_vsaddu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, 
- iXLen) - define @intrinsic_vsaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -711,12 +501,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv4i32.nxv4i32( - , - , - , - iXLen) - define @intrinsic_vsaddu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -733,14 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -758,12 +534,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv8i32.nxv8i32( - , - , - , - iXLen) - define @intrinsic_vsaddu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -780,14 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -805,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv16i32.nxv16i32( - , - , - , - iXLen) - define @intrinsic_vsaddu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -827,14 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret 
%a } -declare @llvm.riscv.vsaddu.nxv1i64.nxv1i64( - , - , - , - iXLen) - define @intrinsic_vsaddu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv2i64.nxv2i64( - , - , - , - iXLen) - define @intrinsic_vsaddu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv4i64.nxv4i64( - , - , - , - iXLen) - define @intrinsic_vsaddu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv8i64.nxv8i64( - , - , - , - iXLen) - define @intrinsic_vsaddu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ 
-1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1042,12 +734,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv1i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vsaddu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1064,14 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1089,12 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv2i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vsaddu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1111,14 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1136,12 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv4i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vsaddu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1158,14 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1183,12 +833,6 @@ 
entry: ret %a } -declare @llvm.riscv.vsaddu.nxv8i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vsaddu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1205,14 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1230,12 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv16i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vsaddu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1252,14 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1277,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv32i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vsaddu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1299,14 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1324,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv64i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vsaddu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1346,14 +948,6 @@ entry: ret %a } -declare 
@llvm.riscv.vsaddu.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1371,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv1i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vsaddu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1393,14 +981,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1418,12 +998,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv2i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vsaddu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1440,14 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1465,12 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv4i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vsaddu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1487,14 +1047,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1512,12 
+1064,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv8i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vsaddu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1534,14 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1559,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv16i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vsaddu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1581,14 +1113,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1606,12 +1130,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv32i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vsaddu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1628,14 +1146,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1653,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv1i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vsaddu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv1i32_nxv1i32_i32: ; CHECK: # 
%bb.0: # %entry @@ -1675,14 +1179,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1700,12 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv2i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vsaddu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1722,14 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1747,12 +1229,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv4i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vsaddu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1769,14 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1794,12 +1262,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv8i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vsaddu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1816,14 +1278,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsaddu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1841,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv16i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vsaddu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1863,14 +1311,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1888,12 +1328,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv1i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vsaddu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vsaddu_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1922,14 +1356,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vsaddu_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1959,12 +1385,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv2i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vsaddu_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vsaddu_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1993,14 +1413,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vsaddu_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2030,12 +1442,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv4i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vsaddu_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; 
RV32-LABEL: intrinsic_vsaddu_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2064,14 +1470,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vsaddu_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2101,12 +1499,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.nxv8i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vsaddu_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vsaddu_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2135,14 +1527,6 @@ entry: ret %a } -declare @llvm.riscv.vsaddu.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vsaddu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vsaddu_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsbc.ll b/llvm/test/CodeGen/RISCV/rvv/vsbc.ll index 014f6a02d83a9..bc12a3c0e7488 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsbc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsbc.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vsbc.nxv1i8.nxv1i8( - , - , - , - , - iXLen); - define @intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -28,13 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv2i8.nxv2i8( - , - , - , - , - iXLen); - define @intrinsic_vsbc_vvm_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -52,13 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv4i8.nxv4i8( - , - , - , - , - iXLen); - define @intrinsic_vsbc_vvm_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, iXLen 
%3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -76,13 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv8i8.nxv8i8( - , - , - , - , - iXLen); - define @intrinsic_vsbc_vvm_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -100,13 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv16i8.nxv16i8( - , - , - , - , - iXLen); - define @intrinsic_vsbc_vvm_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -124,13 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv32i8.nxv32i8( - , - , - , - , - iXLen); - define @intrinsic_vsbc_vvm_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -148,13 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv64i8.nxv64i8( - , - , - , - , - iXLen); - define @intrinsic_vsbc_vvm_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -172,13 +123,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv1i16.nxv1i16( - , - , - , - , - iXLen); - define @intrinsic_vsbc_vvm_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -196,13 +140,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv2i16.nxv2i16( - , - , - , - , - iXLen); - define @intrinsic_vsbc_vvm_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -220,13 +157,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv4i16.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vsbc_vvm_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vsbc_vvm_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -244,13 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv8i16.nxv8i16( - , - , - , - , - iXLen); - define @intrinsic_vsbc_vvm_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -268,13 +191,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv16i16.nxv16i16( - , - , - , - , - iXLen); - define @intrinsic_vsbc_vvm_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -292,13 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv32i16.nxv32i16( - , - , - , - , - iXLen); - define @intrinsic_vsbc_vvm_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -316,13 +225,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv1i32.nxv1i32( - , - , - , - , - iXLen); - define @intrinsic_vsbc_vvm_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -340,13 +242,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv2i32.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vsbc_vvm_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -364,13 +259,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv4i32.nxv4i32( - , - , - , - , - iXLen); - define @intrinsic_vsbc_vvm_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -388,13 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv8i32.nxv8i32( - , - , - , - , - iXLen); - define @intrinsic_vsbc_vvm_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vsbc_vvm_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -412,13 +293,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv16i32.nxv16i32( - , - , - , - , - iXLen); - define @intrinsic_vsbc_vvm_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -436,13 +310,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv1i64.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vsbc_vvm_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -460,13 +327,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv2i64.nxv2i64( - , - , - , - , - iXLen); - define @intrinsic_vsbc_vvm_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -484,13 +344,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv4i64.nxv4i64( - , - , - , - , - iXLen); - define @intrinsic_vsbc_vvm_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -508,13 +361,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv8i64.nxv8i64( - , - , - , - , - iXLen); - define @intrinsic_vsbc_vvm_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -532,13 +378,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv1i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vsbc_vxm_nxv1i8_nxv1i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -556,13 +395,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv2i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vsbc_vxm_nxv2i8_nxv2i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # 
%entry @@ -580,13 +412,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv4i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vsbc_vxm_nxv4i8_nxv4i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -604,13 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv8i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vsbc_vxm_nxv8i8_nxv8i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -628,13 +446,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv16i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vsbc_vxm_nxv16i8_nxv16i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -652,13 +463,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv32i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vsbc_vxm_nxv32i8_nxv32i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -676,13 +480,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv64i8.i8( - , - , - i8, - , - iXLen); - define @intrinsic_vsbc_vxm_nxv64i8_nxv64i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -700,13 +497,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv1i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vsbc_vxm_nxv1i16_nxv1i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -724,13 +514,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv2i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vsbc_vxm_nxv2i16_nxv2i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -748,13 +531,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv4i16.i16( - , - , - i16, - , - iXLen); - 
define @intrinsic_vsbc_vxm_nxv4i16_nxv4i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -772,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv8i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vsbc_vxm_nxv8i16_nxv8i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -796,13 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv16i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vsbc_vxm_nxv16i16_nxv16i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -820,13 +582,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv32i16.i16( - , - , - i16, - , - iXLen); - define @intrinsic_vsbc_vxm_nxv32i16_nxv32i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -844,13 +599,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv1i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vsbc_vxm_nxv1i32_nxv1i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -868,13 +616,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv2i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vsbc_vxm_nxv2i32_nxv2i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -892,13 +633,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv4i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vsbc_vxm_nxv4i32_nxv4i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -916,13 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv8i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vsbc_vxm_nxv8i32_nxv8i32_i32( %0, i32 %1, %2, iXLen 
%3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -940,13 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv16i32.i32( - , - , - i32, - , - iXLen); - define @intrinsic_vsbc_vxm_nxv16i32_nxv16i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -964,13 +684,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv1i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vsbc_vxm_nxv1i64_nxv1i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vsbc_vxm_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1000,13 +713,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv2i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vsbc_vxm_nxv2i64_nxv2i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vsbc_vxm_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1036,13 +742,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv4i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vsbc_vxm_nxv4i64_nxv4i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vsbc_vxm_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -1072,13 +771,6 @@ entry: ret %a } -declare @llvm.riscv.vsbc.nxv8i64.i64( - , - , - i64, - , - iXLen); - define @intrinsic_vsbc_vxm_nxv8i64_nxv8i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { ; RV32-LABEL: intrinsic_vsbc_vxm_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vscale-power-of-two.ll b/llvm/test/CodeGen/RISCV/rvv/vscale-power-of-two.ll index 4442f97b8fe76..aef3aa563069f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vscale-power-of-two.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vscale-power-of-two.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+m -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+zve64x,+m -verify-machineinstrs < %s | FileCheck %s -declare i64 @llvm.vscale.i64() - define i64 @vscale_lshr(i64 %TC) { ; 
CHECK-LABEL: vscale_lshr: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vse.ll b/llvm/test/CodeGen/RISCV/rvv/vse.ll index 607ce2394ee81..3b0d4f8891963 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vse.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vse.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare void @llvm.riscv.vse.nxv1i64( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv1i64_nxv1i64( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -24,12 +19,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv1i64( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv1i64_nxv1i64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -62,11 +51,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv2i64( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv2i64_nxv2i64( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -82,12 +66,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv2i64( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv2i64_nxv2i64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -104,11 +82,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv4i64( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv4i64_nxv4i64( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -124,12 +97,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv4i64( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv4i64_nxv4i64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -146,11 +113,6 @@ entry: ret 
void } -declare void @llvm.riscv.vse.nxv8i64( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv8i64_nxv8i64( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -166,12 +128,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv8i64( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv8i64_nxv8i64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -188,11 +144,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv1f64( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv1f64_nxv1f64( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -208,12 +159,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv1f64( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv1f64_nxv1f64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -230,11 +175,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv2f64( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv2f64_nxv2f64( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -250,12 +190,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv2f64( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv2f64_nxv2f64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -272,11 +206,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv4f64( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv4f64_nxv4f64( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -292,12 +221,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv4f64( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv4f64_nxv4f64( %0, ptr %1, %2, 
iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -314,11 +237,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv8f64( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv8f64_nxv8f64( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -334,12 +252,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv8f64( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv8f64_nxv8f64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -356,11 +268,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv1i32( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv1i32_nxv1i32( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -376,12 +283,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv1i32_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -398,11 +299,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv2i32( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv2i32_nxv2i32( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -418,12 +314,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv2i32_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -440,11 +330,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv4i32( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv4i32_nxv4i32( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -460,12 +345,6 @@ entry: ret void } 
-declare void @llvm.riscv.vse.mask.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv4i32_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -482,11 +361,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv8i32( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv8i32_nxv8i32( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -502,12 +376,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv8i32_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -524,11 +392,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv16i32( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv16i32_nxv16i32( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -544,12 +407,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv16i32( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv16i32_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -566,11 +423,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv1f32( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv1f32_nxv1f32( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -586,12 +438,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv1f32( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv1f32_nxv1f32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -608,11 +454,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv2f32( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv2f32_nxv2f32( %0, 
ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -628,12 +469,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv2f32( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv2f32_nxv2f32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -650,11 +485,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv4f32( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv4f32_nxv4f32( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -670,12 +500,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv4f32( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv4f32_nxv4f32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -692,11 +516,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv8f32( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv8f32_nxv8f32( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -712,12 +531,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv8f32( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv8f32_nxv8f32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -734,11 +547,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv16f32( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv16f32_nxv16f32( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -754,12 +562,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv16f32( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv16f32_nxv16f32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ 
-776,11 +578,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv1i16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv1i16_nxv1i16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -796,12 +593,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv1i16_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -818,11 +609,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv2i16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv2i16_nxv2i16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -838,12 +624,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv2i16_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -860,11 +640,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv4i16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv4i16_nxv4i16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -880,12 +655,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv4i16_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -902,11 +671,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv8i16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv8i16_nxv8i16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -922,12 +686,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv8i16( - , - ptr, - , - iXLen); - define void 
@intrinsic_vse_mask_v_nxv8i16_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -944,11 +702,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv16i16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv16i16_nxv16i16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -964,12 +717,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv16i16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv16i16_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -986,11 +733,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv32i16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv32i16_nxv32i16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1006,12 +748,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv32i16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv32i16_nxv32i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1028,11 +764,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv1f16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv1f16_nxv1f16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1048,12 +779,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv1f16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv1f16_nxv1f16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1070,11 +795,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv2f16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv2f16_nxv2f16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vse_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -1090,12 +810,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv2f16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv2f16_nxv2f16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -1112,11 +826,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv4f16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv4f16_nxv4f16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -1132,12 +841,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv4f16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv4f16_nxv4f16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -1154,11 +857,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv8f16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv8f16_nxv8f16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -1174,12 +872,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv8f16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv8f16_nxv8f16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -1196,11 +888,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv16f16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv16f16_nxv16f16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -1216,12 +903,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv16f16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv16f16_nxv16f16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -1238,11 +919,6 @@ entry: ret void } 
-declare void @llvm.riscv.vse.nxv32f16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv32f16_nxv32f16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -1258,12 +934,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv32f16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv32f16_nxv32f16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -1280,11 +950,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv1bf16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv1bf16_nxv1bf16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -1300,12 +965,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv1bf16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv1bf16_nxv1bf16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -1322,11 +981,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv2bf16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv2bf16_nxv2bf16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -1342,12 +996,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv2bf16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv2bf16_nxv2bf16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -1364,11 +1012,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv4bf16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv4bf16_nxv4bf16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -1384,12 +1027,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv4bf16( - , - ptr, - , - iXLen); - define void 
@intrinsic_vse_mask_v_nxv4bf16_nxv4bf16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -1406,11 +1043,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv8bf16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv8bf16_nxv8bf16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -1426,12 +1058,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv8bf16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv8bf16_nxv8bf16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -1448,11 +1074,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv16bf16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv16bf16_nxv16bf16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -1468,12 +1089,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv16bf16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv16bf16_nxv16bf16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -1490,11 +1105,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv32bf16( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv32bf16_nxv32bf16( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -1510,12 +1120,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv32bf16( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv32bf16_nxv32bf16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -1532,11 +1136,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv1i8( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv1i8_nxv1i8( %0, ptr %1, iXLen %2) nounwind { 
; CHECK-LABEL: intrinsic_vse_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1552,12 +1151,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv1i8_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1574,11 +1167,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv2i8( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv2i8_nxv2i8( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1594,12 +1182,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv2i8_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1616,11 +1198,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv4i8( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv4i8_nxv4i8( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1636,12 +1213,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv4i8_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1658,11 +1229,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv8i8( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv8i8_nxv8i8( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1678,12 +1244,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv8i8_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1700,11 +1260,6 @@ entry: ret void } -declare void 
@llvm.riscv.vse.nxv16i8( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv16i8_nxv16i8( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1720,12 +1275,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv16i8( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv16i8_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1742,11 +1291,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv32i8( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv32i8_nxv32i8( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1762,12 +1306,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv32i8( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv32i8_nxv32i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1784,11 +1322,6 @@ entry: ret void } -declare void @llvm.riscv.vse.nxv64i8( - , - ptr, - iXLen); - define void @intrinsic_vse_v_nxv64i8_nxv64i8( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -1804,12 +1337,6 @@ entry: ret void } -declare void @llvm.riscv.vse.mask.nxv64i8( - , - ptr, - , - iXLen); - define void @intrinsic_vse_mask_v_nxv64i8_nxv64i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-vp-bf16.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-vp-bf16.ll index 76fd1e1d8293f..587c577c6e5c5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vselect-vp-bf16.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vselect-vp-bf16.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+m,+v,+zvfbfmin -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare @llvm.vp.select.nxv1bf16(, , , i32) - 
define @select_nxv1bf16( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv1bf16: ; CHECK: # %bb.0: @@ -16,8 +14,6 @@ define @select_nxv1bf16( %a, %v } -declare @llvm.vp.select.nxv2bf16(, , , i32) - define @select_nxv2bf16( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv2bf16: ; CHECK: # %bb.0: @@ -28,8 +24,6 @@ define @select_nxv2bf16( %a, %v } -declare @llvm.vp.select.nxv4bf16(, , , i32) - define @select_nxv4bf16( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv4bf16: ; CHECK: # %bb.0: @@ -40,8 +34,6 @@ define @select_nxv4bf16( %a, %v } -declare @llvm.vp.select.nxv8bf16(, , , i32) - define @select_nxv8bf16( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv8bf16: ; CHECK: # %bb.0: @@ -52,8 +44,6 @@ define @select_nxv8bf16( %a, %v } -declare @llvm.vp.select.nxv16bf16(, , , i32) - define @select_nxv16bf16( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv16bf16: ; CHECK: # %bb.0: @@ -64,8 +54,6 @@ define @select_nxv16bf16( %a, %v } -declare @llvm.vp.select.nxv32bf16(, , , i32) - define @select_nxv32bf16( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv32bf16: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll index 5cecd3cae4d2a..d1933560f2698 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll @@ -8,8 +8,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d,+m,+zvfhmin,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.select.nxv1i1(, , , i32) - define @select_nxv1i1( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv1i1: ; CHECK: # %bb.0: @@ -22,8 +20,6 @@ define @select_nxv1i1( %a, ret %v } -declare @llvm.vp.select.nxv2i1(, , , i32) - define @select_nxv2i1( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv2i1: ; CHECK: # %bb.0: @@ -36,8 +32,6 @@ define @select_nxv2i1( %a, ret %v } -declare @llvm.vp.select.nxv4i1(, , , i32) - 
define @select_nxv4i1( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv4i1: ; CHECK: # %bb.0: @@ -50,8 +44,6 @@ define @select_nxv4i1( %a, ret %v } -declare @llvm.vp.select.nxv8i1(, , , i32) - define @select_nxv8i1( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv8i1: ; CHECK: # %bb.0: @@ -64,8 +56,6 @@ define @select_nxv8i1( %a, ret %v } -declare @llvm.vp.select.nxv16i1(, , , i32) - define @select_nxv16i1( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv16i1: ; CHECK: # %bb.0: @@ -78,8 +68,6 @@ define @select_nxv16i1( %a, %v } -declare @llvm.vp.select.nxv32i1(, , , i32) - define @select_nxv32i1( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv32i1: ; CHECK: # %bb.0: @@ -92,8 +80,6 @@ define @select_nxv32i1( %a, %v } -declare @llvm.vp.select.nxv64i1(, , , i32) - define @select_nxv64i1( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv64i1: ; CHECK: # %bb.0: @@ -106,8 +92,6 @@ define @select_nxv64i1( %a, %v } -declare @llvm.vp.select.nxv8i7(, , , i32) - define @select_nxv8i7( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv8i7: ; CHECK: # %bb.0: @@ -118,8 +102,6 @@ define @select_nxv8i7( %a, ret %v } -declare @llvm.vp.select.nxv1i8(, , , i32) - define @select_nxv1i8( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv1i8: ; CHECK: # %bb.0: @@ -130,8 +112,6 @@ define @select_nxv1i8( %a, ret %v } -declare @llvm.vp.select.nxv2i8(, , , i32) - define @select_nxv2i8( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv2i8: ; CHECK: # %bb.0: @@ -142,8 +122,6 @@ define @select_nxv2i8( %a, ret %v } -declare @llvm.vp.select.nxv4i8(, , , i32) - define @select_nxv4i8( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv4i8: ; CHECK: # %bb.0: @@ -154,8 +132,6 @@ define @select_nxv4i8( %a, ret %v } -declare @llvm.vp.select.nxv8i8(, , , i32) - define @select_nxv8i8( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv8i8: ; CHECK: # %bb.0: @@ -166,8 +142,6 @@ define @select_nxv8i8( %a, ret %v } -declare 
@llvm.vp.select.nxv14i8(, , , i32) - define @select_nxv14i8( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv14i8: ; CHECK: # %bb.0: @@ -178,8 +152,6 @@ define @select_nxv14i8( %a, %v } -declare @llvm.vp.select.nxv16i8(, , , i32) - define @select_nxv16i8( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv16i8: ; CHECK: # %bb.0: @@ -190,8 +162,6 @@ define @select_nxv16i8( %a, %v } -declare @llvm.vp.select.nxv32i8(, , , i32) - define @select_nxv32i8( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv32i8: ; CHECK: # %bb.0: @@ -202,8 +172,6 @@ define @select_nxv32i8( %a, %v } -declare @llvm.vp.select.nxv64i8(, , , i32) - define @select_nxv64i8( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv64i8: ; CHECK: # %bb.0: @@ -214,8 +182,6 @@ define @select_nxv64i8( %a, %v } -declare @llvm.vp.select.nxv1i16(, , , i32) - define @select_nxv1i16( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv1i16: ; CHECK: # %bb.0: @@ -226,8 +192,6 @@ define @select_nxv1i16( %a, %v } -declare @llvm.vp.select.nxv2i16(, , , i32) - define @select_nxv2i16( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv2i16: ; CHECK: # %bb.0: @@ -238,8 +202,6 @@ define @select_nxv2i16( %a, %v } -declare @llvm.vp.select.nxv4i16(, , , i32) - define @select_nxv4i16( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv4i16: ; CHECK: # %bb.0: @@ -250,8 +212,6 @@ define @select_nxv4i16( %a, %v } -declare @llvm.vp.select.nxv8i16(, , , i32) - define @select_nxv8i16( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv8i16: ; CHECK: # %bb.0: @@ -262,8 +222,6 @@ define @select_nxv8i16( %a, %v } -declare @llvm.vp.select.nxv16i16(, , , i32) - define @select_nxv16i16( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv16i16: ; CHECK: # %bb.0: @@ -274,8 +232,6 @@ define @select_nxv16i16( %a, %v } -declare @llvm.vp.select.nxv32i16(, , , i32) - define @select_nxv32i16( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv32i16: ; CHECK: # %bb.0: @@ -286,8 
+242,6 @@ define @select_nxv32i16( %a, %v } -declare @llvm.vp.select.nxv1i32(, , , i32) - define @select_nxv1i32( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv1i32: ; CHECK: # %bb.0: @@ -298,8 +252,6 @@ define @select_nxv1i32( %a, %v } -declare @llvm.vp.select.nxv2i32(, , , i32) - define @select_nxv2i32( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv2i32: ; CHECK: # %bb.0: @@ -310,8 +262,6 @@ define @select_nxv2i32( %a, %v } -declare @llvm.vp.select.nxv4i32(, , , i32) - define @select_nxv4i32( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv4i32: ; CHECK: # %bb.0: @@ -322,8 +272,6 @@ define @select_nxv4i32( %a, %v } -declare @llvm.vp.select.nxv8i32(, , , i32) - define @select_nxv8i32( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv8i32: ; CHECK: # %bb.0: @@ -334,8 +282,6 @@ define @select_nxv8i32( %a, %v } -declare @llvm.vp.select.nxv16i32(, , , i32) - define @select_nxv16i32( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv16i32: ; CHECK: # %bb.0: @@ -346,8 +292,6 @@ define @select_nxv16i32( %a, %v } -declare @llvm.vp.select.nxv32i32(, , , i32) - define @select_nxv32i32( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv32i32: ; CHECK: # %bb.0: @@ -395,8 +339,6 @@ define @select_nxv32i32( %a, %v } -declare i32 @llvm.vscale.i32() - define @select_evl_nxv32i32( %a, %b, %c) { ; RV32-LABEL: select_evl_nxv32i32: ; RV32: # %bb.0: @@ -467,8 +409,6 @@ define @select_evl_nxv32i32( %a, %v } -declare @llvm.vp.select.nxv1i64(, , , i32) - define @select_nxv1i64( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv1i64: ; CHECK: # %bb.0: @@ -479,8 +419,6 @@ define @select_nxv1i64( %a, %v } -declare @llvm.vp.select.nxv2i64(, , , i32) - define @select_nxv2i64( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv2i64: ; CHECK: # %bb.0: @@ -513,8 +451,6 @@ define @select_nxv2i64_constant_false( %a, < ret %v } -declare @llvm.vp.select.nxv4i64(, , , i32) - define @select_nxv4i64( %a, %b, %c, i32 zeroext %evl) { ; 
CHECK-LABEL: select_nxv4i64: ; CHECK: # %bb.0: @@ -525,8 +461,6 @@ define @select_nxv4i64( %a, %v } -declare @llvm.vp.select.nxv8i64(, , , i32) - define @select_nxv8i64( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv8i64: ; CHECK: # %bb.0: @@ -537,8 +471,6 @@ define @select_nxv8i64( %a, %v } -declare @llvm.vp.select.nxv1f16(, , , i32) - define @select_nxv1f16( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv1f16: ; CHECK: # %bb.0: @@ -549,8 +481,6 @@ define @select_nxv1f16( %a, %v } -declare @llvm.vp.select.nxv2f16(, , , i32) - define @select_nxv2f16( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv2f16: ; CHECK: # %bb.0: @@ -561,8 +491,6 @@ define @select_nxv2f16( %a, %v } -declare @llvm.vp.select.nxv4f16(, , , i32) - define @select_nxv4f16( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv4f16: ; CHECK: # %bb.0: @@ -573,8 +501,6 @@ define @select_nxv4f16( %a, %v } -declare @llvm.vp.select.nxv8f16(, , , i32) - define @select_nxv8f16( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv8f16: ; CHECK: # %bb.0: @@ -585,8 +511,6 @@ define @select_nxv8f16( %a, %v } -declare @llvm.vp.select.nxv16f16(, , , i32) - define @select_nxv16f16( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv16f16: ; CHECK: # %bb.0: @@ -597,8 +521,6 @@ define @select_nxv16f16( %a, %v } -declare @llvm.vp.select.nxv32f16(, , , i32) - define @select_nxv32f16( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv32f16: ; CHECK: # %bb.0: @@ -609,8 +531,6 @@ define @select_nxv32f16( %a, %v } -declare @llvm.vp.select.nxv1f32(, , , i32) - define @select_nxv1f32( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv1f32: ; CHECK: # %bb.0: @@ -621,8 +541,6 @@ define @select_nxv1f32( %a, %v } -declare @llvm.vp.select.nxv2f32(, , , i32) - define @select_nxv2f32( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv2f32: ; CHECK: # %bb.0: @@ -633,8 +551,6 @@ define @select_nxv2f32( %a, %v } -declare @llvm.vp.select.nxv4f32(, , , i32) - define 
@select_nxv4f32( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv4f32: ; CHECK: # %bb.0: @@ -645,8 +561,6 @@ define @select_nxv4f32( %a, %v } -declare @llvm.vp.select.nxv8f32(, , , i32) - define @select_nxv8f32( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv8f32: ; CHECK: # %bb.0: @@ -657,8 +571,6 @@ define @select_nxv8f32( %a, %v } -declare @llvm.vp.select.nxv16f32(, , , i32) - define @select_nxv16f32( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv16f32: ; CHECK: # %bb.0: @@ -669,8 +581,6 @@ define @select_nxv16f32( %a, %v } -declare @llvm.vp.select.nxv1f64(, , , i32) - define @select_nxv1f64( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv1f64: ; CHECK: # %bb.0: @@ -681,8 +591,6 @@ define @select_nxv1f64( %a, %v } -declare @llvm.vp.select.nxv2f64(, , , i32) - define @select_nxv2f64( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv2f64: ; CHECK: # %bb.0: @@ -693,8 +601,6 @@ define @select_nxv2f64( %a, %v } -declare @llvm.vp.select.nxv4f64(, , , i32) - define @select_nxv4f64( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv4f64: ; CHECK: # %bb.0: @@ -705,8 +611,6 @@ define @select_nxv4f64( %a, %v } -declare @llvm.vp.select.nxv8f64(, , , i32) - define @select_nxv8f64( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv8f64: ; CHECK: # %bb.0: @@ -717,8 +621,6 @@ define @select_nxv8f64( %a, %v } -declare @llvm.vp.select.nxv16f64(, , , i32) - define @select_nxv16f64( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv16f64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvl-ext.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvl-ext.ll index 27d76bf41912e..a76427dfa3e96 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsetvl-ext.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsetvl-ext.ll @@ -1,9 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc < %s -mtriple=riscv64 -mattr=+v | FileCheck %s -declare i64 @llvm.riscv.vsetvli( - i64, i64, i64); - define signext i32 
@vsetvl_sext() { ; CHECK-LABEL: vsetvl_sext: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-O0.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-O0.ll index 4fed1c2cd0522..71710432b7e42 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-O0.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-O0.ll @@ -2,19 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+m,+f,+d,+a,+c,+v \ ; RUN: -target-abi=lp64d -verify-machineinstrs -O0 < %s | FileCheck %s -declare i64 @llvm.riscv.vsetvli(i64, i64, i64) -declare i64 @llvm.riscv.vsetvlimax(i64, i64) -declare @llvm.riscv.vfadd.nxv1f64.nxv1f64( - , - , - , - i64, i64) -declare @llvm.riscv.vle.mask.nxv1i64( - , - ptr, - , - i64, i64) - define <2 x double> @fixed_length(<2 x double> %a, <2 x double> %b) nounwind { ; CHECK-LABEL: fixed_length: ; CHECK: # %bb.0: # %entry @@ -49,7 +36,6 @@ entry: ret %2 } - define @intrinsic_same_vlmax( %a, %b) nounwind { ; CHECK-LABEL: intrinsic_same_vlmax: ; CHECK: # %bb.0: # %entry @@ -77,7 +63,6 @@ entry: ret %2 } - define @intrinsic_same_avl_imm( %a, %b) nounwind { ; CHECK-LABEL: intrinsic_same_avl_imm: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll index 05b76ec7733bb..60a291402b551 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll @@ -5,21 +5,6 @@ ; The following tests check whether inserting VSETVLI avoids inserting ; unneeded vsetvlis across basic blocks. 
-declare i64 @llvm.riscv.vsetvli(i64, i64, i64) - -declare @llvm.riscv.vfadd.nxv1f64.nxv1f64(, , , i64, i64) -declare @llvm.riscv.vfadd.nxv2f32.nxv2f32(, , , i64, i64) - -declare @llvm.riscv.vfsub.nxv1f64.nxv1f64(, , , i64, i64) - -declare @llvm.riscv.vfmul.nxv1f64.nxv1f64(, , , i64, i64) - -declare @llvm.riscv.vfmv.v.f.nxv1f64.f64(, double, i64) -declare @llvm.riscv.vfmv.v.f.nxv2f32.f32( , float, i64) - -declare void @llvm.riscv.vse.nxv1f64(, ptr nocapture, i64) -declare void @llvm.riscv.vse.nxv2f32(, ptr nocapture, i64) - define @test1(i64 %avl, i8 zeroext %cond, %a, %b) nounwind { ; CHECK-LABEL: test1: ; CHECK: # %bb.0: # %entry @@ -533,11 +518,6 @@ for.end: ; preds = %for.body, %entry ret void } -declare i64 @llvm.riscv.vsetvli.i64(i64, i64 immarg, i64 immarg) -declare @llvm.riscv.vle.nxv16f32.i64(, ptr nocapture, i64) -declare @llvm.riscv.vfmacc.nxv16f32.f32.i64(, float, , i64, i64, i64) -declare void @llvm.riscv.vse.nxv16f32.i64(, ptr nocapture, i64) - ; We need a vsetvli in the last block because the predecessors have different ; VTYPEs. The AVL is the same and the SEW/LMUL ratio implies the same VLMAX so ; we don't need to read AVL and can keep VL unchanged. @@ -570,10 +550,6 @@ if.end: %e = call @llvm.riscv.vadd.nxv2i32( poison, %a, %d, i64 %vl) ret %e } -declare @llvm.riscv.vle.nxv2i32(, ptr, i64) -declare @llvm.riscv.vle.nxv2i16(, ptr, i64) -declare @llvm.riscv.vwadd.nxv2i32(, , i16, i64) -declare @llvm.riscv.vadd.nxv2i32(, , , i64) ; We can use X0, X0 vsetvli in if2 and if2.end. The merge point as if.end will ; see two different vtypes with the same SEW/LMUL ratio. At if2.end we will only @@ -625,7 +601,6 @@ if2.end: %h = call @llvm.riscv.vadd.nxv2i32( poison, %g, %w, i64 %vl) ret %h } -declare @llvm.riscv.vwadd.w.nxv2i32.nxv2i16(, , , i64) ; We should only need 1 vsetvli for this code. 
define void @vlmax(i64 %N, ptr %c, ptr %a, ptr %b) { @@ -1018,18 +993,6 @@ exit: ret void } -declare i64 @llvm.riscv.vsetvlimax.i64(i64, i64) -declare @llvm.riscv.vle.nxv1f64.i64(, ptr nocapture, i64) -declare @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64(, , , i64, i64) -declare void @llvm.riscv.vse.nxv1f64.i64(, ptr nocapture, i64) -declare @llvm.riscv.vadd.mask.nxv4i32.nxv4i32( - , - , - , - , - i64, - i64); - ; Normally a pseudo's AVL is already live in its block, so it will already be ; live where we're inserting the vsetvli, before the pseudo. In some cases the ; AVL can be from a predecessor block, so make sure we extend its live range diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir index e09fc1828fec5..a35100654432c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir +++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir @@ -46,8 +46,6 @@ ret void } - declare i64 @llvm.riscv.vmv.x.s.nxv1i64() #1 - define i64 @vmv_x_s(i8 zeroext %cond, %0, %1, i64 %2) #0 { entry: %tobool = icmp eq i8 %cond, 0 @@ -67,8 +65,6 @@ ret i64 %d } - declare i64 @llvm.riscv.vsetvli.i64(i64, i64 immarg, i64 immarg) #2 - define @vsetvli_add_or_sub(i8 zeroext %cond, %0, %1, i64 %avl) #0 { entry: %vl = call i64 @llvm.riscv.vsetvli.i64(i64 %avl, i64 3, i64 0) @@ -146,27 +142,7 @@ ret void } - declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>) - - declare @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(, , , i64) #1 - - declare @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(, , , i64) #1 - - declare @llvm.riscv.vle.nxv1i64.i64(, ptr nocapture, i64) #3 - - declare @llvm.riscv.vle.nxv1i32.i64(, ptr nocapture, i64) #3 - - declare void @llvm.riscv.vse.nxv1i64.i64(, ptr nocapture, i64) #4 - - declare @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(, , i64) #1 - - declare @llvm.riscv.vsext.nxv1i64.nxv1i32.i64(, , i64) #1 - attributes #0 = { "target-features"="+v" } - attributes #1 = { nounwind readnone } - attributes #2 = { nounwind } 
- attributes #3 = { nounwind readonly } - attributes #4 = { nounwind writeonly } ... --- diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll index 2293a1e6979f4..5b56bfc535b75 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll @@ -2,19 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+m,+f,+d,+a,+c,+v -verify-machineinstrs -O2 < %s | FileCheck %s --check-prefixes=CHECK,NODEPVL ; RUN: llc -mtriple=riscv64 -mattr=+m,+f,+d,+a,+c,+v,+vl-dependent-latency -verify-machineinstrs -O2 < %s | FileCheck %s --check-prefixes=CHECK,DEPVL -declare i64 @llvm.riscv.vsetvli(i64, i64, i64) -declare i64 @llvm.riscv.vsetvlimax(i64, i64) -declare @llvm.riscv.vfadd.nxv1f64.nxv1f64( - , - , - , - i64, i64) -declare @llvm.riscv.vle.mask.nxv1i64( - , - ptr, - , - i64, i64) - define @test1(i64 %avl, %a, %b) nounwind { ; CHECK-LABEL: test1: ; CHECK: # %bb.0: # %entry @@ -95,8 +82,6 @@ entry: %b = call @llvm.riscv.vmand.nxv1i1.i64( %a, %2, i64 %vl) ret %b } -declare @llvm.riscv.vmseq.nxv1i64.i64(, , i64) -declare @llvm.riscv.vmand.nxv1i1.i64(, , i64) ; Make sure we don't insert a vsetvli for the vmor instruction. 
define void @test6(ptr nocapture readonly %A, ptr nocapture %B, i64 %n) { @@ -300,7 +285,6 @@ entry: ret %f2 } - @gdouble = external global double define @test16(i64 %avl, double %a, %b) nounwind { @@ -346,7 +330,6 @@ entry: ret double %c3 } - define @test18( %a, double %b) nounwind { ; CHECK-LABEL: test18: ; CHECK: # %bb.0: # %entry @@ -431,7 +414,6 @@ entry: ret i64 %vl } - ; %vl is intentionally used only once define void @avl_forward3( %v, ptr %p, i64 %reg) nounwind { ; CHECK-LABEL: avl_forward3: @@ -529,12 +511,6 @@ entry: ret %5 } -declare { , i64 } @llvm.riscv.vleff.nxv1i64.i64( - , ptr nocapture, i64) - -declare @llvm.riscv.vmseq.nxv1i64.i64.i64( - , i64, i64) - ; Ensure AVL register is alive when forwarding an AVL immediate that does not fit in 5 bits define @avl_forward5(ptr %addr) { ; CHECK-LABEL: avl_forward5: @@ -549,8 +525,6 @@ define @avl_forward5(ptr %addr) { ret %ret } -declare @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32(, , , i64, i64) - define @test20(i64 %avl, %a, %b, %c) nounwind { ; CHECK-LABEL: test20: ; CHECK: # %bb.0: # %entry @@ -590,7 +564,6 @@ bb: ret i64 %tmp2 } - define void @add_v128i8(ptr %x, ptr %y) vscale_range(2,2) { ; CHECK-LABEL: add_v128i8: ; CHECK: # %bb.0: @@ -649,55 +622,6 @@ define dso_local @int_reduction_vmv_s_x(i32 signext %0, %6 } -declare @llvm.riscv.vfmv.s.f.nxv8f32.i64(, float, i64) -declare @llvm.vector.extract.nxv2f32.nxv8f32(, i64) -declare @llvm.riscv.vfredusum.nxv2f32.nxv8f32.i64(, , , i64, i64) - -declare @llvm.riscv.vmv.s.x.nxv8i32.i64(, i32, i64) #1 -declare @llvm.vector.extract.nxv2i32.nxv8i32(, i64 immarg) #2 -declare @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64(, , , i64) #1 - -declare @llvm.riscv.vadd.mask.nxv1i64.nxv1i64( - , - , - , - , - i64, - i64); - -declare @llvm.riscv.vadd.nxv1i64.i64.i64( - , - , - i64, - i64); - -declare @llvm.riscv.vfadd.mask.nxv1f64.f64( - , - , - , - , - i64, - i64, - i64); - -declare @llvm.riscv.vmv.s.x.nxv1i64( - , - i64, - i64); - -declare @llvm.riscv.vfmv.s.f.nxv1f64 - (, - 
double, - i64) - -declare i64 @llvm.riscv.vsetvli.i64(i64, i64 immarg, i64 immarg) -declare @llvm.riscv.vle.nxv2i32.i64(, ptr nocapture, i64) -declare @llvm.riscv.vmslt.nxv2i32.i32.i64(, i32, i64) -declare @llvm.riscv.vmsgt.nxv2i32.i32.i64(, i32, i64) -declare @llvm.riscv.vmor.nxv2i1.i64(, , i64) -declare void @llvm.riscv.vse.mask.nxv2i32.i64(, ptr nocapture, , i64) -declare void @llvm.riscv.vse.nxv2i32.i64(, ptr nocapture, i64) - define @avl_undef1(, , ) { ; CHECK-LABEL: avl_undef1: ; CHECK: # %bb.0: @@ -814,7 +738,6 @@ entry: ret %2 } - define @vmv.v.x_vl1() nounwind { ; NODEPVL-LABEL: vmv.v.x_vl1: ; NODEPVL: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir index 396ca517e4017..6e6b708dad694 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir +++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir @@ -27,8 +27,6 @@ ret %b } - declare i64 @llvm.riscv.vmv.x.s.nxv1i64() #1 - define i64 @vmv_x_s( %0) #0 { entry: %a = call i64 @llvm.riscv.vmv.x.s.nxv1i64( %0) @@ -43,16 +41,12 @@ ret void } - declare i64 @llvm.vector.reduce.add.v2i64(<2 x i64>) #2 - define i64 @vreduce_add_v2i64(ptr %x) #0 { %v = load <2 x i64>, ptr %x, align 16 %red = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %v) ret i64 %red } - declare i64 @llvm.riscv.vsetvli.i64(i64, i64 immarg, i64 immarg) #3 - define @vsetvli_add( %0, %1, i64 %avl) #0 { entry: %a = call i64 @llvm.riscv.vsetvli.i64(i64 %avl, i64 3, i64 0) @@ -112,19 +106,7 @@ ret void } - declare @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(, , , i64) #1 - - declare @llvm.riscv.vle.nxv1i64.i64(, ptr nocapture, i64) #4 - - declare @llvm.riscv.vle.nxv1i32.i64(, ptr nocapture, i64) #4 - - declare @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(, , i64) #1 - attributes #0 = { "target-features"="+v" } - attributes #1 = { nounwind readnone } - attributes #2 = { nofree nosync nounwind readnone willreturn } - attributes #3 = { nounwind } - attributes #4 = { nounwind readonly } ... 
--- diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-intrinsics.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-intrinsics.ll index a4d372d0b5a62..3e8c9006a9d2b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-intrinsics.ll @@ -10,9 +10,6 @@ ; RUN: -riscv-v-vector-bits-max=128 -verify-machineinstrs \ ; RUN: | FileCheck %s --check-prefixes=CHECK,VLEN128 -declare iXLen @llvm.riscv.vsetvli.iXLen(iXLen, iXLen, iXLen) -declare iXLen @llvm.riscv.vsetvlimax.iXLen(iXLen, iXLen) - define iXLen @test_vsetvli_e8m1(iXLen %avl) nounwind { ; CHECK-LABEL: test_vsetvli_e8m1: ; CHECK: # %bb.0: @@ -102,8 +99,6 @@ define void @test_vsetvlimax_e32m2_nouse() nounwind { ret void } -declare @llvm.riscv.vle.nxv4i32.iXLen(, ptr, iXLen) - ; Check that we remove the redundant vsetvli when followed by another operation define @redundant_vsetvli(iXLen %avl, ptr %ptr) nounwind { ; CHECK-LABEL: redundant_vsetvli: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-valid-elen-fp.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-valid-elen-fp.ll index f92f5e934f9f4..2000cd81157a9 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-valid-elen-fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-valid-elen-fp.ll @@ -31,9 +31,6 @@ entry: ret void } -declare @llvm.riscv.vle.nxv1i64.i64(, ptr nocapture, i64) -declare @llvm.riscv.vfmv.s.f.nxv4f16.i64(, half, i64) - define void @bar(half %y, ptr %i32p) { ; CHECK-NO-FELEN64-LABEL: bar: ; CHECK-NO-FELEN64: # %bb.0: # %entry @@ -61,4 +58,3 @@ entry: ret void } -declare @llvm.riscv.vle.nxv2i32.i64(, ptr nocapture, i64) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvlmax-ext.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvlmax-ext.ll index 09162b55c7079..c64201462ad11 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsetvlmax-ext.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsetvlmax-ext.ll @@ -1,8 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc < %s -mtriple=riscv64 -mattr=+v | FileCheck %s -declare i64 
@llvm.riscv.vsetvlimax(i64, i64); - define signext i32 @vsetvlmax_sext() { ; CHECK-LABEL: vsetvlmax_sext: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsext-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vsext-vp-mask.ll index 04aed5d81db99..43fde84600de7 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsext-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsext-vp-mask.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+v < %s | FileCheck %s -declare @llvm.vp.sext.nxv2i16.nxv2i1(, , i32) - define @vsext_nxv2i1_nxv2i16( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vsext_nxv2i1_nxv2i16: ; CHECK: # %bb.0: @@ -26,8 +24,6 @@ define @vsext_nxv2i1_nxv2i16_unmasked( %a, i ret %v } -declare @llvm.vp.sext.nxv2i32.nxv2i1(, , i32) - define @vsext_nxv2i1_nxv2i32( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vsext_nxv2i1_nxv2i32: ; CHECK: # %bb.0: @@ -50,8 +46,6 @@ define @vsext_nxv2i1_nxv2i32_unmasked( %a, i ret %v } -declare @llvm.vp.sext.nxv2i64.nxv2i1(, , i32) - define @vsext_nxv2i1_nxv2i64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vsext_nxv2i1_nxv2i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll index eec2a5f3efcfb..07411b1c7ae08 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+v < %s | FileCheck %s -declare @llvm.vp.sext.nxv2i16.nxv2i8(, , i32) - define @vsext_nxv2i8_nxv2i16( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vsext_nxv2i8_nxv2i16: ; CHECK: # %bb.0: @@ -26,8 +24,6 @@ define @vsext_nxv2i8_nxv2i16_unmasked( %a, i ret %v } -declare @llvm.vp.sext.nxv2i32.nxv2i8(, , i32) - define @vsext_nxv2i8_nxv2i32( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vsext_nxv2i8_nxv2i32: ; CHECK: # %bb.0: @@ -50,8 +46,6 @@ define @vsext_nxv2i8_nxv2i32_unmasked( %a, i ret %v } -declare @llvm.vp.sext.nxv2i64.nxv2i8(, 
, i32) - define @vsext_nxv2i8_nxv2i64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vsext_nxv2i8_nxv2i64: ; CHECK: # %bb.0: @@ -74,8 +68,6 @@ define @vsext_nxv2i8_nxv2i64_unmasked( %a, i ret %v } -declare @llvm.vp.sext.nxv2i32.nxv2i16(, , i32) - define @vsext_nxv2i16_nxv2i32( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vsext_nxv2i16_nxv2i32: ; CHECK: # %bb.0: @@ -98,8 +90,6 @@ define @vsext_nxv2i16_nxv2i32_unmasked( %a, ret %v } -declare @llvm.vp.sext.nxv2i64.nxv2i16(, , i32) - define @vsext_nxv2i16_nxv2i64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vsext_nxv2i16_nxv2i64: ; CHECK: # %bb.0: @@ -122,8 +112,6 @@ define @vsext_nxv2i16_nxv2i64_unmasked( %a, ret %v } -declare @llvm.vp.sext.nxv2i64.nxv2i32(, , i32) - define @vsext_nxv2i32_nxv2i64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vsext_nxv2i32_nxv2i64: ; CHECK: # %bb.0: @@ -146,8 +134,6 @@ define @vsext_nxv2i32_nxv2i64_unmasked( %a, ret %v } -declare @llvm.vp.sext.nxv32i32.nxv32i8(, , i32) - define @vsext_nxv32i8_nxv32i32( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vsext_nxv32i8_nxv32i32: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsext.ll b/llvm/test/CodeGen/RISCV/rvv/vsext.ll index fdc394189bce5..d7ebc209bd870 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsext.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsext.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vsext.nxv1i64.nxv1i8( - , - , - iXLen); - define @intrinsic_vsext_vf8_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -25,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv1i64.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf8_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -48,11 +36,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv2i64.nxv2i8( - , - , - iXLen); - 
define @intrinsic_vsext_vf8_nxv2i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf8_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -69,13 +52,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv2i64.nxv2i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf8_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf8_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -92,11 +68,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv4i64.nxv4i8( - , - , - iXLen); - define @intrinsic_vsext_vf8_nxv4i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf8_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -113,13 +84,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv4i64.nxv4i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf8_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf8_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -136,11 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv8i64.nxv8i8( - , - , - iXLen); - define @intrinsic_vsext_vf8_nxv8i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf8_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -157,13 +116,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv8i64.nxv8i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf8_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf8_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -180,11 +132,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv1i64.nxv1i16( - , - , - iXLen); - define @intrinsic_vsext_vf4_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf4_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -201,13 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv1i64.nxv1i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf4_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -224,11 +164,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv2i64.nxv2i16( - , - , - 
iXLen); - define @intrinsic_vsext_vf4_nxv2i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf4_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -245,13 +180,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv2i64.nxv2i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf4_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -268,11 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv4i64.nxv4i16( - , - , - iXLen); - define @intrinsic_vsext_vf4_nxv4i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf4_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -289,13 +212,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv4i64.nxv4i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf4_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -312,11 +228,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv8i64.nxv8i16( - , - , - iXLen); - define @intrinsic_vsext_vf4_nxv8i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf4_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -333,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv8i64.nxv8i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf4_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -356,11 +260,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv1i32.nxv1i8( - , - , - iXLen); - define @intrinsic_vsext_vf4_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf4_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -377,13 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv1i32.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf4_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -400,11 +292,6 @@ entry: ret %a } -declare 
@llvm.riscv.vsext.nxv2i32.nxv2i8( - , - , - iXLen); - define @intrinsic_vsext_vf4_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf4_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -421,13 +308,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv2i32.nxv2i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf4_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -444,11 +324,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv4i32.nxv4i8( - , - , - iXLen); - define @intrinsic_vsext_vf4_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf4_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -465,13 +340,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv4i32.nxv4i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf4_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -488,11 +356,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv8i32.nxv8i8( - , - , - iXLen); - define @intrinsic_vsext_vf4_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf4_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -509,13 +372,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv8i32.nxv8i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf4_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -532,11 +388,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv16i32.nxv16i8( - , - , - iXLen); - define @intrinsic_vsext_vf4_nxv16i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf4_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -553,13 +404,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv16i32.nxv16i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf4_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -576,11 +420,6 @@ entry: 
ret %a } -declare @llvm.riscv.vsext.nxv1i64.nxv1i32( - , - , - iXLen); - define @intrinsic_vsext_vf2_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf2_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -597,13 +436,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv1i64.nxv1i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf2_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -620,11 +452,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv2i64.nxv2i32( - , - , - iXLen); - define @intrinsic_vsext_vf2_nxv2i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf2_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -641,13 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv2i64.nxv2i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf2_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -664,11 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv4i64.nxv4i32( - , - , - iXLen); - define @intrinsic_vsext_vf2_nxv4i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf2_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -685,13 +500,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv4i64.nxv4i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf2_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -708,11 +516,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv8i64.nxv8i32( - , - , - iXLen); - define @intrinsic_vsext_vf2_nxv8i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf2_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -729,13 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv8i64.nxv8i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf2_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -752,11 
+548,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv1i32.nxv1i16( - , - , - iXLen); - define @intrinsic_vsext_vf2_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf2_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -773,13 +564,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv1i32.nxv1i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf2_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -796,11 +580,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv2i32.nxv2i16( - , - , - iXLen); - define @intrinsic_vsext_vf2_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf2_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -817,13 +596,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv2i32.nxv2i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf2_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -840,11 +612,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv4i32.nxv4i16( - , - , - iXLen); - define @intrinsic_vsext_vf2_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf2_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -861,13 +628,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv4i32.nxv4i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf2_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -884,11 +644,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv8i32.nxv8i16( - , - , - iXLen); - define @intrinsic_vsext_vf2_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf2_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -905,13 +660,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv8i32.nxv8i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf2_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv8i32: ; CHECK: # %bb.0: # 
%entry @@ -928,11 +676,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv16i32.nxv16i16( - , - , - iXLen); - define @intrinsic_vsext_vf2_nxv16i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf2_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -949,13 +692,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv16i32.nxv16i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf2_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -972,11 +708,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv1i16.nxv1i8( - , - , - iXLen); - define @intrinsic_vsext_vf2_nxv1i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf2_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -993,13 +724,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv1i16.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf2_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1016,11 +740,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv2i16.nxv2i8( - , - , - iXLen); - define @intrinsic_vsext_vf2_nxv2i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf2_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1037,13 +756,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv2i16.nxv2i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf2_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1060,11 +772,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv4i16.nxv4i8( - , - , - iXLen); - define @intrinsic_vsext_vf2_nxv4i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf2_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1081,13 +788,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv4i16.nxv4i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf2_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vsext_mask_vf2_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1104,11 +804,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv8i16.nxv8i8( - , - , - iXLen); - define @intrinsic_vsext_vf2_nxv8i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf2_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1125,13 +820,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv8i16.nxv8i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf2_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1148,11 +836,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv16i16.nxv16i8( - , - , - iXLen); - define @intrinsic_vsext_vf2_nxv16i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf2_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1169,13 +852,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv16i16.nxv16i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf2_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1192,11 +868,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.nxv32i16.nxv32i8( - , - , - iXLen); - define @intrinsic_vsext_vf2_nxv32i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf2_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1213,13 +884,6 @@ entry: ret %a } -declare @llvm.riscv.vsext.mask.nxv32i16.nxv32i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vsext_mask_vf2_nxv32i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv32i16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsha2ch.ll b/llvm/test/CodeGen/RISCV/rvv/vsha2ch.ll index 9674b78ab9fa0..a571ff577079f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsha2ch.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsha2ch.ll @@ -10,13 +10,6 @@ ; CHECK-ERROR: LLVM ERROR: SEW=64 needs Zvknhb to be enabled. 
-declare @llvm.riscv.vsha2ch.nxv4i32.nxv4i32( - , - , - , - iXLen, - iXLen) - define @intrinsic_vsha2ch_vv_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsha2ch_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -34,13 +27,6 @@ entry: ret %a } -declare @llvm.riscv.vsha2ch.nxv8i32.nxv8i32( - , - , - , - iXLen, - iXLen) - define @intrinsic_vsha2ch_vv_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsha2ch_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -58,13 +44,6 @@ entry: ret %a } -declare @llvm.riscv.vsha2ch.nxv16i32.nxv16i32( - , - , - , - iXLen, - iXLen) - define @intrinsic_vsha2ch_vv_nxv16i32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsha2ch_vv_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -83,13 +62,6 @@ entry: ret %a } -declare @llvm.riscv.vsha2ch.nxv4i64.nxv4i64( - , - , - , - iXLen, - iXLen) - define @intrinsic_vsha2ch_vv_nxv4i64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsha2ch_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -107,13 +79,6 @@ entry: ret %a } -declare @llvm.riscv.vsha2ch.nxv8i64.nxv8i64( - , - , - , - iXLen, - iXLen) - define @intrinsic_vsha2ch_vv_nxv8i64_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsha2ch_vv_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsha2cl.ll b/llvm/test/CodeGen/RISCV/rvv/vsha2cl.ll index b45a768b9ce22..b9ae90b8efd42 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsha2cl.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsha2cl.ll @@ -10,13 +10,6 @@ ; CHECK-ERROR: LLVM ERROR: SEW=64 needs Zvknhb to be enabled. 
-declare @llvm.riscv.vsha2cl.nxv4i32.nxv4i32( - , - , - , - iXLen, - iXLen) - define @intrinsic_vsha2cl_vv_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -34,13 +27,6 @@ entry: ret %a } -declare @llvm.riscv.vsha2cl.nxv8i32.nxv8i32( - , - , - , - iXLen, - iXLen) - define @intrinsic_vsha2cl_vv_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -58,13 +44,6 @@ entry: ret %a } -declare @llvm.riscv.vsha2cl.nxv16i32.nxv16i32( - , - , - , - iXLen, - iXLen) - define @intrinsic_vsha2cl_vv_nxv16i32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -83,13 +62,6 @@ entry: ret %a } -declare @llvm.riscv.vsha2cl.nxv4i64.nxv4i64( - , - , - , - iXLen, - iXLen) - define @intrinsic_vsha2cl_vv_nxv4i64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -107,13 +79,6 @@ entry: ret %a } -declare @llvm.riscv.vsha2cl.nxv8i64.nxv8i64( - , - , - , - iXLen, - iXLen) - define @intrinsic_vsha2cl_vv_nxv8i64_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsha2ms.ll b/llvm/test/CodeGen/RISCV/rvv/vsha2ms.ll index ff51acc2f19aa..861211194b588 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsha2ms.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsha2ms.ll @@ -10,13 +10,6 @@ ; CHECK-ERROR: LLVM ERROR: SEW=64 needs Zvknhb to be enabled. 
-declare @llvm.riscv.vsha2ms.nxv4i32.nxv4i32( - , - , - , - iXLen, - iXLen) - define @intrinsic_vsha2ms_vv_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsha2ms_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -34,13 +27,6 @@ entry: ret %a } -declare @llvm.riscv.vsha2ms.nxv8i32.nxv8i32( - , - , - , - iXLen, - iXLen) - define @intrinsic_vsha2ms_vv_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsha2ms_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -58,13 +44,6 @@ entry: ret %a } -declare @llvm.riscv.vsha2ms.nxv16i32.nxv16i32( - , - , - , - iXLen, - iXLen) - define @intrinsic_vsha2ms_vv_nxv16i32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsha2ms_vv_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -83,13 +62,6 @@ entry: ret %a } -declare @llvm.riscv.vsha2ms.nxv4i64.nxv4i64( - , - , - , - iXLen, - iXLen) - define @intrinsic_vsha2ms_vv_nxv4i64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsha2ms_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -107,13 +79,6 @@ entry: ret %a } -declare @llvm.riscv.vsha2ms.nxv8i64.nxv8i64( - , - , - , - iXLen, - iXLen) - define @intrinsic_vsha2ms_vv_nxv8i64_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsha2ms_vv_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vshl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vshl-vp.ll index f5c46aec86b86..b335974a7b9f8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vshl-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vshl-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.shl.nxv8i7(, , , i32) - define @vsll_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vx_nxv8i7: ; CHECK: # %bb.0: @@ -21,8 +19,6 @@ define @vsll_vx_nxv8i7( %a, i7 signext %b, %v } -declare @llvm.vp.shl.nxv1i8(, , , i32) - define @vsll_vv_nxv1i8( %va, %b, %m, i32 
zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv1i8: ; CHECK: # %bb.0: @@ -87,8 +83,6 @@ define @vsll_vi_nxv1i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.shl.nxv2i8(, , , i32) - define @vsll_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv2i8: ; CHECK: # %bb.0: @@ -153,8 +147,6 @@ define @vsll_vi_nxv2i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.shl.nxv4i8(, , , i32) - define @vsll_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv4i8: ; CHECK: # %bb.0: @@ -219,8 +211,6 @@ define @vsll_vi_nxv4i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.shl.nxv5i8(, , , i32) - define @vsll_vv_nxv5i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv5i8: ; CHECK: # %bb.0: @@ -231,8 +221,6 @@ define @vsll_vv_nxv5i8( %va, %v } -declare @llvm.vp.shl.nxv8i8(, , , i32) - define @vsll_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv8i8: ; CHECK: # %bb.0: @@ -297,8 +285,6 @@ define @vsll_vi_nxv8i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.shl.nxv16i8(, , , i32) - define @vsll_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv16i8: ; CHECK: # %bb.0: @@ -363,8 +349,6 @@ define @vsll_vi_nxv16i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.shl.nxv32i8(, , , i32) - define @vsll_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv32i8: ; CHECK: # %bb.0: @@ -429,8 +413,6 @@ define @vsll_vi_nxv32i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.shl.nxv64i8(, , , i32) - define @vsll_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv64i8: ; CHECK: # %bb.0: @@ -495,8 +477,6 @@ define @vsll_vi_nxv64i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.shl.nxv1i16(, , , i32) - define @vsll_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv1i16: ; CHECK: # %bb.0: @@ -561,8 +541,6 @@ define @vsll_vi_nxv1i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.shl.nxv2i16(, , , i32) - define @vsll_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) 
{ ; CHECK-LABEL: vsll_vv_nxv2i16: ; CHECK: # %bb.0: @@ -627,8 +605,6 @@ define @vsll_vi_nxv2i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.shl.nxv4i16(, , , i32) - define @vsll_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv4i16: ; CHECK: # %bb.0: @@ -693,8 +669,6 @@ define @vsll_vi_nxv4i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.shl.nxv8i16(, , , i32) - define @vsll_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv8i16: ; CHECK: # %bb.0: @@ -759,8 +733,6 @@ define @vsll_vi_nxv8i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.shl.nxv16i16(, , , i32) - define @vsll_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv16i16: ; CHECK: # %bb.0: @@ -825,8 +797,6 @@ define @vsll_vi_nxv16i16_unmasked( %va, i ret %v } -declare @llvm.vp.shl.nxv32i16(, , , i32) - define @vsll_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv32i16: ; CHECK: # %bb.0: @@ -891,8 +861,6 @@ define @vsll_vi_nxv32i16_unmasked( %va, i ret %v } -declare @llvm.vp.shl.nxv1i32(, , , i32) - define @vsll_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv1i32: ; CHECK: # %bb.0: @@ -957,8 +925,6 @@ define @vsll_vi_nxv1i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.shl.nxv2i32(, , , i32) - define @vsll_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv2i32: ; CHECK: # %bb.0: @@ -1023,8 +989,6 @@ define @vsll_vi_nxv2i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.shl.nxv4i32(, , , i32) - define @vsll_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv4i32: ; CHECK: # %bb.0: @@ -1089,8 +1053,6 @@ define @vsll_vi_nxv4i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.shl.nxv8i32(, , , i32) - define @vsll_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv8i32: ; CHECK: # %bb.0: @@ -1155,8 +1117,6 @@ define @vsll_vi_nxv8i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.shl.nxv16i32(, , , i32) - define @vsll_vv_nxv16i32( %va, %b, %m, 
i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv16i32: ; CHECK: # %bb.0: @@ -1221,8 +1181,6 @@ define @vsll_vi_nxv16i32_unmasked( %va, i ret %v } -declare @llvm.vp.shl.nxv1i64(, , , i32) - define @vsll_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv1i64: ; CHECK: # %bb.0: @@ -1299,8 +1257,6 @@ define @vsll_vi_nxv1i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.shl.nxv2i64(, , , i32) - define @vsll_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1377,8 +1333,6 @@ define @vsll_vi_nxv2i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.shl.nxv4i64(, , , i32) - define @vsll_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1455,8 +1409,6 @@ define @vsll_vi_nxv4i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.shl.nxv8i64(, , , i32) - define @vsll_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp-mask.ll index ec16e58f6e57d..3de3447f009ec 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp-mask.ll @@ -31,8 +31,6 @@ define @vsitofp_nxv2bf16_nxv2i1_unmasked( ret %v } -declare @llvm.vp.sitofp.nxv2f16.nxv2i1(, , i32) - define @vsitofp_nxv2f16_nxv2i1( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_nxv2f16_nxv2i1: ; CHECK: # %bb.0: @@ -58,8 +56,6 @@ define @vsitofp_nxv2f16_nxv2i1_unmasked( %v ret %v } -declare @llvm.vp.sitofp.nxv2f32.nxv2i1(, , i32) - define @vsitofp_nxv2f32_nxv2i1( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_nxv2f32_nxv2i1: ; CHECK: # %bb.0: @@ -85,8 +81,6 @@ define @vsitofp_nxv2f32_nxv2i1_unmasked( % ret %v } -declare @llvm.vp.sitofp.nxv2f64.nxv2i1(, , i32) - define @vsitofp_nxv2f64_nxv2i1( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_nxv2f64_nxv2i1: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll 
b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll index f69ae3d560ef7..7f96da141c363 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll @@ -20,8 +20,6 @@ define @vsitofp_nxv2bf16_nxv2i7( %va, %v } -declare @llvm.vp.sitofp.nxv2bf16.nxv2i8(, , i32) - define @vsitofp_nxv2bf16_nxv2i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_nxv2bf16_nxv2i8: ; CHECK: # %bb.0: @@ -48,8 +46,6 @@ define @vsitofp_nxv2bf16_nxv2i8_unmasked( ret %v } -declare @llvm.vp.sitofp.nxv2bf16.nxv2i16(, , i32) - define @vsitofp_nxv2bf16_nxv2i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_nxv2bf16_nxv2i16: ; CHECK: # %bb.0: @@ -74,8 +70,6 @@ define @vsitofp_nxv2bf16_nxv2i16_unmasked( %v } -declare @llvm.vp.sitofp.nxv2bf16.nxv2i32(, , i32) - define @vsitofp_nxv2bf16_nxv2i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_nxv2bf16_nxv2i32: ; CHECK: # %bb.0: @@ -100,8 +94,6 @@ define @vsitofp_nxv2bf16_nxv2i32_unmasked( %v } -declare @llvm.vp.sitofp.nxv2bf16.nxv2i64(, , i32) - define @vsitofp_nxv2bf16_nxv2i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_nxv2bf16_nxv2i64: ; CHECK: # %bb.0: @@ -126,8 +118,6 @@ define @vsitofp_nxv2bf16_nxv2i64_unmasked( %v } -declare @llvm.vp.sitofp.nxv2f16.nxv2i7(, , i32) - define @vsitofp_nxv2f16_nxv2i7( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vsitofp_nxv2f16_nxv2i7: ; ZVFH: # %bb.0: @@ -152,8 +142,6 @@ define @vsitofp_nxv2f16_nxv2i7( %va, %v } -declare @llvm.vp.sitofp.nxv2f16.nxv2i8(, , i32) - define @vsitofp_nxv2f16_nxv2i8( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vsitofp_nxv2f16_nxv2i8: ; ZVFH: # %bb.0: @@ -194,8 +182,6 @@ define @vsitofp_nxv2f16_nxv2i8_unmasked( %v ret %v } -declare @llvm.vp.sitofp.nxv2f16.nxv2i16(, , i32) - define @vsitofp_nxv2f16_nxv2i16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vsitofp_nxv2f16_nxv2i16: ; ZVFH: # %bb.0: @@ -232,8 +218,6 @@ define @vsitofp_nxv2f16_nxv2i16_unmasked( ret %v } -declare @llvm.vp.sitofp.nxv2f16.nxv2i32(, , i32) - define 
@vsitofp_nxv2f16_nxv2i32( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vsitofp_nxv2f16_nxv2i32: ; ZVFH: # %bb.0: @@ -272,8 +256,6 @@ define @vsitofp_nxv2f16_nxv2i32_unmasked( ret %v } -declare @llvm.vp.sitofp.nxv2f16.nxv2i64(, , i32) - define @vsitofp_nxv2f16_nxv2i64( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vsitofp_nxv2f16_nxv2i64: ; ZVFH: # %bb.0: @@ -314,8 +296,6 @@ define @vsitofp_nxv2f16_nxv2i64_unmasked( ret %v } -declare @llvm.vp.sitofp.nxv2f32.nxv2i8(, , i32) - define @vsitofp_nxv2f32_nxv2i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_nxv2f32_nxv2i8: ; CHECK: # %bb.0: @@ -338,8 +318,6 @@ define @vsitofp_nxv2f32_nxv2i8_unmasked( % ret %v } -declare @llvm.vp.sitofp.nxv2f32.nxv2i16(, , i32) - define @vsitofp_nxv2f32_nxv2i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_nxv2f32_nxv2i16: ; CHECK: # %bb.0: @@ -362,8 +340,6 @@ define @vsitofp_nxv2f32_nxv2i16_unmasked( ret %v } -declare @llvm.vp.sitofp.nxv2f32.nxv2i32(, , i32) - define @vsitofp_nxv2f32_nxv2i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_nxv2f32_nxv2i32: ; CHECK: # %bb.0: @@ -384,8 +360,6 @@ define @vsitofp_nxv2f32_nxv2i32_unmasked( ret %v } -declare @llvm.vp.sitofp.nxv2f32.nxv2i64(, , i32) - define @vsitofp_nxv2f32_nxv2i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_nxv2f32_nxv2i64: ; CHECK: # %bb.0: @@ -408,8 +382,6 @@ define @vsitofp_nxv2f32_nxv2i64_unmasked( ret %v } -declare @llvm.vp.sitofp.nxv2f64.nxv2i8(, , i32) - define @vsitofp_nxv2f64_nxv2i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_nxv2f64_nxv2i8: ; CHECK: # %bb.0: @@ -432,8 +404,6 @@ define @vsitofp_nxv2f64_nxv2i8_unmasked( ret %v } -declare @llvm.vp.sitofp.nxv2f64.nxv2i16(, , i32) - define @vsitofp_nxv2f64_nxv2i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_nxv2f64_nxv2i16: ; CHECK: # %bb.0: @@ -456,8 +426,6 @@ define @vsitofp_nxv2f64_nxv2i16_unmasked( %v } -declare @llvm.vp.sitofp.nxv2f64.nxv2i32(, , i32) - define @vsitofp_nxv2f64_nxv2i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vsitofp_nxv2f64_nxv2i32: ; CHECK: # %bb.0: @@ -480,8 +448,6 @@ define @vsitofp_nxv2f64_nxv2i32_unmasked( %v } -declare @llvm.vp.sitofp.nxv2f64.nxv2i64(, , i32) - define @vsitofp_nxv2f64_nxv2i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_nxv2f64_nxv2i64: ; CHECK: # %bb.0: @@ -502,8 +468,6 @@ define @vsitofp_nxv2f64_nxv2i64_unmasked( %v } -declare @llvm.vp.sitofp.nxv32f16.nxv32i32(, , i32) - define @vsitofp_nxv32f16_nxv32i32( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vsitofp_nxv32f16_nxv32i32: ; ZVFH: # %bb.0: @@ -560,8 +524,6 @@ define @vsitofp_nxv32f16_nxv32i32( %va, ret %v } -declare @llvm.vp.sitofp.nxv32f32.nxv32i32(, , i32) - define @vsitofp_nxv32f32_nxv32i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_nxv32f32_nxv32i32: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1down-constant-vl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1down-constant-vl-rv32.ll index dd1d2df1236ff..f6580164a2203 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vslide1down-constant-vl-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslide1down-constant-vl-rv32.ll @@ -11,12 +11,6 @@ ; RUN: -mattr=+zve64x,+zvl64b -verify-machineinstrs \ ; RUN: < %s | FileCheck %s --check-prefixes=CHECK,CHECK-64 -declare @llvm.riscv.vslide1down.nxv1i64.i64( - , - , - i64, - i32) - define @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl1( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1down.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1down.ll index 0d8a4e827530f..8f5c8efd2d070 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vslide1down.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslide1down.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vslide1down.nxv1i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vslide1down_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) 
nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vslide1down_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.nxv2i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vslide1down_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vslide1down_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.nxv4i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vslide1down_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vslide1down_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.nxv8i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vslide1down_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define 
@intrinsic_vslide1down_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.nxv16i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vslide1down_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vslide1down_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.nxv32i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vslide1down_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vslide1down_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.nxv64i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vslide1down_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vslide1down_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -333,12 +235,6 @@ entry: ret %a } 
-declare @llvm.riscv.vslide1down.nxv1i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vslide1down_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -355,14 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vslide1down_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -380,12 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.nxv2i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vslide1down_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -402,14 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vslide1down_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -427,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.nxv4i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vslide1down_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -449,14 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vslide1down_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -474,12 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.nxv8i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vslide1down_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vslide1down_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -496,14 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vslide1down_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -521,12 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.nxv16i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vslide1down_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -543,14 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vslide1down_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -568,12 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.nxv32i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vslide1down_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -590,14 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vslide1down_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -615,12 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.nxv1i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vslide1down_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -637,14 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.mask.nxv1i32.i32( - , - 
, - i32, - , - iXLen, - iXLen) - define @intrinsic_vslide1down_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -662,12 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.nxv2i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vslide1down_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -684,14 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vslide1down_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -709,12 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.nxv4i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vslide1down_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -731,14 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vslide1down_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -756,12 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.nxv8i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vslide1down_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -778,14 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vslide1down_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vslide1down_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -803,12 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.nxv16i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vslide1down_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -825,14 +581,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vslide1down_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -850,12 +598,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.nxv1i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -881,14 +623,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vslide1down_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vslide1down_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -917,12 +651,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.nxv2i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vslide1down_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vslide1down_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -948,14 +676,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vslide1down_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vslide1down_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -984,12 +704,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.nxv4i64.i64( - , - , - i64, - iXLen) 
- define @intrinsic_vslide1down_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vslide1down_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -1015,14 +729,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vslide1down_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vslide1down_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -1051,12 +757,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.nxv8i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vslide1down_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vslide1down_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -1082,14 +782,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1down.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vslide1down_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vslide1down_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1up-constant-vl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1up-constant-vl-rv32.ll index 161d3dd021600..c389db39b9f07 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vslide1up-constant-vl-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslide1up-constant-vl-rv32.ll @@ -11,12 +11,6 @@ ; RUN: -mattr=+zve64x,+zvl64b -verify-machineinstrs \ ; RUN: < %s | FileCheck %s --check-prefixes=CHECK,CHECK-64 -declare @llvm.riscv.vslide1up.nxv1i64.i64( - , - , - i64, - i32) - define @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl1( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl1: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1up.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1up.ll index df19707180a82..d30110d43a53d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vslide1up.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslide1up.ll 
@@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vslide1up.nxv1i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vslide1up_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -27,14 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -52,12 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.nxv2i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vslide1up_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -75,14 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vslide1up_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -100,12 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.nxv4i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vslide1up_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -123,14 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vslide1up_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -148,12 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.nxv8i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vslide1up_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) 
nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -171,14 +123,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vslide1up_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -196,12 +140,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.nxv16i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vslide1up_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -219,14 +157,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vslide1up_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -244,12 +174,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.nxv32i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vslide1up_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -267,14 +191,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vslide1up_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -292,12 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.nxv64i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vslide1up_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -315,14 +225,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define 
@intrinsic_vslide1up_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -340,12 +242,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.nxv1i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vslide1up_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -363,14 +259,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vslide1up_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -388,12 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.nxv2i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vslide1up_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -411,14 +293,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vslide1up_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -436,12 +310,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.nxv4i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vslide1up_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -459,14 +327,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vslide1up_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -484,12 +344,6 @@ entry: ret %a } 
-declare @llvm.riscv.vslide1up.nxv8i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vslide1up_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -507,14 +361,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vslide1up_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -532,12 +378,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.nxv16i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vslide1up_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -555,14 +395,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vslide1up_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -580,12 +412,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.nxv32i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vslide1up_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -603,14 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vslide1up_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -628,12 +446,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.nxv1i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vslide1up_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vslide1up_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -651,14 +463,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vslide1up_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -676,12 +480,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.nxv2i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vslide1up_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -699,14 +497,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vslide1up_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -724,12 +514,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.nxv4i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vslide1up_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -747,14 +531,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vslide1up_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -772,12 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.nxv8i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vslide1up_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -795,14 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define 
@intrinsic_vslide1up_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -820,12 +582,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.nxv16i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vslide1up_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -843,14 +599,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vslide1up_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -868,12 +616,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.nxv1i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -900,14 +642,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vslide1up_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vslide1up_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -936,12 +670,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.nxv2i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vslide1up_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vslide1up_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -968,14 +696,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vslide1up_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vslide1up_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1004,12 +724,6 @@ entry: ret %a } 
-declare @llvm.riscv.vslide1up.nxv4i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vslide1up_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vslide1up_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -1036,14 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vslide1up_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vslide1up_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -1072,12 +778,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.nxv8i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vslide1up_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vslide1up_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -1104,14 +804,6 @@ entry: ret %a } -declare @llvm.riscv.vslide1up.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vslide1up_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vslide1up_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vslidedown.ll b/llvm/test/CodeGen/RISCV/rvv/vslidedown.ll index 22a90cc2c94ac..40d68e6d76727 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vslidedown.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslidedown.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+d,+zvfhmin,+zvfbfmin \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vslidedown.nxv1i8( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv1i8_nxv1i8( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -28,13 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv1i8( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv1i8_nxv1i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -86,13 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv2i8( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv2i8_nxv2i8( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -110,13 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv2i8( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv2i8_nxv2i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -168,13 +140,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv4i8( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv4i8_nxv4i8( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -192,13 +157,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv4i8( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv4i8_nxv4i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -250,13 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv8i8( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv8i8_nxv8i8( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -274,13 +225,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv8i8( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv8i8_nxv8i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -332,13 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv16i8( - , - , - iXLen, - iXLen, - iXLen); - define 
@intrinsic_vslidedown_vx_nxv16i8_nxv16i8( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -356,13 +293,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv16i8( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv16i8_nxv16i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -414,13 +344,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv32i8( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv32i8_nxv32i8( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -438,13 +361,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv32i8( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv32i8_nxv32i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -496,13 +412,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv1i16( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv1i16_nxv1i16( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -520,13 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv1i16( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv1i16_nxv1i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -578,13 +480,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv2i16( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv2i16_nxv2i16( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -602,13 +497,6 @@ entry: ret %a } -declare 
@llvm.riscv.vslidedown.mask.nxv2i16( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv2i16_nxv2i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -660,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv4i16( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv4i16_nxv4i16( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -684,13 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv4i16( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv4i16_nxv4i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -742,13 +616,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv8i16( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv8i16_nxv8i16( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -766,13 +633,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv8i16( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv8i16_nxv8i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -824,13 +684,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv16i16( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv16i16_nxv16i16( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -848,13 +701,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv16i16( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv16i16_nxv16i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vslidedown_mask_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -906,13 +752,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv1i32( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv1i32_nxv1i32( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -930,13 +769,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv1i32( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv1i32_nxv1i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -988,13 +820,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv2i32( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv2i32_nxv2i32( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1012,13 +837,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv2i32( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv2i32_nxv2i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1070,13 +888,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv4i32( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv4i32_nxv4i32( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1094,13 +905,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv4i32( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv4i32_nxv4i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1152,13 +956,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv8i32( - , - , - iXLen, - iXLen, - iXLen); - define 
@intrinsic_vslidedown_vx_nxv8i32_nxv8i32( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1176,13 +973,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv8i32( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv8i32_nxv8i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1234,13 +1024,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv1i64( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv1i64_nxv1i64( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1258,13 +1041,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv1i64( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv1i64_nxv1i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1316,13 +1092,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv2i64( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv2i64_nxv2i64( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1340,13 +1109,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv2i64( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv2i64_nxv2i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1398,13 +1160,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv4i64( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv4i64_nxv4i64( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1422,13 +1177,6 @@ entry: ret %a } 
-declare @llvm.riscv.vslidedown.mask.nxv4i64( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv4i64_nxv4i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1480,13 +1228,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv1f16( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv1f16_nxv1f16( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1504,13 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv1f16( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv1f16_nxv1f16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1562,13 +1296,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv2f16( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv2f16_nxv2f16( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -1586,13 +1313,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv2f16( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv2f16_nxv2f16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -1644,13 +1364,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv4f16( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv4f16_nxv4f16( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -1668,13 +1381,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv4f16( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv4f16_nxv4f16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vslidedown_mask_vx_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -1726,13 +1432,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv8f16( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv8f16_nxv8f16( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -1750,13 +1449,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv8f16( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv8f16_nxv8f16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -1808,13 +1500,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv16f16( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv16f16_nxv16f16( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -1832,13 +1517,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv16f16( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv16f16_nxv16f16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -1890,13 +1568,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv1f32( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv1f32_nxv1f32( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -1914,13 +1585,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv1f32( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv1f32_nxv1f32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -1972,13 +1636,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv2f32( - , - , - iXLen, - iXLen, - iXLen); - 
define @intrinsic_vslidedown_vx_nxv2f32_nxv2f32( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -1996,13 +1653,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv2f32( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv2f32_nxv2f32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -2054,13 +1704,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv4f32( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv4f32_nxv4f32( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -2078,13 +1721,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv4f32( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv4f32_nxv4f32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -2136,13 +1772,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv8f32( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv8f32_nxv8f32( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -2160,13 +1789,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv8f32( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv8f32_nxv8f32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -2218,13 +1840,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv1f64( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv1f64_nxv1f64( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -2242,13 +1857,6 @@ entry: 
ret %a } -declare @llvm.riscv.vslidedown.mask.nxv1f64( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv1f64_nxv1f64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -2300,13 +1908,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv2f64( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv2f64_nxv2f64( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -2324,13 +1925,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv2f64( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv2f64_nxv2f64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -2382,13 +1976,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.nxv4f64( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslidedown_vx_nxv4f64_nxv4f64( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -2406,13 +1993,6 @@ entry: ret %a } -declare @llvm.riscv.vslidedown.mask.nxv4f64( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslidedown_mask_vx_nxv4f64_nxv4f64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vslideup.ll b/llvm/test/CodeGen/RISCV/rvv/vslideup.ll index 0291207ccdb49..a2595e8652fd5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vslideup.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslideup.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+d,+zvfhmin,+zvfbfmin \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vslideup.nxv1i8( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv1i8_nxv1i8( %0, %1, iXLen %2, iXLen 
%3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -28,13 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv1i8( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -86,13 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv2i8( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv2i8_nxv2i8( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -110,13 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv2i8( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv2i8_nxv2i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -168,13 +140,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv4i8( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv4i8_nxv4i8( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -192,13 +157,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv4i8( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv4i8_nxv4i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -250,13 +208,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv8i8( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv8i8_nxv8i8( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -274,13 +225,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv8i8( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv8i8_nxv8i8( %0, %1, iXLen %2, %3, 
iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -332,13 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv16i8( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv16i8_nxv16i8( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -356,13 +293,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv16i8( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv16i8_nxv16i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -414,13 +344,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv32i8( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv32i8_nxv32i8( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -438,13 +361,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv32i8( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv32i8_nxv32i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -496,13 +412,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv1i16( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv1i16_nxv1i16( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -520,13 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv1i16( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv1i16_nxv1i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -578,13 +480,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv2i16( - , - , - iXLen, - iXLen, - iXLen); - define 
@intrinsic_vslideup_vx_nxv2i16_nxv2i16( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -602,13 +497,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv2i16( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv2i16_nxv2i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -660,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv4i16( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv4i16_nxv4i16( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -684,13 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv4i16( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv4i16_nxv4i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -742,13 +616,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv8i16( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv8i16_nxv8i16( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -766,13 +633,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv8i16( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv8i16_nxv8i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -824,13 +684,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv16i16( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv16i16_nxv16i16( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -848,13 +701,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv16i16( - , 
- , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv16i16_nxv16i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -906,13 +752,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv1i32( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv1i32_nxv1i32( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -930,13 +769,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv1i32( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv1i32_nxv1i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -988,13 +820,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv2i32( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv2i32_nxv2i32( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1012,13 +837,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv2i32( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv2i32_nxv2i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1070,13 +888,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv4i32( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv4i32_nxv4i32( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1094,13 +905,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv4i32( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv4i32_nxv4i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1152,13 +956,6 @@ 
entry: ret %a } -declare @llvm.riscv.vslideup.nxv8i32( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv8i32_nxv8i32( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1176,13 +973,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv8i32( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv8i32_nxv8i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1234,13 +1024,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv1i64( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv1i64_nxv1i64( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1258,13 +1041,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv1i64( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv1i64_nxv1i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1316,13 +1092,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv2i64( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv2i64_nxv2i64( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1340,13 +1109,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv2i64( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv2i64_nxv2i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1398,13 +1160,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv4i64( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv4i64_nxv4i64( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i64_nxv4i64: ; 
CHECK: # %bb.0: # %entry @@ -1422,13 +1177,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv4i64( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv4i64_nxv4i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1480,13 +1228,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv1f16( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv1f16_nxv1f16( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1504,13 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv1f16( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv1f16_nxv1f16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1562,13 +1296,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv2f16( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv2f16_nxv2f16( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -1586,13 +1313,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv2f16( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv2f16_nxv2f16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -1644,13 +1364,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv4f16( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv4f16_nxv4f16( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -1668,13 +1381,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv4f16( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv4f16_nxv4f16( %0, %1, iXLen %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -1726,13 +1432,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv8f16( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv8f16_nxv8f16( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -1750,13 +1449,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv8f16( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv8f16_nxv8f16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -1808,13 +1500,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv16f16( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv16f16_nxv16f16( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -1832,13 +1517,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv16f16( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv16f16_nxv16f16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -1890,13 +1568,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv1f32( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv1f32_nxv1f32( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -1914,13 +1585,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv1f32( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv1f32_nxv1f32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -1972,13 +1636,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv2f32( - , - , - iXLen, - iXLen, - iXLen); - define 
@intrinsic_vslideup_vx_nxv2f32_nxv2f32( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -1996,13 +1653,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv2f32( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv2f32_nxv2f32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -2054,13 +1704,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv4f32( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv4f32_nxv4f32( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -2078,13 +1721,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv4f32( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv4f32_nxv4f32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -2136,13 +1772,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv8f32( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv8f32_nxv8f32( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -2160,13 +1789,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv8f32( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv8f32_nxv8f32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -2218,13 +1840,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv1f64( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv1f64_nxv1f64( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -2242,13 +1857,6 @@ entry: ret %a } -declare 
@llvm.riscv.vslideup.mask.nxv1f64( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv1f64_nxv1f64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -2300,13 +1908,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv2f64( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv2f64_nxv2f64( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -2324,13 +1925,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv2f64( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv2f64_nxv2f64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -2382,13 +1976,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.nxv4f64( - , - , - iXLen, - iXLen, - iXLen); - define @intrinsic_vslideup_vx_nxv4f64_nxv4f64( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -2406,13 +1993,6 @@ entry: ret %a } -declare @llvm.riscv.vslideup.mask.nxv4f64( - , - , - iXLen, - , - iXLen, iXLen); - define @intrinsic_vslideup_mask_vx_nxv4f64_nxv4f64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsll.ll b/llvm/test/CodeGen/RISCV/rvv/vsll.ll index 90fbfc9a1557b..7d899dcd0ba4a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsll.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsll.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vsll.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ 
-26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vsll_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vsll_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vsll_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } 
-declare @llvm.riscv.vsll.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vsll_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vsll_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv64i8.nxv64i8( - , - , - , - iXLen); - define @intrinsic_vsll_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vsll_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare 
@llvm.riscv.vsll.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vsll_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vsll_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vsll_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 
@@ entry: ret %a } -declare @llvm.riscv.vsll.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vsll_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -544,14 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -569,12 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv32i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vsll_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -591,14 +417,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vsll_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vsll_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # 
%entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -711,12 +501,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vsll_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -733,14 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -758,12 +534,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vsll_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -780,14 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -805,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv16i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vsll_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -827,14 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsll_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vsll_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv2i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vsll_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv4i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vsll_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv8i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vsll_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vsll_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1042,12 +734,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv1i8( - , - , - iXLen, - iXLen); - define @intrinsic_vsll_vx_nxv1i8_nxv1i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1064,14 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv1i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vx_nxv1i8_nxv1i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1089,12 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv2i8( - , - , - iXLen, - iXLen); - define @intrinsic_vsll_vx_nxv2i8_nxv2i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1111,14 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv2i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vx_nxv2i8_nxv2i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1136,12 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv4i8( - , - , - iXLen, - iXLen); - define @intrinsic_vsll_vx_nxv4i8_nxv4i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1158,14 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv4i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vx_nxv4i8_nxv4i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ 
-1183,12 +833,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv8i8( - , - , - iXLen, - iXLen); - define @intrinsic_vsll_vx_nxv8i8_nxv8i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1205,14 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv8i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vx_nxv8i8_nxv8i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1230,12 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv16i8( - , - , - iXLen, - iXLen); - define @intrinsic_vsll_vx_nxv16i8_nxv16i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1252,14 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv16i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vx_nxv16i8_nxv16i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1277,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv32i8( - , - , - iXLen, - iXLen); - define @intrinsic_vsll_vx_nxv32i8_nxv32i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1299,14 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv32i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vx_nxv32i8_nxv32i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1324,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv64i8( - , - , - iXLen, - iXLen); - define @intrinsic_vsll_vx_nxv64i8_nxv64i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -1346,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv64i8( - , - , - iXLen, - , - 
iXLen, - iXLen); - define @intrinsic_vsll_mask_vx_nxv64i8_nxv64i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -1371,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv1i16( - , - , - iXLen, - iXLen); - define @intrinsic_vsll_vx_nxv1i16_nxv1i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1393,14 +981,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv1i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vx_nxv1i16_nxv1i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1418,12 +998,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv2i16( - , - , - iXLen, - iXLen); - define @intrinsic_vsll_vx_nxv2i16_nxv2i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1440,14 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv2i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vx_nxv2i16_nxv2i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1465,12 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv4i16( - , - , - iXLen, - iXLen); - define @intrinsic_vsll_vx_nxv4i16_nxv4i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1487,14 +1047,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv4i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vx_nxv4i16_nxv4i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1512,12 +1064,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv8i16( - , - , - iXLen, - iXLen); - define @intrinsic_vsll_vx_nxv8i16_nxv8i16( %0, 
iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1534,14 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv8i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vx_nxv8i16_nxv8i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1559,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv16i16( - , - , - iXLen, - iXLen); - define @intrinsic_vsll_vx_nxv16i16_nxv16i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1581,14 +1113,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv16i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vx_nxv16i16_nxv16i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1606,12 +1130,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv32i16( - , - , - iXLen, - iXLen); - define @intrinsic_vsll_vx_nxv32i16_nxv32i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1628,14 +1146,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv32i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vx_nxv32i16_nxv32i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1653,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv1i32( - , - , - iXLen, - iXLen); - define @intrinsic_vsll_vx_nxv1i32_nxv1i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1675,14 +1179,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv1i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vx_nxv1i32_nxv1i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1700,12 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv2i32( - , - , - iXLen, - iXLen); - define @intrinsic_vsll_vx_nxv2i32_nxv2i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1722,14 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv2i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vx_nxv2i32_nxv2i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1747,12 +1229,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv4i32( - , - , - iXLen, - iXLen); - define @intrinsic_vsll_vx_nxv4i32_nxv4i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1769,14 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv4i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vx_nxv4i32_nxv4i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1794,12 +1262,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv8i32( - , - , - iXLen, - iXLen); - define @intrinsic_vsll_vx_nxv8i32_nxv8i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1816,14 +1278,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv8i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vx_nxv8i32_nxv8i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1841,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv16i32( - , - , - iXLen, - iXLen); - define @intrinsic_vsll_vx_nxv16i32_nxv16i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry 
@@ -1863,14 +1311,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv16i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vx_nxv16i32_nxv16i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1888,12 +1328,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv1i64( - , - , - iXLen, - iXLen); - define @intrinsic_vsll_vx_nxv1i64_nxv1i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1910,14 +1344,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv1i64( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vx_nxv1i64_nxv1i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1935,12 +1361,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv2i64( - , - , - iXLen, - iXLen); - define @intrinsic_vsll_vx_nxv2i64_nxv2i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1957,14 +1377,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv2i64( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vx_nxv2i64_nxv2i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1982,12 +1394,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.nxv4i64( - , - , - iXLen, - iXLen); - define @intrinsic_vsll_vx_nxv4i64_nxv4i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -2004,14 +1410,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv4i64( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vx_nxv4i64_nxv4i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -2029,12 +1427,6 @@ entry: ret %a } 
-declare @llvm.riscv.vsll.nxv8i64( - , - , - iXLen, - iXLen); - define @intrinsic_vsll_vx_nxv8i64_nxv8i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -2051,14 +1443,6 @@ entry: ret %a } -declare @llvm.riscv.vsll.mask.nxv8i64( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsll_mask_vx_nxv8i64_nxv8i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsm.ll b/llvm/test/CodeGen/RISCV/rvv/vsm.ll index 1f5341e2a332a..5aae41c2e437f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsm.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsm.ll @@ -4,8 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare void @llvm.riscv.vsm.nxv1i1(, ptr, iXLen); - define void @intrinsic_vsm_v_nxv1i1( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm_v_nxv1i1: ; CHECK: # %bb.0: # %entry @@ -17,8 +15,6 @@ entry: ret void } -declare void @llvm.riscv.vsm.nxv2i1(, ptr, iXLen); - define void @intrinsic_vsm_v_nxv2i1( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm_v_nxv2i1: ; CHECK: # %bb.0: # %entry @@ -30,8 +26,6 @@ entry: ret void } -declare void @llvm.riscv.vsm.nxv4i1(, ptr, iXLen); - define void @intrinsic_vsm_v_nxv4i1( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm_v_nxv4i1: ; CHECK: # %bb.0: # %entry @@ -43,8 +37,6 @@ entry: ret void } -declare void @llvm.riscv.vsm.nxv8i1(, ptr, iXLen); - define void @intrinsic_vsm_v_nxv8i1( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm_v_nxv8i1: ; CHECK: # %bb.0: # %entry @@ -56,8 +48,6 @@ entry: ret void } -declare void @llvm.riscv.vsm.nxv16i1(, ptr, iXLen); - define void @intrinsic_vsm_v_nxv16i1( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm_v_nxv16i1: ; CHECK: # %bb.0: # %entry @@ -69,8 +59,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsm.nxv32i1(, ptr, iXLen); - define void @intrinsic_vsm_v_nxv32i1( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm_v_nxv32i1: ; CHECK: # %bb.0: # %entry @@ -82,8 +70,6 @@ entry: ret void } -declare void @llvm.riscv.vsm.nxv64i1(, ptr, iXLen); - define void @intrinsic_vsm_v_nxv64i1( %0, ptr %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm_v_nxv64i1: ; CHECK: # %bb.0: # %entry @@ -95,11 +81,6 @@ entry: ret void } -declare @llvm.riscv.vmseq.nxv1i16( - , - , - iXLen); - ; Make sure we can use the vsetvli from the producing instruction. define void @test_vsetvli_i16( %0, %1, ptr %2, iXLen %3) nounwind { ; CHECK-LABEL: test_vsetvli_i16: @@ -117,11 +98,6 @@ entry: ret void } -declare @llvm.riscv.vmseq.nxv1i32( - , - , - iXLen); - define void @test_vsetvli_i32( %0, %1, ptr %2, iXLen %3) nounwind { ; CHECK-LABEL: test_vsetvli_i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsm3c.ll b/llvm/test/CodeGen/RISCV/rvv/vsm3c.ll index 17d59682c104f..fa8075e7b967a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsm3c.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsm3c.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvksh \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vsm3c.nxv8i32.i32( - , - , - iXLen, - iXLen, - iXLen) - define @intrinsic_vsm3c_vi_nxv8i32_i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm3c_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -28,13 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vsm3c.nxv16i32.i32( - , - , - iXLen, - iXLen, - iXLen) - define @intrinsic_vsm3c_vi_nxv16i32_i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm3c_vi_nxv16i32_i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsm3me.ll b/llvm/test/CodeGen/RISCV/rvv/vsm3me.ll index 313482f8c6229..418d23e1c91cf 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsm3me.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsm3me.ll @@ -4,12 +4,6 @@ ; RUN: sed 
's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvksh \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vsm3me.nxv8i32.nxv8i32( - , - , - , - iXLen) - define @intrinsic_vsm3me_vv_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm3me_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -26,12 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vsm3me.nxv16i32.nxv16i32( - , - , - , - iXLen) - define @intrinsic_vsm3me_vv_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm3me_vv_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsm4k.ll b/llvm/test/CodeGen/RISCV/rvv/vsm4k.ll index 34ee021715f89..a3abf6af0bde8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsm4k.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsm4k.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvksed \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vsm4k.nxv4i32.i32( - , - , - iXLen, - iXLen) - define @intrinsic_vsm4k_vi_nxv4i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsm4k_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -26,12 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vsm4k.nxv8i32.i32( - , - , - iXLen, - iXLen) - define @intrinsic_vsm4k_vi_nxv8i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsm4k_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -48,12 +36,6 @@ entry: ret %a } -declare @llvm.riscv.vsm4k.nxv16i32.i32( - , - , - iXLen, - iXLen) - define @intrinsic_vsm4k_vi_nxv16i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsm4k_vi_nxv16i32_i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsm4r.ll b/llvm/test/CodeGen/RISCV/rvv/vsm4r.ll index bcea335deefa7..007421ed87746 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsm4r.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsm4r.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvksed \ ; RUN: 
-verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vsm4r.vv.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vsm4r_vv_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm4r_vv_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -24,11 +19,6 @@ entry: ret %a } -declare @llvm.riscv.vsm4r.vv.nxv8i32( - , - , - iXLen, iXLen); - define @intrinsic_vsm4r_vv_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm4r_vv_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -44,11 +34,6 @@ entry: ret %a } -declare @llvm.riscv.vsm4r.vv.nxv16i32( - , - , - iXLen, iXLen); - define @intrinsic_vsm4r_vv_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm4r_vv_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -64,11 +49,6 @@ entry: ret %a } -declare @llvm.riscv.vsm4r.vs.nxv4i32.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vsm4r_vs_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm4r_vs_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -84,11 +64,6 @@ entry: ret %a } -declare @llvm.riscv.vsm4r.vs.nxv8i32.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vsm4r_vs_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm4r_vs_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -104,11 +79,6 @@ entry: ret %a } -declare @llvm.riscv.vsm4r.vs.nxv16i32.nxv4i32( - , - , - iXLen, iXLen); - define @intrinsic_vsm4r_vs_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm4r_vs_nxv16i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsmul.ll b/llvm/test/CodeGen/RISCV/rvv/vsmul.ll index 0b56a54f08a8c..0606823162521 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsmul.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsmul.ll @@ -8,12 +8,6 @@ ; ZVE64D: LLVM ERROR: Cannot select: intrinsic %llvm.riscv.vsmul -declare @llvm.riscv.vsmul.nxv1i8.nxv1i8( - , - , - , - iXLen, iXLen) - define @intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # 
%entry @@ -31,13 +25,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -56,12 +43,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv2i8.nxv2i8( - , - , - , - iXLen, iXLen) - define @intrinsic_vsmul_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -79,13 +60,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -104,12 +78,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv4i8.nxv4i8( - , - , - , - iXLen, iXLen) - define @intrinsic_vsmul_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -127,13 +95,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -152,12 +113,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv8i8.nxv8i8( - , - , - , - iXLen, iXLen) - define @intrinsic_vsmul_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -175,13 +130,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; 
CHECK: # %bb.0: # %entry @@ -200,12 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv16i8.nxv16i8( - , - , - , - iXLen, iXLen) - define @intrinsic_vsmul_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -223,13 +165,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -248,12 +183,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv32i8.nxv32i8( - , - , - , - iXLen, iXLen) - define @intrinsic_vsmul_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -271,13 +200,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -296,12 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv64i8.nxv64i8( - , - , - , - iXLen, iXLen) - define @intrinsic_vsmul_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -319,13 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -345,12 +254,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv1i16.nxv1i16( - , - , - , - iXLen, iXLen) - define @intrinsic_vsmul_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vsmul_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -368,13 +271,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -393,12 +289,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv2i16.nxv2i16( - , - , - , - iXLen, iXLen) - define @intrinsic_vsmul_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -416,13 +306,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -441,12 +324,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv4i16.nxv4i16( - , - , - , - iXLen, iXLen) - define @intrinsic_vsmul_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -464,13 +341,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -489,12 +359,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv8i16.nxv8i16( - , - , - , - iXLen, iXLen) - define @intrinsic_vsmul_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -512,13 +376,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, iXLen, iXLen) - define 
@intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -537,12 +394,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv16i16.nxv16i16( - , - , - , - iXLen, iXLen) - define @intrinsic_vsmul_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -560,13 +411,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -585,12 +429,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv32i16.nxv32i16( - , - , - , - iXLen, iXLen) - define @intrinsic_vsmul_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -608,13 +446,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -634,12 +465,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv1i32.nxv1i32( - , - , - , - iXLen, iXLen) - define @intrinsic_vsmul_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -657,13 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -682,12 +500,6 
@@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv2i32.nxv2i32( - , - , - , - iXLen, iXLen) - define @intrinsic_vsmul_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -705,13 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -730,12 +535,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv4i32.nxv4i32( - , - , - , - iXLen, iXLen) - define @intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -753,13 +552,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -778,12 +570,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv8i32.nxv8i32( - , - , - , - iXLen, iXLen) - define @intrinsic_vsmul_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -801,13 +587,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -826,12 +605,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv16i32.nxv16i32( - , - , - , - iXLen, iXLen) - define @intrinsic_vsmul_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vsmul_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -849,13 +622,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -875,12 +641,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv1i64.nxv1i64( - , - , - , - iXLen, iXLen) - define @intrinsic_vsmul_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -898,13 +658,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -923,12 +676,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv2i64.nxv2i64( - , - , - , - iXLen, iXLen) - define @intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -946,13 +693,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -971,12 +711,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv4i64.nxv4i64( - , - , - , - iXLen, iXLen) - define @intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -994,13 +728,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, iXLen, iXLen) - define 
@intrinsic_vsmul_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1019,12 +746,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv8i64.nxv8i64( - , - , - , - iXLen, iXLen) - define @intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1042,13 +763,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1068,12 +782,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv1i8.i8( - , - , - i8, - iXLen, iXLen) - define @intrinsic_vsmul_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1091,13 +799,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1116,12 +817,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv2i8.i8( - , - , - i8, - iXLen, iXLen) - define @intrinsic_vsmul_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1139,13 +834,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1164,12 +852,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv4i8.i8( - , - , - i8, - iXLen, iXLen) 
- define @intrinsic_vsmul_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1187,13 +869,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1212,12 +887,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv8i8.i8( - , - , - i8, - iXLen, iXLen) - define @intrinsic_vsmul_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1235,13 +904,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1260,12 +922,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv16i8.i8( - , - , - i8, - iXLen, iXLen) - define @intrinsic_vsmul_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1283,13 +939,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1308,12 +957,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv32i8.i8( - , - , - i8, - iXLen, iXLen) - define @intrinsic_vsmul_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1331,13 +974,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen) - define 
@intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1356,12 +992,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv64i8.i8( - , - , - i8, - iXLen, iXLen) - define @intrinsic_vsmul_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1379,13 +1009,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1404,12 +1027,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv1i16.i16( - , - , - i16, - iXLen, iXLen) - define @intrinsic_vsmul_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1427,13 +1044,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1452,12 +1062,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv2i16.i16( - , - , - i16, - iXLen, iXLen) - define @intrinsic_vsmul_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1475,13 +1079,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1500,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv4i16.i16( - , - , 
- i16, - iXLen, iXLen) - define @intrinsic_vsmul_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1523,13 +1114,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1548,12 +1132,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv8i16.i16( - , - , - i16, - iXLen, iXLen) - define @intrinsic_vsmul_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1571,13 +1149,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1596,12 +1167,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv16i16.i16( - , - , - i16, - iXLen, iXLen) - define @intrinsic_vsmul_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1619,13 +1184,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1644,12 +1202,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv32i16.i16( - , - , - i16, - iXLen, iXLen) - define @intrinsic_vsmul_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1667,13 +1219,6 @@ entry: ret %a } 
-declare @llvm.riscv.vsmul.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1692,12 +1237,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv1i32.i32( - , - , - i32, - iXLen, iXLen) - define @intrinsic_vsmul_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1715,13 +1254,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1740,12 +1272,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv2i32.i32( - , - , - i32, - iXLen, iXLen) - define @intrinsic_vsmul_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1763,13 +1289,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1788,12 +1307,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv4i32.i32( - , - , - i32, - iXLen, iXLen) - define @intrinsic_vsmul_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1811,13 +1324,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32: 
; CHECK: # %bb.0: # %entry @@ -1836,12 +1342,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv8i32.i32( - , - , - i32, - iXLen, iXLen) - define @intrinsic_vsmul_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1859,13 +1359,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1884,12 +1377,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv16i32.i32( - , - , - i32, - iXLen, iXLen) - define @intrinsic_vsmul_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1907,13 +1394,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1932,12 +1412,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv1i64.i64( - , - , - i64, - iXLen, iXLen) - define @intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1968,13 +1442,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vsmul_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -2006,12 +1473,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv2i64.i64( - , - , - i64, - iXLen, iXLen) - define @intrinsic_vsmul_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: 
intrinsic_vsmul_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2042,13 +1503,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vsmul_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2080,12 +1534,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv4i64.i64( - , - , - i64, - iXLen, iXLen) - define @intrinsic_vsmul_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vsmul_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2116,13 +1564,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vsmul_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2154,12 +1595,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.nxv8i64.i64( - , - , - i64, - iXLen, iXLen) - define @intrinsic_vsmul_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vsmul_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2190,13 +1625,6 @@ entry: ret %a } -declare @llvm.riscv.vsmul.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, iXLen, iXLen) - define @intrinsic_vsmul_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vsmul_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv64.ll index f3ad06529210a..949d9be1ce176 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv64.ll @@ -4,12 +4,6 @@ ; The intrinsics are not supported with RV32. 
-declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -280,12 +202,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -302,13 +218,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i64( - , - ptr, - , - , - 
i64); - define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -326,12 +235,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -348,13 +251,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -372,12 +268,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -394,13 +284,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -418,12 +301,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -440,13 +317,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i64: ; 
CHECK: # %bb.0: # %entry @@ -464,12 +334,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -486,13 +350,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -510,12 +367,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -532,13 +383,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -556,12 +400,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -578,13 +416,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -602,12 +433,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i64( - , - ptr, - , - i64); - define void 
@intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -624,13 +449,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -648,12 +466,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -670,13 +482,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -694,12 +499,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -716,13 +515,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -740,12 +532,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -762,13 
+548,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -786,12 +565,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -808,13 +581,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -832,12 +598,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -854,13 +614,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -878,12 +631,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -900,13 +647,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i64( %0, 
ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -924,12 +664,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1bf16.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv1bf16_nxv1bf16_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1bf16_nxv1bf16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -946,13 +680,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1bf16.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv1bf16_nxv1bf16_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1bf16_nxv1bf16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -970,12 +697,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2bf16.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv2bf16_nxv2bf16_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2bf16_nxv2bf16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -992,13 +713,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2bf16.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv2bf16_nxv2bf16_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2bf16_nxv2bf16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1016,12 +730,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4bf16.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv4bf16_nxv4bf16_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4bf16_nxv4bf16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1038,13 +746,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4bf16.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv4bf16_nxv4bf16_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4bf16_nxv4bf16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1062,12 +763,6 
@@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8bf16.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv8bf16_nxv8bf16_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8bf16_nxv8bf16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1084,13 +779,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8bf16.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv8bf16_nxv8bf16_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8bf16_nxv8bf16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1108,12 +796,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1130,13 +812,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1154,12 +829,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1176,13 +845,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1200,12 +862,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i64( %0, ptr 
%1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1222,13 +878,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1246,12 +895,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1268,13 +911,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1292,12 +928,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1314,13 +944,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1338,12 +961,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1360,13 +977,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxei.mask.nxv2f64.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1384,12 +994,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1406,13 +1010,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1430,12 +1027,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1452,13 +1043,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsoxei.ll b/llvm/test/CodeGen/RISCV/rvv/vsoxei.ll index 89222711d4d91..a559c8bcd705a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsoxei.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsoxei.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i32( 
%0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxei.mask.nxv8i8.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16i8.nxv16i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -280,12 +202,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -302,13 +218,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i32( %0, ptr %1, %2, %3, 
iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -326,12 +235,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -348,13 +251,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -372,12 +268,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -394,13 +284,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -418,12 +301,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16i16.nxv16i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -440,13 +317,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -464,12 +334,6 @@ 
entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -486,13 +350,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -510,12 +367,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -532,13 +383,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -556,12 +400,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -578,13 +416,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -602,12 +433,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i32( 
%0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -624,13 +449,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -648,12 +466,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16i32.nxv16i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -670,13 +482,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -694,12 +499,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -716,13 +515,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -740,12 +532,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -762,13 
+548,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -786,12 +565,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -808,13 +581,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -832,12 +598,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -854,13 +614,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -878,12 +631,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -900,13 +647,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i32( - , - ptr, - , - , - iXLen); - define void 
@intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -924,12 +664,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -946,13 +680,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -970,12 +697,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -992,13 +713,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1016,12 +730,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1038,13 +746,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1062,12 +763,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16f16.nxv16i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1084,13 +779,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1108,12 +796,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1130,13 +812,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1154,12 +829,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1176,13 +845,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1200,12 +862,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxei.nxv4f32.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1222,13 +878,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1246,12 +895,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1268,13 +911,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1292,12 +928,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16f32.nxv16i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1314,13 +944,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1338,12 +961,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i32( %0, ptr %1, 
%2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1360,13 +977,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1384,12 +994,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1406,13 +1010,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1430,12 +1027,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1452,13 +1043,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1476,12 +1060,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1498,13 +1076,6 @@ entry: ret 
void } -declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1522,12 +1093,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1544,13 +1109,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1568,12 +1126,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1590,13 +1142,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1614,12 +1159,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1636,13 +1175,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i16( %0, ptr %1, %2, 
%3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1660,12 +1192,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1682,13 +1208,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1706,12 +1225,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16i8.nxv16i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1728,13 +1241,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1752,12 +1258,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv32i8.nxv32i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1774,13 +1274,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1798,12 +1291,6 @@ 
entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1820,13 +1307,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1844,12 +1324,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1866,13 +1340,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1890,12 +1357,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1912,13 +1373,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1936,12 +1390,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i16( - , - ptr, - , - iXLen); - define void 
@intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1958,13 +1406,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1982,12 +1423,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16i16.nxv16i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2004,13 +1439,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2028,12 +1456,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv32i16.nxv32i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2050,13 +1472,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2074,12 +1489,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2096,13 +1505,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2120,12 +1522,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2142,13 +1538,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2166,12 +1555,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2188,13 +1571,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2212,12 +1588,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2234,13 +1604,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxei.mask.nxv8i32.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2258,12 +1621,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16i32.nxv16i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2280,13 +1637,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2304,12 +1654,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2326,13 +1670,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2350,12 +1687,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2372,13 +1703,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i16( - , - ptr, - , - , - iXLen); - define void 
@intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2396,12 +1720,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2418,13 +1736,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2442,12 +1753,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2464,13 +1769,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2488,12 +1786,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2510,13 +1802,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2534,12 +1819,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2556,13 +1835,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2580,12 +1852,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2602,13 +1868,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2626,12 +1885,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2648,13 +1901,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2672,12 +1918,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxei.nxv16f16.nxv16i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2694,13 +1934,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2718,12 +1951,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv32f16.nxv32i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2740,13 +1967,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2764,12 +1984,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2786,13 +2000,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2810,12 +2017,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i16( - , - ptr, - , - iXLen); - define void 
@intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2832,13 +2033,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2856,12 +2050,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2878,13 +2066,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2902,12 +2083,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2924,13 +2099,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2948,12 +2116,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16f32.nxv16i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2970,13 +2132,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2994,12 +2149,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3016,13 +2165,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3040,12 +2182,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3062,13 +2198,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3086,12 +2215,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3108,13 +2231,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxei.mask.nxv4f64.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3132,12 +2248,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3154,13 +2264,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3178,12 +2281,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3200,13 +2297,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3224,12 +2314,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3246,13 +2330,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3270,12 +2347,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3292,13 +2363,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3316,12 +2380,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3338,13 +2396,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3362,12 +2413,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16i8.nxv16i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3384,13 +2429,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3408,12 +2446,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxei.nxv32i8.nxv32i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3430,13 +2462,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3454,12 +2479,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv64i8.nxv64i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv64i8_nxv64i8_nxv64i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -3476,13 +2495,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv64i8.nxv64i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv64i8_nxv64i8_nxv64i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -3500,12 +2512,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3522,13 +2528,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3546,12 +2545,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i8( %0, ptr %1, %2, iXLen %3) 
nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3568,13 +2561,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3592,12 +2578,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3614,13 +2594,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3638,12 +2611,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3660,13 +2627,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3684,12 +2644,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16i16.nxv16i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3706,13 +2660,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxei.mask.nxv16i16.nxv16i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3730,12 +2677,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv32i16.nxv32i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3752,13 +2693,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3776,12 +2710,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3798,13 +2726,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3822,12 +2743,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3844,13 +2759,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i8( %0, 
ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3868,12 +2776,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3890,13 +2792,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3914,12 +2809,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3936,13 +2825,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3960,12 +2842,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16i32.nxv16i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3982,13 +2858,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4006,12 
+2875,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4028,13 +2891,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4052,12 +2908,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4074,13 +2924,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4098,12 +2941,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4120,13 +2957,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4144,12 +2974,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i8( 
%0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4166,13 +2990,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4190,12 +3007,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4212,13 +3023,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4236,12 +3040,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4258,13 +3056,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4282,12 +3073,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4304,13 +3089,6 @@ entry: ret void 
} -declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4328,12 +3106,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4350,13 +3122,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4374,12 +3139,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16f16.nxv16i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4396,13 +3155,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4420,12 +3172,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv32f16.nxv32i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -4442,13 +3188,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i8( - , - ptr, - , - , - iXLen); - define void 
@intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -4466,12 +3205,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1bf16.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1bf16_nxv1bf16_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1bf16_nxv1bf16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4488,13 +3221,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1bf16.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1bf16_nxv1bf16_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1bf16_nxv1bf16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4512,12 +3238,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2bf16.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2bf16_nxv2bf16_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2bf16_nxv2bf16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4534,13 +3254,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2bf16.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2bf16_nxv2bf16_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2bf16_nxv2bf16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4558,12 +3271,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4bf16.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4bf16_nxv4bf16_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4bf16_nxv4bf16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4580,13 +3287,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4bf16.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4bf16_nxv4bf16_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsoxei_mask_v_nxv4bf16_nxv4bf16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4604,12 +3304,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8bf16.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8bf16_nxv8bf16_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8bf16_nxv8bf16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -4626,13 +3320,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8bf16.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8bf16_nxv8bf16_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8bf16_nxv8bf16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -4650,12 +3337,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16bf16.nxv16i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16bf16_nxv16bf16_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16bf16_nxv16bf16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -4672,13 +3353,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16bf16.nxv16i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16bf16_nxv16bf16_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16bf16_nxv16bf16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -4696,12 +3370,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4718,13 +3386,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4742,12 +3403,6 @@ entry: ret 
void } -declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4764,13 +3419,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4788,12 +3436,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4810,13 +3452,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4834,12 +3469,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4856,13 +3485,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4880,12 +3502,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv16f32.nxv16i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i8( %0, ptr %1, %2, 
iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4902,13 +3518,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4926,12 +3535,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4948,13 +3551,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4972,12 +3568,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4994,13 +3584,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5018,12 +3601,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5040,13 +3617,6 @@ entry: ret void } 
-declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5064,12 +3634,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -5086,13 +3650,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv32.ll index 6b54ce4974f34..9890fc20021e2 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv32.ll @@ -2,9 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zve64d,+f,+d,+zvfh,+zvfbfmin \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,9 +24,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -52,9 +46,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -77,9 +68,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -102,9 +90,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16: ; CHECK: # %bb.0: # 
%entry @@ -127,9 +112,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -152,9 +134,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -177,9 +156,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -202,9 +178,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void 
@test_vsoxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -227,9 +200,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -252,9 +222,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -277,9 +244,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -302,9 +266,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -327,9 +288,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -352,9 +310,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -377,9 +332,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void 
@test_vsoxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -402,9 +354,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -427,9 +376,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -452,9 +398,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -477,9 +420,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -502,9 +442,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -527,9 +464,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -552,9 +486,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) 
%val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -577,9 +508,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -602,9 +530,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -627,9 +552,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -652,9 +574,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void 
@llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -677,9 +596,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -702,9 +618,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -727,9 +640,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8: ; 
CHECK: # %bb.0: # %entry @@ -752,9 +662,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -777,9 +684,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -802,9 +706,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -827,9 +728,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define 
void @test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -852,9 +750,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -877,9 +772,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -902,9 +794,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -927,9 +816,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -952,9 +838,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -977,9 +860,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1002,9 +882,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) 
%val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1027,9 +904,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1052,9 +926,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1077,9 +948,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1102,9 +970,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void 
@llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1127,9 +992,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1152,9 +1014,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1177,9 +1036,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1202,9 +1058,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1227,9 +1080,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1252,9 +1102,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1277,9 +1124,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void 
@llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1302,9 +1146,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1327,9 +1168,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1352,9 +1190,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16: ; CHECK: 
# %bb.0: # %entry @@ -1377,9 +1212,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1402,9 +1234,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1427,9 +1256,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1452,9 +1278,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void 
@test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1477,9 +1300,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1502,9 +1322,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1527,9 +1344,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1552,9 +1366,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1577,9 +1388,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1602,9 +1410,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1627,9 +1432,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8(target("riscv.vector.tuple", , 
6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1652,9 +1454,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1677,9 +1476,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1702,9 +1498,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1727,9 +1520,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void 
@llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1752,9 +1542,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1777,9 +1564,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1802,9 +1586,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16: ; CHECK: 
# %bb.0: # %entry @@ -1827,9 +1608,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1852,9 +1630,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1877,9 +1652,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1902,9 +1674,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void 
@test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1927,9 +1696,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1952,9 +1718,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1977,9 +1740,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -2002,9 +1762,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -2027,9 +1784,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2052,9 +1806,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -2077,9 +1828,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8(target("riscv.vector.tuple", , 
8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2102,9 +1850,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2127,9 +1872,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2152,9 +1894,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -2177,9 +1916,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void 
@llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2202,9 +1938,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -2227,9 +1960,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -2252,9 +1982,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16: ; CHECK: 
# %bb.0: # %entry @@ -2277,9 +2004,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -2302,9 +2026,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -2327,9 +2048,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2352,9 +2070,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void 
@test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -2377,9 +2092,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2402,9 +2114,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2427,9 +2136,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2452,9 +2158,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -2477,9 +2180,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2502,9 +2202,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -2527,9 +2224,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void 
@test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -2552,9 +2246,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2577,9 +2268,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -2602,9 +2290,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -2627,9 +2312,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2652,9 +2334,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -2677,9 +2356,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -2702,9 +2378,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void 
@test_vsoxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2727,9 +2400,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -2752,9 +2422,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2777,9 +2444,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2802,9 +2466,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2827,9 +2488,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -2852,9 +2510,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2877,9 +2532,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void 
@test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -2902,9 +2554,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -2927,9 +2576,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2952,9 +2598,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -2977,9 +2620,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3002,9 +2642,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3027,9 +2664,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -3052,9 +2686,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void 
@test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3077,9 +2708,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3102,9 +2730,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -3127,9 +2752,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3152,9 +2774,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3177,9 +2796,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -3202,9 +2818,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3227,9 +2840,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void 
@test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3252,9 +2862,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -3277,9 +2884,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3302,9 +2906,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3327,9 +2928,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -3352,9 +2950,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3377,9 +2972,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3402,9 +2994,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void 
@test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -3427,9 +3016,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3452,9 +3038,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3477,9 +3060,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -3502,9 +3082,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3527,9 +3104,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3552,9 +3126,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -3577,9 +3148,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void 
@test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3602,9 +3170,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3627,9 +3192,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -3652,9 +3214,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3677,9 +3236,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3702,9 +3258,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -3727,9 +3280,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3752,9 +3302,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void 
@test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3777,9 +3324,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -3802,9 +3346,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3827,9 +3368,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3852,9 +3390,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -3877,9 +3412,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3902,9 +3434,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3927,9 +3456,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void 
@test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -3952,9 +3478,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3977,9 +3500,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4002,9 +3522,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4027,9 +3544,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4052,9 +3566,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4077,9 +3588,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4102,9 +3610,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void 
@test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4127,9 +3632,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4152,9 +3654,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4177,9 +3676,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4202,9 +3698,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4227,9 +3720,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4252,9 +3742,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4277,9 +3764,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void 
@test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4302,9 +3786,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4327,9 +3808,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4352,9 +3830,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4377,9 +3852,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4402,9 +3874,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4427,9 +3896,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4452,9 +3918,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void 
@test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4477,9 +3940,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4502,9 +3962,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -4527,9 +3984,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -4552,9 +4006,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4577,9 +4028,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4602,9 +4050,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4627,9 +4072,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void 
@test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4652,9 +4094,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4677,9 +4116,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4702,9 +4138,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4727,9 +4160,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4752,9 +4182,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4777,9 +4204,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4802,9 +4226,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void 
@test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4827,9 +4248,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4852,9 +4270,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4877,9 +4292,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4902,9 +4314,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4927,9 +4336,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4952,9 +4358,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4977,9 +4380,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void 
@test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -5002,9 +4402,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5027,9 +4424,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5052,9 +4446,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5077,9 +4468,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5102,9 +4490,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5127,9 +4512,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5152,9 +4534,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void 
@test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5177,9 +4556,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5202,9 +4578,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5227,9 +4600,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5252,9 +4622,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5277,9 +4644,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5302,9 +4666,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5327,9 +4688,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void 
@test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5352,9 +4710,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5377,9 +4732,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5402,9 +4754,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5427,9 +4776,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5452,9 +4798,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5477,9 +4820,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5502,9 +4842,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void 
@test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5527,9 +4864,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5552,9 +4886,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5577,9 +4908,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5602,9 +4930,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5627,9 +4952,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5652,9 +4974,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5677,9 +4996,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void 
@test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5702,9 +5018,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5727,9 +5040,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5752,9 +5062,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5777,9 +5084,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -5802,9 +5106,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -5827,9 +5128,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5852,9 +5150,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void 
@test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5877,9 +5172,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5902,9 +5194,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5927,9 +5216,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5952,9 +5238,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5977,9 +5260,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6002,9 +5282,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6027,9 +5304,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void 
@test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6052,9 +5326,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6077,9 +5348,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6102,9 +5370,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6127,9 +5392,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6152,9 +5414,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6177,9 +5436,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6202,9 +5458,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void 
@test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6227,9 +5480,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6252,9 +5502,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6277,9 +5524,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6302,9 +5546,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6327,9 +5568,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6352,9 +5590,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6377,9 +5612,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void 
@test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6402,9 +5634,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6427,7 +5656,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6450,7 +5678,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6473,7 +5700,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6496,7 +5722,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6519,7 +5744,6 @@ entry: ret void } - define void 
@test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6542,7 +5766,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6565,7 +5788,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -6588,7 +5810,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -6611,7 +5832,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -6634,7 +5854,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -6657,7 +5876,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -6680,7 +5898,6 @@ entry: ret void 
} - define void @test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -6703,7 +5920,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -6726,7 +5942,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -6749,7 +5964,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -6772,7 +5986,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6795,7 +6008,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6818,7 +6030,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ 
-6841,7 +6052,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6864,7 +6074,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6887,7 +6096,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6910,7 +6118,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -6933,7 +6140,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -6956,7 +6162,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -6979,7 +6184,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: 
# %entry @@ -7002,7 +6206,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -7025,7 +6228,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -7048,7 +6250,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7071,7 +6272,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7094,7 +6294,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7117,7 +6316,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7140,7 +6338,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; 
CHECK: # %bb.0: # %entry @@ -7163,7 +6360,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7186,7 +6382,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -7209,7 +6404,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -7232,7 +6426,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -7255,7 +6448,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -7278,7 +6470,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -7301,7 +6492,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -7324,7 +6514,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7347,7 +6536,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7370,7 +6558,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7393,7 +6580,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7416,7 +6602,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7439,7 +6624,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7462,7 +6646,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; 
CHECK-LABEL: test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -7485,7 +6668,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -7508,7 +6690,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -7531,7 +6712,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7554,7 +6734,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7577,7 +6756,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7600,7 +6778,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7623,7 +6800,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, 
i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7646,7 +6822,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7669,7 +6844,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -7692,7 +6866,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -7715,7 +6888,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -7738,7 +6910,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7761,7 +6932,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7784,7 +6954,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr 
%base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7807,7 +6976,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7830,7 +6998,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7853,7 +7020,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7876,7 +7042,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -7899,7 +7064,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -7922,7 +7086,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -7945,7 +7108,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", , 
8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7968,7 +7130,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7991,7 +7152,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8014,7 +7174,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8037,7 +7196,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8060,7 +7218,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8083,7 +7240,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -8106,7 +7262,6 @@ entry: ret void } - define void 
@test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -8129,7 +7284,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -8152,7 +7306,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8175,7 +7328,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8198,7 +7350,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8221,7 +7372,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8244,7 +7394,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8267,7 +7416,6 @@ entry: ret void } - 
define void @test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8290,7 +7438,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -8313,7 +7460,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -8336,7 +7482,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -8359,7 +7504,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -8382,7 +7526,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -8405,7 +7548,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -8428,7 
+7570,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8451,7 +7592,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8474,7 +7614,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8497,7 +7636,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8520,7 +7658,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8543,7 +7680,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8566,7 +7702,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # 
%entry @@ -8589,7 +7724,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -8612,7 +7746,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -8635,7 +7768,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8658,7 +7790,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8681,7 +7812,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8704,7 +7834,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8727,7 +7856,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; 
CHECK: # %bb.0: # %entry @@ -8750,7 +7878,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8773,7 +7900,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -8796,7 +7922,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -8819,7 +7944,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -8842,7 +7966,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8865,7 +7988,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8888,7 +8010,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8911,7 +8032,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8934,7 +8054,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8957,7 +8076,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8980,7 +8098,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9003,7 +8120,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9026,7 +8142,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9049,7 +8164,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; 
CHECK-LABEL: test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9072,7 +8186,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9095,7 +8208,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9118,7 +8230,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9141,7 +8252,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9164,7 +8274,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9187,7 +8296,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9210,7 +8318,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, 
i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9233,7 +8340,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9256,7 +8362,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9279,7 +8384,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9302,7 +8406,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9325,7 +8428,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9348,7 +8450,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9371,7 +8472,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr 
%base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9394,7 +8494,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9417,7 +8516,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9440,7 +8538,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9463,7 +8560,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9486,7 +8582,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9509,7 +8604,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9532,7 +8626,6 @@ entry: ret void } - define void 
@test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -9555,7 +8648,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -9578,7 +8670,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -9601,7 +8692,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9624,7 +8714,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9647,7 +8736,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9670,7 +8758,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9693,7 +8780,6 @@ entry: ret 
void } - define void @test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9716,7 +8802,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9739,7 +8824,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9762,7 +8846,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9785,7 +8868,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9808,7 +8890,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9831,7 +8912,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9854,7 
+8934,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9877,7 +8956,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9900,7 +8978,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9923,7 +9000,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9946,7 +9022,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9969,7 +9044,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9992,7 +9066,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # 
%entry @@ -10015,7 +9088,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10038,7 +9110,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10061,7 +9132,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10084,7 +9154,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10107,7 +9176,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10130,7 +9198,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10153,7 +9220,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: 
; CHECK: # %bb.0: # %entry @@ -10176,7 +9242,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10199,7 +9264,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10222,7 +9286,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10245,7 +9308,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10268,7 +9330,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10291,7 +9352,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -10314,7 +9374,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -10337,7 +9396,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -10360,7 +9418,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -10383,7 +9440,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -10406,7 +9462,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -10429,7 +9484,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -10452,7 +9506,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -10475,7 +9528,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", , 
2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -10498,7 +9550,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10521,7 +9572,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10544,7 +9594,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10567,7 +9616,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10590,7 +9638,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10613,7 +9660,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10636,7 +9682,6 @@ entry: ret void } - define void 
@test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -10659,7 +9704,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -10682,7 +9726,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -10705,7 +9748,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -10728,7 +9770,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -10751,7 +9792,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -10774,7 +9814,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10797,7 
+9836,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10820,7 +9858,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10843,7 +9880,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10866,7 +9902,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10889,7 +9924,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10912,7 +9946,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -10935,7 +9968,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; 
CHECK: # %bb.0: # %entry @@ -10958,7 +9990,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -10981,7 +10012,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -11004,7 +10034,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -11027,7 +10056,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -11050,7 +10078,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11073,7 +10100,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11096,7 +10122,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11119,7 +10144,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11142,7 +10166,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11165,7 +10188,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11188,7 +10210,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -11211,7 +10232,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -11234,7 +10254,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -11257,7 +10276,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr 
%base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11280,7 +10298,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11303,7 +10320,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11326,7 +10342,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11349,7 +10364,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11372,7 +10386,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11395,7 +10408,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -11418,7 +10430,6 @@ entry: ret void } - define void 
@test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -11441,7 +10452,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -11464,7 +10474,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11487,7 +10496,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11510,7 +10518,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11533,7 +10540,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11556,7 +10562,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11579,7 
+10584,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11602,7 +10606,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -11625,7 +10628,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -11648,7 +10650,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -11671,7 +10672,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11694,7 +10694,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11717,7 +10716,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11740,7 +10738,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11763,7 +10760,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11786,7 +10782,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11809,7 +10804,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -11832,7 +10826,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -11855,7 +10848,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv64.ll index 70fb9c2b348d3..a63b6d7ed7efc 100644 --- 
a/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv64.ll @@ -2,9 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zve64d,+f,+d,+zvfh,+zvfbfmin \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,9 +24,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -52,9 +46,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -77,9 +68,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare 
void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -102,9 +90,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -127,9 +112,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -152,9 +134,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32: ; CHECK: # 
%bb.0: # %entry @@ -177,9 +156,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -202,9 +178,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -227,9 +200,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -252,9 +222,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void 
@test_vsoxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -277,9 +244,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -302,9 +266,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -327,9 +288,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -352,9 +310,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -377,9 +332,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -402,9 +354,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -427,9 +376,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void 
@test_vsoxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -452,9 +398,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -477,9 +420,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -502,9 +442,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -527,9 +464,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -552,9 +486,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -577,9 +508,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -602,9 +530,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) 
%val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -627,9 +552,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -652,9 +574,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -677,9 +596,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -702,9 +618,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void 
@llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -727,9 +640,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -752,9 +662,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -777,9 +684,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32: ; CHECK: # 
%bb.0: # %entry @@ -802,9 +706,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -827,9 +728,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -852,9 +750,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -877,9 +772,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void 
@test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -902,9 +794,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -927,9 +816,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -952,9 +838,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -977,9 +860,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1002,9 +882,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1027,9 +904,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1052,9 +926,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void 
@test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1077,9 +948,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1102,9 +970,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1127,9 +992,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1152,9 +1014,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1177,9 +1036,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1202,9 +1058,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1227,9 +1080,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16(target("riscv.vector.tuple", 
, 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1252,9 +1102,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1277,9 +1124,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1302,9 +1146,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1327,9 +1168,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void 
@llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1352,9 +1190,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1377,9 +1212,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1402,9 +1234,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vsoxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1427,9 +1256,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1452,9 +1278,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1477,9 +1300,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1502,9 +1322,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void 
@llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1527,9 +1344,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1552,9 +1366,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1577,9 +1388,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8: ; CHECK: 
# %bb.0: # %entry @@ -1602,9 +1410,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1627,9 +1432,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1652,9 +1454,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1677,9 +1476,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void 
@test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1702,9 +1498,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1727,9 +1520,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1752,9 +1542,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1777,9 +1564,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1802,9 +1586,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1827,9 +1608,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1852,9 +1630,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i64(target("riscv.vector.tuple", 
, 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1877,9 +1652,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1902,9 +1674,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1927,9 +1696,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1952,9 +1718,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void 
@llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1977,9 +1740,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -2002,9 +1762,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2027,9 +1784,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32: ; CHECK: 
# %bb.0: # %entry @@ -2052,9 +1806,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -2077,9 +1828,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -2102,9 +1850,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2127,9 +1872,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void 
@test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -2152,9 +1894,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -2177,9 +1916,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -2202,9 +1938,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2227,9 +1960,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -2252,9 +1982,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -2277,9 +2004,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2302,9 +2026,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16(target("riscv.vector.tuple", 
, 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2327,9 +2048,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2352,9 +2070,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -2377,9 +2092,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -2402,9 +2114,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void 
@llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2427,9 +2136,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -2452,9 +2158,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -2477,9 +2180,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8: ; CHECK: 
# %bb.0: # %entry @@ -2502,9 +2202,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2527,9 +2224,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -2552,9 +2246,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -2577,9 +2268,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void 
@test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -2602,9 +2290,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2627,9 +2312,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -2652,9 +2334,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -2677,9 +2356,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2702,9 +2378,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2727,9 +2400,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2752,9 +2422,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i64(target("riscv.vector.tuple", 
, 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -2777,9 +2444,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -2802,9 +2466,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2827,9 +2488,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -2852,9 +2510,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void 
@llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -2877,9 +2532,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -2902,9 +2554,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2927,9 +2576,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32: ; CHECK: 
# %bb.0: # %entry @@ -2952,9 +2598,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -2977,9 +2620,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3002,9 +2642,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3027,9 +2664,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void 
@test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -3052,9 +2686,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -3077,9 +2708,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3102,9 +2730,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3127,9 +2752,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -3152,9 +2774,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -3177,9 +2796,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3202,9 +2818,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void 
@test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3227,9 +2840,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -3252,9 +2862,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -3277,9 +2884,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3302,9 +2906,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3327,9 +2928,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -3352,9 +2950,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -3377,9 +2972,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void 
@test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3402,9 +2994,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3427,9 +3016,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -3452,9 +3038,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -3477,9 +3060,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3502,9 +3082,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -3527,9 +3104,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -3552,9 +3126,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void 
@test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3577,9 +3148,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3602,9 +3170,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -3627,9 +3192,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -3652,9 +3214,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3677,9 +3236,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3702,9 +3258,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -3727,9 +3280,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void 
@test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -3752,9 +3302,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3777,9 +3324,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3802,9 +3346,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -3827,9 +3368,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -3852,9 +3390,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3877,9 +3412,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3902,9 +3434,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void 
@test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -3927,9 +3456,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -3952,9 +3478,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3977,9 +3500,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4002,9 +3522,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4027,9 +3544,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -4052,9 +3566,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4077,9 +3588,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void 
@test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4102,9 +3610,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4127,9 +3632,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -4152,9 +3654,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4177,9 +3676,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4202,9 +3698,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4227,9 +3720,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -4252,9 +3742,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void 
@test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4277,9 +3764,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -4302,9 +3786,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -4327,9 +3808,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -4352,9 +3830,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4377,9 +3852,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4402,9 +3874,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4427,9 +3896,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void 
@test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -4452,9 +3918,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4477,9 +3940,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4502,9 +3962,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4527,9 +3984,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -4552,9 +4006,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4577,9 +4028,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4602,9 +4050,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void 
@test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4627,9 +4072,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -4652,9 +4094,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4677,9 +4116,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4702,9 +4138,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4727,9 +4160,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -4752,9 +4182,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4777,9 +4204,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void 
@test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4802,9 +4226,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4827,9 +4248,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -4852,9 +4270,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4877,9 +4292,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4902,9 +4314,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4927,9 +4336,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -4952,9 +4358,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void 
@test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4977,9 +4380,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5002,9 +4402,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5027,9 +4424,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -5052,9 +4446,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5077,9 +4468,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5102,9 +4490,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5127,9 +4512,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void 
@test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -5152,9 +4534,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5177,9 +4556,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -5202,9 +4578,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -5227,9 +4600,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -5252,9 +4622,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5277,9 +4644,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5302,9 +4666,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void 
@test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5327,9 +4688,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -5352,9 +4710,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5377,9 +4732,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5402,9 +4754,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5427,9 +4776,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -5452,9 +4798,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5477,9 +4820,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void 
@test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -5502,9 +4842,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -5527,9 +4864,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -5552,9 +4886,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5577,9 +4908,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5602,9 +4930,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5627,9 +4952,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -5652,9 +4974,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void 
@test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5677,9 +4996,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5702,9 +5018,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5727,9 +5040,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -5752,9 +5062,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5777,9 +5084,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -5802,9 +5106,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -5827,9 +5128,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void 
@test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -5852,9 +5150,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -5877,9 +5172,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -5902,9 +5194,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -5927,9 +5216,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -5952,9 +5238,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5977,9 +5260,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6002,9 +5282,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void 
@test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6027,9 +5304,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -6052,9 +5326,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6077,9 +5348,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6102,9 +5370,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6127,9 +5392,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -6152,9 +5414,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -6177,9 +5436,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void 
@test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -6202,9 +5458,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -6227,9 +5480,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -6252,9 +5502,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6277,9 +5524,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6302,9 +5546,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6327,9 +5568,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -6352,9 +5590,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void 
@test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6377,9 +5612,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6402,9 +5634,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6427,9 +5656,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -6452,9 +5678,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -6477,9 +5700,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -6502,9 +5722,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -6527,9 +5744,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void 
@test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -6552,9 +5766,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6577,9 +5788,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6602,9 +5810,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6627,9 +5832,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -6652,9 +5854,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6677,9 +5876,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6702,9 +5898,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void 
@test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6727,9 +5920,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -6752,9 +5942,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6777,9 +5964,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6802,9 +5986,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6827,9 +6008,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -6852,9 +6030,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6877,9 +6052,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void 
@test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6902,9 +6074,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6927,9 +6096,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -6952,9 +6118,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6977,9 +6140,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7002,9 +6162,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7027,9 +6184,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -7052,9 +6206,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void 
@test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7077,9 +6228,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7102,9 +6250,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7127,9 +6272,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -7152,9 +6294,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7177,9 +6316,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7202,9 +6338,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7227,9 +6360,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void 
@test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -7252,9 +6382,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7277,9 +6404,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7302,9 +6426,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7327,9 +6448,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -7352,9 +6470,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7377,9 +6492,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7402,9 +6514,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void 
@test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7427,9 +6536,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -7452,9 +6558,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7477,9 +6580,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7502,9 +6602,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7527,9 +6624,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -7552,9 +6646,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -7577,9 +6668,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void 
@test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -7602,9 +6690,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -7627,9 +6712,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -7652,9 +6734,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7677,9 +6756,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7702,9 +6778,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7727,9 +6800,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -7752,9 +6822,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void 
@test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7777,9 +6844,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7802,9 +6866,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7827,9 +6888,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -7852,9 +6910,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7877,9 +6932,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7902,9 +6954,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7927,9 +6976,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void 
@test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -7952,9 +6998,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7977,9 +7020,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8002,9 +7042,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8027,9 +7064,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -8052,9 +7086,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8077,9 +7108,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8102,9 +7130,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void 
@test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8127,9 +7152,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -8152,9 +7174,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8177,9 +7196,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8202,9 +7218,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8227,9 +7240,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -8252,9 +7262,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8277,9 +7284,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void 
@test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8302,9 +7306,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8327,9 +7328,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -8352,9 +7350,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8377,9 +7372,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8402,9 +7394,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8427,9 +7416,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -8452,7 +7438,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8475,7 +7460,6 @@ entry: ret void } - define void 
@test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8498,7 +7482,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8521,7 +7504,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -8544,7 +7526,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8567,7 +7548,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8590,7 +7570,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8613,7 +7592,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -8636,7 +7614,6 @@ entry: ret void } 
- define void @test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -8659,7 +7636,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -8682,7 +7658,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -8705,7 +7680,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -8728,7 +7702,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -8751,7 +7724,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -8774,7 +7746,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -8797,7 +7768,6 @@ 
entry: ret void } - define void @test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -8820,7 +7790,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -8843,7 +7812,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -8866,7 +7834,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -8889,7 +7856,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8912,7 +7878,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8935,7 +7900,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # 
%bb.0: # %entry @@ -8958,7 +7922,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -8981,7 +7944,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9004,7 +7966,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9027,7 +7988,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9050,7 +8010,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -9073,7 +8032,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -9096,7 +8054,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -9119,7 +8076,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -9142,7 +8098,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -9165,7 +8120,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -9188,7 +8142,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -9211,7 +8164,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -9234,7 +8186,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -9257,7 +8208,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 
%vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9280,7 +8230,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9303,7 +8252,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9326,7 +8274,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -9349,7 +8296,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9372,7 +8318,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9395,7 +8340,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9418,7 +8362,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr 
%base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -9441,7 +8384,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -9464,7 +8406,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -9487,7 +8428,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -9510,7 +8450,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -9533,7 +8472,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -9556,7 +8494,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -9579,7 +8516,6 @@ entry: ret void } - define void 
@test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -9602,7 +8538,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -9625,7 +8560,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9648,7 +8582,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9671,7 +8604,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9694,7 +8626,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -9717,7 +8648,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9740,7 +8670,6 @@ entry: ret void 
} - define void @test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9763,7 +8692,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9786,7 +8714,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -9809,7 +8736,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -9832,7 +8758,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -9855,7 +8780,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -9878,7 +8802,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -9901,7 +8824,6 @@ 
entry: ret void } - define void @test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9924,7 +8846,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9947,7 +8868,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9970,7 +8890,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -9993,7 +8912,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10016,7 +8934,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10039,7 +8956,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ 
-10062,7 +8978,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -10085,7 +9000,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -10108,7 +9022,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -10131,7 +9044,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -10154,7 +9066,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -10177,7 +9088,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10200,7 +9110,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # 
%bb.0: # %entry @@ -10223,7 +9132,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10246,7 +9154,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -10269,7 +9176,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10292,7 +9198,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10315,7 +9220,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10338,7 +9242,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -10361,7 +9264,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -10384,7 +9286,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -10407,7 +9308,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -10430,7 +9330,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -10453,7 +9352,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10476,7 +9374,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10499,7 +9396,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10522,7 +9418,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 
%vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -10545,7 +9440,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10568,7 +9462,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10591,7 +9484,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10614,7 +9506,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -10637,7 +9528,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -10660,7 +9550,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -10683,7 +9572,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr 
%base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -10706,7 +9594,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -10729,7 +9616,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10752,7 +9638,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10775,7 +9660,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10798,7 +9682,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -10821,7 +9704,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10844,7 +9726,6 @@ entry: ret void } - define void 
@test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10867,7 +9748,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10890,7 +9770,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -10913,7 +9792,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -10936,7 +9814,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -10959,7 +9836,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -10982,7 +9858,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -11005,7 +9880,6 @@ 
entry: ret void } - define void @test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -11028,7 +9902,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -11051,7 +9924,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -11074,7 +9946,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -11097,7 +9968,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11120,7 +9990,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11143,7 +10012,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # 
%entry @@ -11166,7 +10034,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -11189,7 +10056,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11212,7 +10078,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11235,7 +10100,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11258,7 +10122,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -11281,7 +10144,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -11304,7 +10166,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -11327,7 +10188,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -11350,7 +10210,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -11373,7 +10232,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11396,7 +10254,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11419,7 +10276,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11442,7 +10298,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -11465,7 +10320,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, 
%index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11488,7 +10342,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11511,7 +10364,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11534,7 +10386,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -11557,7 +10408,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -11580,7 +10430,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -11603,7 +10452,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -11626,7 +10474,6 @@ entry: ret void } - define void 
@test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -11649,7 +10496,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11672,7 +10518,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11695,7 +10540,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11718,7 +10562,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -11741,7 +10584,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11764,7 +10606,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11787,7 +10628,6 @@ 
entry: ret void } - define void @test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11810,7 +10650,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -11833,7 +10672,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11856,7 +10694,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11879,7 +10716,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11902,7 +10738,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -11925,7 +10760,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # 
%entry @@ -11948,7 +10782,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11971,7 +10804,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11994,7 +10826,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -12017,7 +10848,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -12040,7 +10870,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -12063,7 +10892,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -12086,7 +10914,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -12109,7 +10936,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -12132,7 +10958,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -12155,7 +10980,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -12178,7 +11002,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -12201,7 +11024,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -12224,7 +11046,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -12247,7 +11068,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, 
i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -12270,7 +11090,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -12293,7 +11112,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -12316,7 +11134,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -12339,7 +11156,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -12362,7 +11178,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -12385,7 +11200,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -12408,7 +11222,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(target("riscv.vector.tuple", , 
2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -12431,7 +11244,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -12454,7 +11266,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -12477,7 +11288,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -12500,7 +11310,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -12523,7 +11332,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -12546,7 +11354,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -12569,7 +11376,6 @@ entry: ret void } - define void 
@test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -12592,7 +11398,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -12615,7 +11420,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -12638,7 +11442,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -12661,7 +11464,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -12684,7 +11486,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -12707,7 +11508,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -12730,7 +11530,6 
@@ entry: ret void } - define void @test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -12753,7 +11552,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -12776,7 +11574,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -12799,7 +11596,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -12822,7 +11618,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -12845,7 +11640,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -12868,7 +11662,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # 
%bb.0: # %entry @@ -12891,7 +11684,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -12914,7 +11706,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -12937,7 +11728,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -12960,7 +11750,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -12983,7 +11772,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -13006,7 +11794,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -13029,7 +11816,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13052,7 +11838,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13075,7 +11860,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13098,7 +11882,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -13121,7 +11904,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13144,7 +11926,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13167,7 +11948,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13190,7 +11970,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, 
%index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -13213,7 +11992,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13236,7 +12014,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13259,7 +12036,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13282,7 +12058,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -13305,7 +12080,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13328,7 +12102,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13351,7 +12124,6 @@ entry: ret void } - define void 
@test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13374,7 +12146,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -13397,7 +12168,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13420,7 +12190,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13443,7 +12212,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13466,7 +12234,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -13489,7 +12256,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -13512,7 
+12278,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -13535,7 +12300,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -13558,7 +12322,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -13581,7 +12344,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -13604,7 +12366,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -13627,7 +12388,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -13650,7 +12410,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -13673,7 +12432,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -13696,7 +12454,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -13719,7 +12476,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -13742,7 +12498,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -13765,7 +12520,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -13788,7 +12542,6 @@ entry: ret void } - define void @test_vsoxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -13811,7 +12564,6 @@ entry: ret void } - define void 
@test_vsoxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -13834,7 +12586,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13857,7 +12608,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13880,7 +12630,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13903,7 +12652,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -13926,7 +12674,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -13949,7 +12696,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ 
-13972,7 +12718,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -13995,7 +12740,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -14018,7 +12762,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -14041,7 +12784,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -14064,7 +12806,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -14087,7 +12828,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -14110,7 +12850,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -14133,7 +12872,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -14156,7 +12894,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -14179,7 +12916,6 @@ entry: ret void } - define void @test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -14202,7 +12938,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -14225,7 +12960,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -14248,7 +12982,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -14271,7 +13004,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) 
%val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -14294,7 +13026,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -14317,7 +13048,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -14340,7 +13070,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -14363,7 +13092,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -14386,7 +13114,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -14409,7 +13136,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -14432,7 +13158,6 @@ entry: ret void } - define void 
@test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -14455,7 +13180,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -14478,7 +13202,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -14501,7 +13224,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -14524,7 +13246,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -14547,7 +13268,6 @@ entry: ret void } - define void @test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -14570,7 +13290,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ 
-14593,7 +13312,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -14616,7 +13334,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -14639,7 +13356,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -14662,7 +13378,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -14685,7 +13400,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -14708,7 +13422,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -14731,7 +13444,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -14754,7 +13466,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -14777,7 +13488,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -14800,7 +13510,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -14823,7 +13532,6 @@ entry: ret void } - define void @test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -14846,7 +13554,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -14869,7 +13576,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -14892,7 +13598,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr 
%base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -14915,7 +13620,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -14938,7 +13642,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -14961,7 +13664,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -14984,7 +13686,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -15007,7 +13708,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -15030,7 +13730,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -15053,7 +13752,6 @@ entry: ret void } - define void 
@test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -15076,7 +13774,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -15099,7 +13796,6 @@ entry: ret void } - define void @test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -15122,7 +13818,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -15145,7 +13840,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -15168,7 +13862,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -15191,7 +13884,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -15214,7 
+13906,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -15237,7 +13928,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -15260,7 +13950,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -15283,7 +13972,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -15306,7 +13994,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -15329,7 +14016,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -15352,7 +14038,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -15375,7 +14060,6 @@ entry: ret void } - define void @test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -15398,7 +14082,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -15421,7 +14104,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -15444,7 +14126,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -15467,7 +14148,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -15490,7 +14170,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -15513,7 +14192,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr 
%base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -15536,7 +14214,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -15559,7 +14236,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -15582,7 +14258,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -15605,7 +14280,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -15628,7 +14302,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -15651,7 +14324,6 @@ entry: ret void } - define void @test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-sdnode.ll index 
b83f8f0779255..c4b4d4f6b328d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsra-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsra-sdnode.ll @@ -956,4 +956,3 @@ define @vsra_vv_nxv1i8_sext_zext_mixed_trunc( %vd = call @llvm.vp.trunc.nxv1i8.nxvi16( %vc, %m, i32 %evl) ret %vd } -declare @llvm.vp.trunc.nxv1i8.nxvi16(, , i32) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-vp.ll index 961689b15b839..58d6759b34947 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsra-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsra-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.ashr.nxv8i7(, , , i32) - define @vsra_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vx_nxv8i7: ; CHECK: # %bb.0: @@ -23,8 +21,6 @@ define @vsra_vx_nxv8i7( %a, i7 signext %b, %v } -declare @llvm.vp.ashr.nxv1i8(, , , i32) - define @vsra_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv1i8: ; CHECK: # %bb.0: @@ -89,8 +85,6 @@ define @vsra_vi_nxv1i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.ashr.nxv2i8(, , , i32) - define @vsra_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv2i8: ; CHECK: # %bb.0: @@ -155,8 +149,6 @@ define @vsra_vi_nxv2i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.ashr.nxv4i8(, , , i32) - define @vsra_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv4i8: ; CHECK: # %bb.0: @@ -221,8 +213,6 @@ define @vsra_vi_nxv4i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.ashr.nxv8i8(, , , i32) - define @vsra_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv8i8: ; CHECK: # %bb.0: @@ -287,8 +277,6 @@ define @vsra_vi_nxv8i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.ashr.nxv16i8(, , , i32) - define @vsra_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv16i8: ; CHECK: # %bb.0: @@ -353,8 +341,6 @@ define 
@vsra_vi_nxv16i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.ashr.nxv32i8(, , , i32) - define @vsra_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv32i8: ; CHECK: # %bb.0: @@ -419,8 +405,6 @@ define @vsra_vi_nxv32i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.ashr.nxv64i8(, , , i32) - define @vsra_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv64i8: ; CHECK: # %bb.0: @@ -485,8 +469,6 @@ define @vsra_vi_nxv64i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.ashr.nxv1i16(, , , i32) - define @vsra_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv1i16: ; CHECK: # %bb.0: @@ -551,8 +533,6 @@ define @vsra_vi_nxv1i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.ashr.nxv2i16(, , , i32) - define @vsra_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv2i16: ; CHECK: # %bb.0: @@ -617,8 +597,6 @@ define @vsra_vi_nxv2i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.ashr.nxv4i16(, , , i32) - define @vsra_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv4i16: ; CHECK: # %bb.0: @@ -683,8 +661,6 @@ define @vsra_vi_nxv4i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.ashr.nxv8i16(, , , i32) - define @vsra_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv8i16: ; CHECK: # %bb.0: @@ -749,8 +725,6 @@ define @vsra_vi_nxv8i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.ashr.nxv16i16(, , , i32) - define @vsra_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv16i16: ; CHECK: # %bb.0: @@ -815,8 +789,6 @@ define @vsra_vi_nxv16i16_unmasked( %va, i ret %v } -declare @llvm.vp.ashr.nxv32i16(, , , i32) - define @vsra_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv32i16: ; CHECK: # %bb.0: @@ -881,8 +853,6 @@ define @vsra_vi_nxv32i16_unmasked( %va, i ret %v } -declare @llvm.vp.ashr.nxv1i32(, , , i32) - define @vsra_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv1i32: ; CHECK: # %bb.0: @@ 
-947,8 +917,6 @@ define @vsra_vi_nxv1i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.ashr.nxv2i32(, , , i32) - define @vsra_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv2i32: ; CHECK: # %bb.0: @@ -1013,8 +981,6 @@ define @vsra_vi_nxv2i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.ashr.nxv4i32(, , , i32) - define @vsra_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv4i32: ; CHECK: # %bb.0: @@ -1079,8 +1045,6 @@ define @vsra_vi_nxv4i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.ashr.nxv8i32(, , , i32) - define @vsra_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv8i32: ; CHECK: # %bb.0: @@ -1145,8 +1109,6 @@ define @vsra_vi_nxv8i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.ashr.nxv16i32(, , , i32) - define @vsra_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv16i32: ; CHECK: # %bb.0: @@ -1211,8 +1173,6 @@ define @vsra_vi_nxv16i32_unmasked( %va, i ret %v } -declare @llvm.vp.ashr.nxv1i64(, , , i32) - define @vsra_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv1i64: ; CHECK: # %bb.0: @@ -1289,8 +1249,6 @@ define @vsra_vi_nxv1i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.ashr.nxv2i64(, , , i32) - define @vsra_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1367,8 +1325,6 @@ define @vsra_vi_nxv2i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.ashr.nxv4i64(, , , i32) - define @vsra_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1445,8 +1401,6 @@ define @vsra_vi_nxv4i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.ashr.nxv6i64(, , , i32) - define @vsra_vv_nxv6i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv6i64: ; CHECK: # %bb.0: @@ -1457,8 +1411,6 @@ define @vsra_vv_nxv6i64( %va, %v } -declare @llvm.vp.ashr.nxv8i64(, , , i32) - define @vsra_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv8i64: ; 
CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra.ll b/llvm/test/CodeGen/RISCV/rvv/vsra.ll index e0e0500f6c1ae..601956c8de444 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsra.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsra.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vsra.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vsra_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vsra_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 
+103,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vsra_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vsra_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vsra_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv64i8.nxv64i8( - , - , - , - iXLen); - define @intrinsic_vsra_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a 
} -declare @llvm.riscv.vsra.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vsra_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vsra_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vsra_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ 
-475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vsra_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vsra_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -544,14 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -569,12 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv32i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vsra_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -591,14 +417,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vsra_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv1i32_nxv1i32_nxv1i32: ; 
CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vsra_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -711,12 +501,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vsra_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -733,14 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -758,12 +534,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vsra_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -780,14 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsra_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -805,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv16i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vsra_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -827,14 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vsra_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv2i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vsra_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv4i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vsra_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vsra_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv8i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vsra_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1042,12 +734,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv1i8( - , - , - iXLen, - iXLen); - define @intrinsic_vsra_vx_nxv1i8_nxv1i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1064,14 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv1i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vx_nxv1i8_nxv1i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1089,12 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv2i8( - , - , - iXLen, - iXLen); - define @intrinsic_vsra_vx_nxv2i8_nxv2i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1111,14 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv2i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vx_nxv2i8_nxv2i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsra_mask_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1136,12 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv4i8( - , - , - iXLen, - iXLen); - define @intrinsic_vsra_vx_nxv4i8_nxv4i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1158,14 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv4i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vx_nxv4i8_nxv4i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1183,12 +833,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv8i8( - , - , - iXLen, - iXLen); - define @intrinsic_vsra_vx_nxv8i8_nxv8i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1205,14 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv8i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vx_nxv8i8_nxv8i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1230,12 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv16i8( - , - , - iXLen, - iXLen); - define @intrinsic_vsra_vx_nxv16i8_nxv16i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1252,14 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv16i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vx_nxv16i8_nxv16i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1277,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv32i8( - , - , - iXLen, - iXLen); - define @intrinsic_vsra_vx_nxv32i8_nxv32i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1299,14 +915,6 @@ entry: ret %a } -declare 
@llvm.riscv.vsra.mask.nxv32i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vx_nxv32i8_nxv32i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1324,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv64i8( - , - , - iXLen, - iXLen); - define @intrinsic_vsra_vx_nxv64i8_nxv64i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -1346,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv64i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vx_nxv64i8_nxv64i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -1371,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv1i16( - , - , - iXLen, - iXLen); - define @intrinsic_vsra_vx_nxv1i16_nxv1i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1393,14 +981,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv1i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vx_nxv1i16_nxv1i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1418,12 +998,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv2i16( - , - , - iXLen, - iXLen); - define @intrinsic_vsra_vx_nxv2i16_nxv2i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1440,14 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv2i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vx_nxv2i16_nxv2i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1465,12 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv4i16( - , - , - iXLen, - iXLen); - 
define @intrinsic_vsra_vx_nxv4i16_nxv4i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1487,14 +1047,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv4i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vx_nxv4i16_nxv4i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1512,12 +1064,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv8i16( - , - , - iXLen, - iXLen); - define @intrinsic_vsra_vx_nxv8i16_nxv8i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1534,14 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv8i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vx_nxv8i16_nxv8i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1559,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv16i16( - , - , - iXLen, - iXLen); - define @intrinsic_vsra_vx_nxv16i16_nxv16i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1581,14 +1113,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv16i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vx_nxv16i16_nxv16i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1606,12 +1130,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv32i16( - , - , - iXLen, - iXLen); - define @intrinsic_vsra_vx_nxv32i16_nxv32i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1628,14 +1146,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv32i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vx_nxv32i16_nxv32i16( 
%0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1653,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv1i32( - , - , - iXLen, - iXLen); - define @intrinsic_vsra_vx_nxv1i32_nxv1i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1675,14 +1179,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv1i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vx_nxv1i32_nxv1i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1700,12 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv2i32( - , - , - iXLen, - iXLen); - define @intrinsic_vsra_vx_nxv2i32_nxv2i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1722,14 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv2i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vx_nxv2i32_nxv2i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1747,12 +1229,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv4i32( - , - , - iXLen, - iXLen); - define @intrinsic_vsra_vx_nxv4i32_nxv4i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1769,14 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv4i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vx_nxv4i32_nxv4i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1794,12 +1262,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv8i32( - , - , - iXLen, - iXLen); - define @intrinsic_vsra_vx_nxv8i32_nxv8i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vsra_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1816,14 +1278,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv8i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vx_nxv8i32_nxv8i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1841,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv16i32( - , - , - iXLen, - iXLen); - define @intrinsic_vsra_vx_nxv16i32_nxv16i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1863,14 +1311,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv16i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vx_nxv16i32_nxv16i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1888,12 +1328,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv1i64( - , - , - iXLen, - iXLen); - define @intrinsic_vsra_vx_nxv1i64_nxv1i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1910,14 +1344,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv1i64( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vx_nxv1i64_nxv1i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1935,12 +1361,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv2i64( - , - , - iXLen, - iXLen); - define @intrinsic_vsra_vx_nxv2i64_nxv2i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1957,14 +1377,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv2i64( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vx_nxv2i64_nxv2i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i64_nxv2i64: ; 
CHECK: # %bb.0: # %entry @@ -1982,12 +1394,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv4i64( - , - , - iXLen, - iXLen); - define @intrinsic_vsra_vx_nxv4i64_nxv4i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -2004,14 +1410,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv4i64( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vx_nxv4i64_nxv4i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -2029,12 +1427,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.nxv8i64( - , - , - iXLen, - iXLen); - define @intrinsic_vsra_vx_nxv8i64_nxv8i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -2051,14 +1443,6 @@ entry: ret %a } -declare @llvm.riscv.vsra.mask.nxv8i64( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsra_mask_vx_nxv8i64_nxv8i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-vp.ll index f52c02d5d935a..b57f0bee21f5a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsrl-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.lshr.nxv8i7(, , , i32) - define @vsrl_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_nxv8i7: ; CHECK: # %bb.0: @@ -22,8 +20,6 @@ define @vsrl_vx_nxv8i7( %a, i7 signext %b, %v } -declare @llvm.vp.lshr.nxv1i8(, , , i32) - define @vsrl_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv1i8: ; CHECK: # %bb.0: @@ -88,8 +84,6 @@ define @vsrl_vi_nxv1i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.lshr.nxv2i8(, , , i32) - define 
@vsrl_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv2i8: ; CHECK: # %bb.0: @@ -154,8 +148,6 @@ define @vsrl_vi_nxv2i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.lshr.nxv4i8(, , , i32) - define @vsrl_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv4i8: ; CHECK: # %bb.0: @@ -220,8 +212,6 @@ define @vsrl_vi_nxv4i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.lshr.nxv8i8(, , , i32) - define @vsrl_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv8i8: ; CHECK: # %bb.0: @@ -286,8 +276,6 @@ define @vsrl_vi_nxv8i8_unmasked( %va, i32 zer ret %v } -declare @llvm.vp.lshr.nxv16i8(, , , i32) - define @vsrl_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv16i8: ; CHECK: # %bb.0: @@ -352,8 +340,6 @@ define @vsrl_vi_nxv16i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.lshr.nxv32i8(, , , i32) - define @vsrl_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv32i8: ; CHECK: # %bb.0: @@ -418,8 +404,6 @@ define @vsrl_vi_nxv32i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.lshr.nxv64i8(, , , i32) - define @vsrl_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv64i8: ; CHECK: # %bb.0: @@ -484,8 +468,6 @@ define @vsrl_vi_nxv64i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.lshr.nxv1i16(, , , i32) - define @vsrl_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv1i16: ; CHECK: # %bb.0: @@ -550,8 +532,6 @@ define @vsrl_vi_nxv1i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.lshr.nxv2i16(, , , i32) - define @vsrl_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv2i16: ; CHECK: # %bb.0: @@ -616,8 +596,6 @@ define @vsrl_vi_nxv2i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.lshr.nxv4i16(, , , i32) - define @vsrl_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv4i16: ; CHECK: # %bb.0: @@ -682,8 +660,6 @@ define @vsrl_vi_nxv4i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.lshr.nxv8i16(, 
, , i32) - define @vsrl_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv8i16: ; CHECK: # %bb.0: @@ -748,8 +724,6 @@ define @vsrl_vi_nxv8i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.lshr.nxv16i16(, , , i32) - define @vsrl_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv16i16: ; CHECK: # %bb.0: @@ -814,8 +788,6 @@ define @vsrl_vi_nxv16i16_unmasked( %va, i ret %v } -declare @llvm.vp.lshr.nxv32i16(, , , i32) - define @vsrl_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv32i16: ; CHECK: # %bb.0: @@ -880,8 +852,6 @@ define @vsrl_vi_nxv32i16_unmasked( %va, i ret %v } -declare @llvm.vp.lshr.nxv1i32(, , , i32) - define @vsrl_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv1i32: ; CHECK: # %bb.0: @@ -946,8 +916,6 @@ define @vsrl_vi_nxv1i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.lshr.nxv2i32(, , , i32) - define @vsrl_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv2i32: ; CHECK: # %bb.0: @@ -1012,8 +980,6 @@ define @vsrl_vi_nxv2i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.lshr.nxv4i32(, , , i32) - define @vsrl_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv4i32: ; CHECK: # %bb.0: @@ -1078,8 +1044,6 @@ define @vsrl_vi_nxv4i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.lshr.nxv8i32(, , , i32) - define @vsrl_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv8i32: ; CHECK: # %bb.0: @@ -1144,8 +1108,6 @@ define @vsrl_vi_nxv8i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.lshr.nxv16i32(, , , i32) - define @vsrl_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv16i32: ; CHECK: # %bb.0: @@ -1210,8 +1172,6 @@ define @vsrl_vi_nxv16i32_unmasked( %va, i ret %v } -declare @llvm.vp.lshr.nxv1i64(, , , i32) - define @vsrl_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv1i64: ; CHECK: # %bb.0: @@ -1288,8 +1248,6 @@ define @vsrl_vi_nxv1i64_unmasked( %va, i32 ret %v } 
-declare @llvm.vp.lshr.nxv2i64(, , , i32) - define @vsrl_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1366,8 +1324,6 @@ define @vsrl_vi_nxv2i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.lshr.nxv4i64(, , , i32) - define @vsrl_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1444,8 +1400,6 @@ define @vsrl_vi_nxv4i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.lshr.nxv5i64(, , , i32) - define @vsrl_vv_nxv5i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv5i64: ; CHECK: # %bb.0: @@ -1456,8 +1410,6 @@ define @vsrl_vv_nxv5i64( %va, %v } -declare @llvm.vp.lshr.nxv8i64(, , , i32) - define @vsrl_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsrl.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl.ll index acc5322e0ecb7..bd4a1d1280a33 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsrl.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsrl.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vsrl.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vsrl_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare 
@llvm.riscv.vsrl.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vsrl_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vsrl_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vsrl_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare 
@llvm.riscv.vsrl.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vsrl_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv64i8.nxv64i8( - , - , - , - iXLen); - define @intrinsic_vsrl_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vsrl_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vsrl_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare 
@llvm.riscv.vsrl.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vsrl_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vsrl_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vsrl_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -544,14 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ 
-569,12 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv32i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vsrl_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -591,14 +417,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vsrl_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vsrl_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -711,12 +501,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vsrl_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # 
%entry @@ -733,14 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -758,12 +534,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vsrl_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -780,14 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -805,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv16i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vsrl_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -827,14 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vsrl_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsrl_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv2i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vsrl_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv4i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vsrl_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv8i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vsrl_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1042,12 +734,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv1i8( - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_vx_nxv1i8_nxv1i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vsrl_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1064,14 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv1i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vx_nxv1i8_nxv1i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1089,12 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv2i8( - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_vx_nxv2i8_nxv2i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1111,14 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv2i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vx_nxv2i8_nxv2i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1136,12 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv4i8( - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_vx_nxv4i8_nxv4i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1158,14 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv4i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vx_nxv4i8_nxv4i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1183,12 +833,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv8i8( - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_vx_nxv8i8_nxv8i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1205,14 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv8i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vx_nxv8i8_nxv8i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1230,12 +866,6 @@ entry: 
ret %a } -declare @llvm.riscv.vsrl.nxv16i8( - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_vx_nxv16i8_nxv16i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1252,14 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv16i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vx_nxv16i8_nxv16i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1277,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv32i8( - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_vx_nxv32i8_nxv32i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1299,14 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv32i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vx_nxv32i8_nxv32i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1324,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv64i8( - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_vx_nxv64i8_nxv64i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -1346,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv64i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vx_nxv64i8_nxv64i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -1371,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv1i16( - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_vx_nxv1i16_nxv1i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1393,14 +981,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv1i16( - , - , - iXLen, - , - iXLen, - iXLen); - 
define @intrinsic_vsrl_mask_vx_nxv1i16_nxv1i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1418,12 +998,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv2i16( - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_vx_nxv2i16_nxv2i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1440,14 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv2i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vx_nxv2i16_nxv2i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1465,12 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv4i16( - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_vx_nxv4i16_nxv4i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1487,14 +1047,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv4i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vx_nxv4i16_nxv4i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1512,12 +1064,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv8i16( - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_vx_nxv8i16_nxv8i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1534,14 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv8i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vx_nxv8i16_nxv8i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1559,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv16i16( - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_vx_nxv16i16_nxv16i16( %0, iXLen %1, iXLen 
%2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1581,14 +1113,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv16i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vx_nxv16i16_nxv16i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1606,12 +1130,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv32i16( - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_vx_nxv32i16_nxv32i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1628,14 +1146,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv32i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vx_nxv32i16_nxv32i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1653,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv1i32( - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_vx_nxv1i32_nxv1i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1675,14 +1179,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv1i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vx_nxv1i32_nxv1i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1700,12 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv2i32( - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_vx_nxv2i32_nxv2i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1722,14 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv2i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vx_nxv2i32_nxv2i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsrl_mask_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1747,12 +1229,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv4i32( - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_vx_nxv4i32_nxv4i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1769,14 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv4i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vx_nxv4i32_nxv4i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1794,12 +1262,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv8i32( - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_vx_nxv8i32_nxv8i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1816,14 +1278,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv8i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vx_nxv8i32_nxv8i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1841,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv16i32( - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_vx_nxv16i32_nxv16i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1863,14 +1311,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv16i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vx_nxv16i32_nxv16i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1888,12 +1328,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv1i64( - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_vx_nxv1i64_nxv1i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ 
-1910,14 +1344,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv1i64( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vx_nxv1i64_nxv1i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1935,12 +1361,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv2i64( - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_vx_nxv2i64_nxv2i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1957,14 +1377,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv2i64( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vx_nxv2i64_nxv2i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1982,12 +1394,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv4i64( - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_vx_nxv4i64_nxv4i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -2004,14 +1410,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv4i64( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vx_nxv4i64_nxv4i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -2029,12 +1427,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.nxv8i64( - , - , - iXLen, - iXLen); - define @intrinsic_vsrl_vx_nxv8i64_nxv8i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -2051,14 +1443,6 @@ entry: ret %a } -declare @llvm.riscv.vsrl.mask.nxv8i64( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vsrl_mask_vx_nxv8i64_nxv8i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsse.ll 
b/llvm/test/CodeGen/RISCV/rvv/vsse.ll index 770e06749c348..f11fb6a6613de 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsse.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsse.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare void @llvm.riscv.vsse.nxv1i64( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv1i64_nxv1i64( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv1i64( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv1i64_nxv1i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -67,12 +54,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv2i64( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv2i64_nxv2i64( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -89,13 +70,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv2i64( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv2i64_nxv2i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -113,12 +87,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv4i64( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv4i64_nxv4i64( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -135,13 +103,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv4i64( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv4i64_nxv4i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i64_nxv4i64: ; CHECK: # 
%bb.0: # %entry @@ -159,12 +120,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv8i64( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv8i64_nxv8i64( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -181,13 +136,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv8i64( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv8i64_nxv8i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -205,12 +153,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv1f64( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv1f64_nxv1f64( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -227,13 +169,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv1f64( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv1f64_nxv1f64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry @@ -251,12 +186,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv2f64( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv2f64_nxv2f64( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -273,13 +202,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv2f64( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv2f64_nxv2f64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry @@ -297,12 +219,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv4f64( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv4f64_nxv4f64( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv4f64_nxv4f64: ; 
CHECK: # %bb.0: # %entry @@ -319,13 +235,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv4f64( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv4f64_nxv4f64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry @@ -343,12 +252,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv8f64( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv8f64_nxv8f64( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -365,13 +268,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv8f64( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv8f64_nxv8f64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry @@ -389,12 +285,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv1i32( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv1i32_nxv1i32( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -411,13 +301,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv1i32( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv1i32_nxv1i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -435,12 +318,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv2i32( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv2i32_nxv2i32( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -457,13 +334,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv2i32( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv2i32_nxv2i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsse_mask_v_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -481,12 +351,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv4i32( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv4i32_nxv4i32( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -503,13 +367,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv4i32( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv4i32_nxv4i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -527,12 +384,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv8i32( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv8i32_nxv8i32( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -549,13 +400,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv8i32( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv8i32_nxv8i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -573,12 +417,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv16i32( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv16i32_nxv16i32( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -595,13 +433,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv16i32( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv16i32_nxv16i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -619,12 +450,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv1f32( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv1f32_nxv1f32( %0, ptr %1, iXLen %2, iXLen %3) 
nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -641,13 +466,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv1f32( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv1f32_nxv1f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry @@ -665,12 +483,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv2f32( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv2f32_nxv2f32( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -687,13 +499,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv2f32( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv2f32_nxv2f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry @@ -711,12 +516,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv4f32( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv4f32_nxv4f32( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -733,13 +532,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv4f32( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv4f32_nxv4f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry @@ -757,12 +549,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv8f32( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv8f32_nxv8f32( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -779,13 +565,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv8f32( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv8f32_nxv8f32( %0, ptr 
%1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry @@ -803,12 +582,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv16f32( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv16f32_nxv16f32( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -825,13 +598,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv16f32( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv16f32_nxv16f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry @@ -849,12 +615,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv1i16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv1i16_nxv1i16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -871,13 +631,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv1i16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv1i16_nxv1i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -895,12 +648,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv2i16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv2i16_nxv2i16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -917,13 +664,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv2i16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv2i16_nxv2i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -941,12 +681,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv4i16( - , - ptr, - iXLen, - iXLen); - define void 
@intrinsic_vsse_v_nxv4i16_nxv4i16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -963,13 +697,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv4i16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv4i16_nxv4i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -987,12 +714,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv8i16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv8i16_nxv8i16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1009,13 +730,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv8i16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv8i16_nxv8i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1033,12 +747,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv16i16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv16i16_nxv16i16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1055,13 +763,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv16i16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv16i16_nxv16i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1079,12 +780,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv32i16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv32i16_nxv32i16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1101,13 +796,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv32i16( - , - 
ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv32i16_nxv32i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1125,12 +813,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv1f16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv1f16_nxv1f16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1147,13 +829,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv1f16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv1f16_nxv1f16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -1171,12 +846,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv2f16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv2f16_nxv2f16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -1193,13 +862,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv2f16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv2f16_nxv2f16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry @@ -1217,12 +879,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv4f16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv4f16_nxv4f16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -1239,13 +895,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv4f16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv4f16_nxv4f16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry @@ -1263,12 +912,6 @@ entry: ret void } -declare 
void @llvm.riscv.vsse.nxv8f16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv8f16_nxv8f16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -1285,13 +928,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv8f16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv8f16_nxv8f16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry @@ -1309,12 +945,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv16f16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv16f16_nxv16f16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -1331,13 +961,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv16f16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv16f16_nxv16f16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry @@ -1355,12 +978,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv32f16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv32f16_nxv32f16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -1377,13 +994,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv32f16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv32f16_nxv32f16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry @@ -1401,12 +1011,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv1bf16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv1bf16_nxv1bf16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ 
-1423,13 +1027,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv1bf16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv1bf16_nxv1bf16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1bf16_nxv1bf16: ; CHECK: # %bb.0: # %entry @@ -1447,12 +1044,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv2bf16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv2bf16_nxv2bf16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -1469,13 +1060,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv2bf16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv2bf16_nxv2bf16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2bf16_nxv2bf16: ; CHECK: # %bb.0: # %entry @@ -1493,12 +1077,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv4bf16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv4bf16_nxv4bf16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -1515,13 +1093,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv4bf16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv4bf16_nxv4bf16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4bf16_nxv4bf16: ; CHECK: # %bb.0: # %entry @@ -1539,12 +1110,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv8bf16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv8bf16_nxv8bf16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -1561,13 +1126,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv8bf16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv8bf16_nxv8bf16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vsse_mask_v_nxv8bf16_nxv8bf16: ; CHECK: # %bb.0: # %entry @@ -1585,12 +1143,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv16bf16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv16bf16_nxv16bf16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -1607,13 +1159,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv16bf16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv16bf16_nxv16bf16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16bf16_nxv16bf16: ; CHECK: # %bb.0: # %entry @@ -1631,12 +1176,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv32bf16( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv32bf16_nxv32bf16( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -1653,13 +1192,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv32bf16( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv32bf16_nxv32bf16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32bf16_nxv32bf16: ; CHECK: # %bb.0: # %entry @@ -1677,12 +1209,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv1i8( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv1i8_nxv1i8( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1699,13 +1225,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv1i8( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv1i8_nxv1i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1723,12 +1242,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv2i8( - , - ptr, - iXLen, - iXLen); - define void 
@intrinsic_vsse_v_nxv2i8_nxv2i8( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1745,13 +1258,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv2i8( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv2i8_nxv2i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1769,12 +1275,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv4i8( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv4i8_nxv4i8( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1791,13 +1291,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv4i8( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv4i8_nxv4i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1815,12 +1308,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv8i8( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv8i8_nxv8i8( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1837,13 +1324,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv8i8( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv8i8_nxv8i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1861,12 +1341,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv16i8( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv16i8_nxv16i8( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1883,13 +1357,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv16i8( - , - ptr, - iXLen, - , - iXLen); - define 
void @intrinsic_vsse_mask_v_nxv16i8_nxv16i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1907,12 +1374,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv32i8( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv32i8_nxv32i8( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1929,13 +1390,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv32i8( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv32i8_nxv32i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1953,12 +1407,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.nxv64i8( - , - ptr, - iXLen, - iXLen); - define void @intrinsic_vsse_v_nxv64i8_nxv64i8( %0, ptr %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -1975,13 +1423,6 @@ entry: ret void } -declare void @llvm.riscv.vsse.mask.nxv64i8( - , - ptr, - iXLen, - , - iXLen); - define void @intrinsic_vsse_mask_v_nxv64i8_nxv64i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsseg-rv32.ll index 7b80d45a924d3..71e959b84b560 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsseg-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsseg-rv32.ll @@ -2,9 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zve64d,+f,+d,+zvfh,+zvfbfmin \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32) -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) - define void 
@test_vsseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t: ; CHECK: # %bb.0: # %entry @@ -27,9 +24,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32) -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) - define void @test_vsseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -52,9 +46,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32) -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) - define void @test_vsseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -77,9 +68,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32) -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) - define void @test_vsseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -102,9 +90,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32) -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) - define void 
@test_vsseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -127,9 +112,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32) -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) - define void @test_vsseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -152,9 +134,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32) -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32) - define void @test_vsseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t: ; CHECK: # %bb.0: # %entry @@ -177,9 +156,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32) -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32) - define void @test_vsseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -202,9 +178,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32) -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32) - define void 
@test_vsseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -227,9 +200,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32) -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32) - define void @test_vsseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -252,9 +222,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32) -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32) - define void @test_vsseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -277,9 +244,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32) -declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32) - define void @test_vsseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t: ; CHECK: # %bb.0: # %entry @@ -302,9 +266,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32) -declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32) - define void 
@test_vsseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -327,9 +288,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32) -declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32) - define void @test_vsseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -352,9 +310,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32) -declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32) - define void @test_vsseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -377,9 +332,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32) -declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32) - define void @test_vsseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -402,9 +354,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32) -declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32) - define void 
@test_vsseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t: ; CHECK: # %bb.0: # %entry @@ -427,9 +376,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32) -declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32) - define void @test_vsseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -452,9 +398,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32) -declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32) - define void @test_vsseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -477,9 +420,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32) -declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32) - define void @test_vsseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -502,9 +442,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32) -declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32) - define void 
@test_vsseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t: ; CHECK: # %bb.0: # %entry @@ -527,9 +464,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32) -declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32) - define void @test_vsseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -552,9 +486,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32) -declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32) - define void @test_vsseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -577,9 +508,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32) -declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32) - define void @test_vsseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -602,9 +530,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32) -declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32) - define void 
@test_vsseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t: ; CHECK: # %bb.0: # %entry @@ -627,9 +552,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32) -declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32) - define void @test_vsseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -652,9 +574,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32) -declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32) - define void @test_vsseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -677,9 +596,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32) -declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32) - define void @test_vsseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -702,9 +618,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8), ptr, i32, i32) -declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32) - define void 
@test_vsseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t: ; CHECK: # %bb.0: # %entry @@ -727,9 +640,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8), ptr, i32, i32) -declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32) - define void @test_vsseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -752,9 +662,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8), ptr, i32, i32) -declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32) - define void @test_vsseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -777,9 +684,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8), ptr, i32, i32) -declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32) - define void @test_vsseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -802,8 +706,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) - define void @test_vsseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: 
test_vsseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -826,8 +728,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) - define void @test_vsseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -850,8 +750,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) - define void @test_vsseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -874,8 +772,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) - define void @test_vsseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -898,8 +794,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) - define void @test_vsseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -922,8 +816,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32) - define void @test_vsseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # 
%bb.0: # %entry @@ -946,8 +838,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32) - define void @test_vsseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -970,8 +860,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32) - define void @test_vsseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -994,8 +882,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32) - define void @test_vsseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -1018,8 +904,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32) - define void @test_vsseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -1042,8 +926,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32) - define void @test_vsseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -1066,8 +948,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32) - define void @test_vsseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -1090,8 +970,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32) - define void @test_vsseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -1114,8 +992,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32) - define void @test_vsseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -1138,8 +1014,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32) - define void @test_vsseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -1162,8 +1036,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32) - define void @test_vsseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -1186,8 +1058,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32) - define void @test_vsseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -1210,8 +1080,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32) - define void @test_vsseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -1234,8 +1102,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32) - define void @test_vsseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -1258,8 +1124,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32) - define void @test_vsseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -1282,8 +1146,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32) - define void @test_vsseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -1306,8 +1168,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32) - define void @test_vsseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -1330,8 +1190,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32) - define void @test_vsseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -1354,8 +1212,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32) - define void @test_vsseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -1378,8 +1234,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32) - define void @test_vsseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -1402,8 +1256,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) - define void @test_vsseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -1426,8 +1278,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) - define void @test_vsseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -1450,8 +1300,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) - define void @test_vsseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -1474,8 +1322,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) - define void @test_vsseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -1498,8 +1344,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32) - define void @test_vsseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -1522,8 +1366,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32) - define void @test_vsseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -1546,8 +1388,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32) - define void @test_vsseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -1570,8 +1410,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32) - define void @test_vsseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -1594,8 +1432,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32) - define void @test_vsseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -1618,8 +1454,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32) - define void @test_vsseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -1642,8 +1476,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32) - define void @test_vsseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -1666,8 +1498,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32) - define void @test_vsseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -1690,8 +1520,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32) - define void @test_vsseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -1714,8 +1542,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32) - define void @test_vsseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -1738,8 +1564,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32) - define void @test_vsseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -1762,8 +1586,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32) - define void @test_vsseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -1786,8 +1608,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32) - define void @test_vsseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -1810,8 +1630,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32) - define void @test_vsseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -1834,8 +1652,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) - define void @test_vsseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -1858,8 +1674,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) - define void @test_vsseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -1882,8 +1696,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i32, i32) - define void @test_vsseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -1906,8 +1718,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32) - define void @test_vsseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -1930,8 +1740,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i32, i32) - define void @test_vsseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -1954,8 +1762,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32) - define void @test_vsseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -1978,8 +1784,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i32, i32) - define void @test_vsseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -2002,8 +1806,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i32, i32) - define void @test_vsseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -2026,8 +1828,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i32, i32) - define void @test_vsseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -2050,8 +1850,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i32, i32) - define void @test_vsseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -2074,8 +1872,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i32, i32) - define void @test_vsseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -2098,7 +1894,6 @@ entry: ret void } - define void @test_vsseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -2121,7 +1916,6 @@ entry: ret void } - define void @test_vsseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -2144,7 +1938,6 @@ entry: ret void } - define void @test_vsseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -2167,7 +1960,6 @@ entry: ret void } - define void 
@test_vsseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -2190,7 +1982,6 @@ entry: ret void } - define void @test_vsseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -2213,7 +2004,6 @@ entry: ret void } - define void @test_vsseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -2236,7 +2026,6 @@ entry: ret void } - define void @test_vsseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -2259,7 +2048,6 @@ entry: ret void } - define void @test_vsseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -2282,7 +2070,6 @@ entry: ret void } - define void @test_vsseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -2305,7 +2092,6 @@ entry: ret void } - define void @test_vsseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -2328,7 +2114,6 @@ entry: ret void } - define void @test_vsseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: 
test_vsseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -2351,7 +2136,6 @@ entry: ret void } - define void @test_vsseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -2374,7 +2158,6 @@ entry: ret void } - define void @test_vsseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -2397,7 +2180,6 @@ entry: ret void } - define void @test_vsseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -2420,7 +2202,6 @@ entry: ret void } - define void @test_vsseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -2443,7 +2224,6 @@ entry: ret void } - define void @test_vsseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -2466,7 +2246,6 @@ entry: ret void } - define void @test_vsseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -2489,7 +2268,6 @@ entry: ret void } - define void @test_vsseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -2512,7 +2290,6 @@ entry: ret void } - define void 
@test_vsseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -2535,7 +2312,6 @@ entry: ret void } - define void @test_vsseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -2558,7 +2334,6 @@ entry: ret void } - define void @test_vsseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -2581,7 +2356,6 @@ entry: ret void } - define void @test_vsseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -2604,7 +2378,6 @@ entry: ret void } - define void @test_vsseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -2627,7 +2400,6 @@ entry: ret void } - define void @test_vsseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -2650,7 +2422,6 @@ entry: ret void } - define void @test_vsseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -2673,7 +2444,6 @@ entry: ret void } - define void @test_vsseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t: 
; CHECK: # %bb.0: # %entry @@ -2696,7 +2466,6 @@ entry: ret void } - define void @test_vsseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -2719,7 +2488,6 @@ entry: ret void } - define void @test_vsseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -2742,7 +2510,6 @@ entry: ret void } - define void @test_vsseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -2765,7 +2532,6 @@ entry: ret void } - define void @test_vsseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -2788,7 +2554,6 @@ entry: ret void } - define void @test_vsseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -2811,7 +2576,6 @@ entry: ret void } - define void @test_vsseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -2834,7 +2598,6 @@ entry: ret void } - define void @test_vsseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -2857,7 +2620,6 @@ entry: ret void } - define void @test_vsseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, 
ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -2880,7 +2642,6 @@ entry: ret void } - define void @test_vsseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -2903,7 +2664,6 @@ entry: ret void } - define void @test_vsseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -2926,7 +2686,6 @@ entry: ret void } - define void @test_vsseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -2949,7 +2708,6 @@ entry: ret void } - define void @test_vsseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -2972,7 +2730,6 @@ entry: ret void } - define void @test_vsseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -2995,7 +2752,6 @@ entry: ret void } - define void @test_vsseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -3018,7 +2774,6 @@ entry: ret void } - define void @test_vsseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -3041,7 +2796,6 @@ entry: ret void } - define void 
@test_vsseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -3064,7 +2818,6 @@ entry: ret void } - define void @test_vsseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -3087,7 +2840,6 @@ entry: ret void } - define void @test_vsseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -3110,7 +2862,6 @@ entry: ret void } - define void @test_vsseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -3133,7 +2884,6 @@ entry: ret void } - define void @test_vsseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -3156,7 +2906,6 @@ entry: ret void } - define void @test_vsseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -3179,7 +2928,6 @@ entry: ret void } - define void @test_vsseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -3202,7 +2950,6 @@ entry: ret void } - define void @test_vsseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: 
test_vsseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -3225,7 +2972,6 @@ entry: ret void } - define void @test_vsseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -3248,7 +2994,6 @@ entry: ret void } - define void @test_vsseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -3271,7 +3016,6 @@ entry: ret void } - define void @test_vsseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -3294,7 +3038,6 @@ entry: ret void } - define void @test_vsseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -3317,7 +3060,6 @@ entry: ret void } - define void @test_vsseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -3340,7 +3082,6 @@ entry: ret void } - define void @test_vsseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -3363,7 +3104,6 @@ entry: ret void } - define void @test_vsseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -3386,7 +3126,6 @@ entry: ret void } - define void 
@test_vsseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -3409,7 +3148,6 @@ entry: ret void } - define void @test_vsseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -3432,7 +3170,6 @@ entry: ret void } - define void @test_vsseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -3455,7 +3192,6 @@ entry: ret void } - define void @test_vsseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -3478,7 +3214,6 @@ entry: ret void } - define void @test_vsseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -3501,7 +3236,6 @@ entry: ret void } - define void @test_vsseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -3524,7 +3258,6 @@ entry: ret void } - define void @test_vsseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -3547,7 +3280,6 @@ entry: ret void } - define void @test_vsseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: 
test_vsseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -3570,7 +3302,6 @@ entry: ret void } - define void @test_vsseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -3593,7 +3324,6 @@ entry: ret void } - define void @test_vsseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -3616,7 +3346,6 @@ entry: ret void } - define void @test_vsseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -3639,7 +3368,6 @@ entry: ret void } - define void @test_vsseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -3662,7 +3390,6 @@ entry: ret void } - define void @test_vsseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -3685,7 +3412,6 @@ entry: ret void } - define void @test_vsseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -3708,7 +3434,6 @@ entry: ret void } - define void @test_vsseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -3731,7 +3456,6 @@ entry: ret void } - define void 
@test_vsseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -3754,7 +3478,6 @@ entry: ret void } - define void @test_vsseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -3777,7 +3500,6 @@ entry: ret void } - define void @test_vsseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -3800,7 +3522,6 @@ entry: ret void } - define void @test_vsseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -3823,7 +3544,6 @@ entry: ret void } - define void @test_vsseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -3846,7 +3566,6 @@ entry: ret void } - define void @test_vsseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -3869,7 +3588,6 @@ entry: ret void } - define void @test_vsseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: test_vsseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -3892,7 +3610,6 @@ entry: ret void } - define void @test_vsseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %vl) { ; CHECK-LABEL: 
test_vsseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsseg-rv64.ll index 880066bf45990..420d04837965c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsseg-rv64.ll @@ -2,9 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zve64d,+f,+d,+zvfh,+zvfbfmin \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64) -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) - define void @test_vsseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t: ; CHECK: # %bb.0: # %entry @@ -38,9 +35,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64) -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) - define void @test_vsseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -63,9 +57,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64) -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) - define void @test_vsseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -88,9 +79,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64) -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) - define void @test_vsseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -113,9 +101,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64) -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) - define void @test_vsseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -138,9 +123,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64) -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) - define void @test_vsseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -163,9 +145,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64) -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64) - define void @test_vsseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t: ; CHECK: # %bb.0: # %entry @@ -188,9 +167,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64) -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64) - define void @test_vsseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -213,9 +189,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64) -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64) - define void @test_vsseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -238,9 +211,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64) -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64) - define void @test_vsseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -263,9 +233,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64) -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64) - define void @test_vsseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -299,9 +266,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg4.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64) -declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64) - define void @test_vsseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t: ; CHECK: # %bb.0: # %entry @@ -335,9 +299,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64) -declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64) - define void @test_vsseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -360,9 +321,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64) -declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64) - define void @test_vsseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -385,9 +343,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64) -declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64) - define void @test_vsseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -410,9 +365,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64) -declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64) - define void @test_vsseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -435,9 +387,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64) -declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64) - define void @test_vsseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t: ; CHECK: # %bb.0: # %entry @@ -471,9 +420,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64) -declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64) - define void @test_vsseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -496,9 +442,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64) -declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64) - define void @test_vsseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -521,9 +464,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64) -declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64) - define void @test_vsseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -546,9 +486,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64) -declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64) - define void @test_vsseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t: ; CHECK: # %bb.0: # %entry @@ -582,9 +519,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64) -declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64) - define void @test_vsseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -607,9 +541,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64) -declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64) - define void @test_vsseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -632,9 +563,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64) -declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64) - define void @test_vsseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -657,9 +585,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64) -declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64) - define void @test_vsseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t: ; CHECK: # %bb.0: # %entry @@ -693,9 +618,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64) -declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64) - define void @test_vsseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -718,9 +640,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64) -declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64) - define void @test_vsseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -743,9 +662,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64) -declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64) - define void @test_vsseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -768,9 +684,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64) -declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64) - define void @test_vsseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t: ; CHECK: # %bb.0: # %entry @@ -804,9 +717,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64) -declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64) - define void @test_vsseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -829,9 +739,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64) -declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64) - define void @test_vsseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -854,9 +761,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64) -declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64) - define void @test_vsseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -879,8 +783,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) - define void @test_vsseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -903,8 +805,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) - define void @test_vsseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -927,8 +827,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) - define void @test_vsseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -951,8 +849,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) - define void @test_vsseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -975,8 
+871,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) - define void @test_vsseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -999,8 +893,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64) - define void @test_vsseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -1023,8 +915,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64) - define void @test_vsseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -1047,8 +937,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64) - define void @test_vsseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -1071,8 +959,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64) - define void @test_vsseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -1095,8 +981,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64) - define void @test_vsseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -1119,8 +1003,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64) - define void @test_vsseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -1143,8 +1025,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64) - define void @test_vsseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -1167,8 +1047,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64) - define void @test_vsseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -1191,8 +1069,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64) - define void @test_vsseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -1215,8 +1091,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64) - define void @test_vsseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -1239,8 +1113,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64) - define void @test_vsseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -1263,8 +1135,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64) - define void @test_vsseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -1287,8 +1157,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64) - define void @test_vsseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -1311,8 +1179,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64) - define void @test_vsseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -1335,8 +1201,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64) - define void @test_vsseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -1359,8 +1223,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64) - define void @test_vsseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -1383,8 +1245,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64) - define void @test_vsseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -1407,8 +1267,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64) - define void @test_vsseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -1431,8 +1289,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64) - define void @test_vsseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -1455,8 +1311,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64) - define void @test_vsseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -1479,8 +1333,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) - define void @test_vsseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -1503,8 +1355,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) - define void @test_vsseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -1527,8 +1377,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) - define void @test_vsseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -1551,8 +1399,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) - define void @test_vsseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -1575,8 +1421,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64) - define void @test_vsseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -1599,8 +1443,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64) - define void @test_vsseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -1623,8 +1465,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64) - define void @test_vsseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -1647,8 +1487,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64) - define void @test_vsseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -1671,8 +1509,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64) - define void @test_vsseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -1695,8 +1531,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64) - define void @test_vsseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -1719,8 +1553,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64) - define void @test_vsseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -1743,8 +1575,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64) - define void @test_vsseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -1767,8 +1597,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64) - define void @test_vsseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -1791,8 +1619,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64) - define void @test_vsseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -1815,8 +1641,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64) - define void @test_vsseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -1839,8 +1663,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64) - define void @test_vsseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -1863,8 +1685,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64) - define void @test_vsseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -1887,8 +1707,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64) - define void @test_vsseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -1911,8 +1729,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) - define void @test_vsseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -1935,8 +1751,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) - define void @test_vsseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -1959,8 +1773,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) - define void @test_vsseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -1983,8 +1795,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64) - define void @test_vsseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -2007,8 +1817,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64) - define void @test_vsseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -2031,8 +1839,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64) - define void @test_vsseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -2055,8 +1861,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64) - define void @test_vsseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -2079,8 +1883,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64) - define void @test_vsseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -2103,8 +1905,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64) - define void @test_vsseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -2127,8 +1927,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64) - define void @test_vsseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -2151,8 +1949,6 @@ entry: ret void } -declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64) - define void @test_vsseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -2175,7 +1971,6 @@ entry: ret void } - define void 
@test_vsseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -2198,7 +1993,6 @@ entry: ret void } - define void @test_vsseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -2221,7 +2015,6 @@ entry: ret void } - define void @test_vsseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -2244,7 +2037,6 @@ entry: ret void } - define void @test_vsseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -2267,7 +2059,6 @@ entry: ret void } - define void @test_vsseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -2290,7 +2081,6 @@ entry: ret void } - define void @test_vsseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -2313,7 +2103,6 @@ entry: ret void } - define void @test_vsseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -2336,7 +2125,6 @@ entry: ret void } - define void @test_vsseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: 
test_vsseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -2359,7 +2147,6 @@ entry: ret void } - define void @test_vsseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -2382,7 +2169,6 @@ entry: ret void } - define void @test_vsseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -2405,7 +2191,6 @@ entry: ret void } - define void @test_vsseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -2428,7 +2213,6 @@ entry: ret void } - define void @test_vsseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -2451,7 +2235,6 @@ entry: ret void } - define void @test_vsseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -2474,7 +2257,6 @@ entry: ret void } - define void @test_vsseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -2497,7 +2279,6 @@ entry: ret void } - define void @test_vsseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -2520,7 +2301,6 @@ entry: ret void } - define void 
@test_vsseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -2543,7 +2323,6 @@ entry: ret void } - define void @test_vsseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -2566,7 +2345,6 @@ entry: ret void } - define void @test_vsseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -2589,7 +2367,6 @@ entry: ret void } - define void @test_vsseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -2612,7 +2389,6 @@ entry: ret void } - define void @test_vsseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -2635,7 +2411,6 @@ entry: ret void } - define void @test_vsseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -2658,7 +2433,6 @@ entry: ret void } - define void @test_vsseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -2681,7 +2455,6 @@ entry: ret void } - define void @test_vsseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t: 
; CHECK: # %bb.0: # %entry @@ -2704,7 +2477,6 @@ entry: ret void } - define void @test_vsseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -2727,7 +2499,6 @@ entry: ret void } - define void @test_vsseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -2750,7 +2521,6 @@ entry: ret void } - define void @test_vsseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -2773,7 +2543,6 @@ entry: ret void } - define void @test_vsseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -2796,7 +2565,6 @@ entry: ret void } - define void @test_vsseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -2819,7 +2587,6 @@ entry: ret void } - define void @test_vsseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -2842,7 +2609,6 @@ entry: ret void } - define void @test_vsseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -2865,7 +2631,6 @@ entry: ret void } - define void @test_vsseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr 
%base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -2888,7 +2653,6 @@ entry: ret void } - define void @test_vsseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -2911,7 +2675,6 @@ entry: ret void } - define void @test_vsseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -2934,7 +2697,6 @@ entry: ret void } - define void @test_vsseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -2957,7 +2719,6 @@ entry: ret void } - define void @test_vsseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -2980,7 +2741,6 @@ entry: ret void } - define void @test_vsseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -3003,7 +2763,6 @@ entry: ret void } - define void @test_vsseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -3026,7 +2785,6 @@ entry: ret void } - define void @test_vsseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -3049,7 +2807,6 @@ entry: ret void } - define void 
@test_vsseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -3072,7 +2829,6 @@ entry: ret void } - define void @test_vsseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -3095,7 +2851,6 @@ entry: ret void } - define void @test_vsseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -3118,7 +2873,6 @@ entry: ret void } - define void @test_vsseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -3141,7 +2895,6 @@ entry: ret void } - define void @test_vsseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -3164,7 +2917,6 @@ entry: ret void } - define void @test_vsseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -3187,7 +2939,6 @@ entry: ret void } - define void @test_vsseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -3210,7 +2961,6 @@ entry: ret void } - define void @test_vsseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: 
test_vsseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -3233,7 +2983,6 @@ entry: ret void } - define void @test_vsseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -3256,7 +3005,6 @@ entry: ret void } - define void @test_vsseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -3279,7 +3027,6 @@ entry: ret void } - define void @test_vsseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -3302,7 +3049,6 @@ entry: ret void } - define void @test_vsseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -3325,7 +3071,6 @@ entry: ret void } - define void @test_vsseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -3348,7 +3093,6 @@ entry: ret void } - define void @test_vsseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -3371,7 +3115,6 @@ entry: ret void } - define void @test_vsseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -3394,7 +3137,6 @@ entry: ret void } - define void 
@test_vsseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -3417,7 +3159,6 @@ entry: ret void } - define void @test_vsseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -3440,7 +3181,6 @@ entry: ret void } - define void @test_vsseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -3463,7 +3203,6 @@ entry: ret void } - define void @test_vsseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -3486,7 +3225,6 @@ entry: ret void } - define void @test_vsseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -3509,7 +3247,6 @@ entry: ret void } - define void @test_vsseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -3532,7 +3269,6 @@ entry: ret void } - define void @test_vsseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -3555,7 +3291,6 @@ entry: ret void } - define void @test_vsseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: 
test_vsseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -3578,7 +3313,6 @@ entry: ret void } - define void @test_vsseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -3601,7 +3335,6 @@ entry: ret void } - define void @test_vsseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -3624,7 +3357,6 @@ entry: ret void } - define void @test_vsseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -3647,7 +3379,6 @@ entry: ret void } - define void @test_vsseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -3670,7 +3401,6 @@ entry: ret void } - define void @test_vsseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -3693,7 +3423,6 @@ entry: ret void } - define void @test_vsseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -3716,7 +3445,6 @@ entry: ret void } - define void @test_vsseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -3739,7 +3467,6 @@ entry: ret void } - define void 
@test_vsseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -3762,7 +3489,6 @@ entry: ret void } - define void @test_vsseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -3785,7 +3511,6 @@ entry: ret void } - define void @test_vsseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -3808,7 +3533,6 @@ entry: ret void } - define void @test_vsseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -3831,7 +3555,6 @@ entry: ret void } - define void @test_vsseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -3854,7 +3577,6 @@ entry: ret void } - define void @test_vsseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -3877,7 +3599,6 @@ entry: ret void } - define void @test_vsseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -3900,7 +3621,6 @@ entry: ret void } - define void @test_vsseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: 
test_vsseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -3923,7 +3643,6 @@ entry: ret void } - define void @test_vsseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -3946,7 +3665,6 @@ entry: ret void } - define void @test_vsseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -3969,7 +3687,6 @@ entry: ret void } - define void @test_vsseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl) { ; CHECK-LABEL: test_vsseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll index 7fd1b05bb444d..d21a0498474af 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll @@ -14,8 +14,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv1i8.nxv1i8.i32(, , , i32 immarg, i32) - define @test_vssra_vx_i8mf8( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i8mf8: ; CHECK: # %bb.0: # %entry @@ -28,8 +26,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv1i8.i32.i32(, , i32, i32 immarg, i32) - define @test_vssra_vv_i8mf4( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i8mf4: ; CHECK: # %bb.0: # %entry @@ -42,8 +38,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv2i8.nxv2i8.i32(, , , i32 immarg, i32) - define @test_vssra_vx_i8mf4( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i8mf4: ; CHECK: # %bb.0: # %entry @@ -56,8 +50,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv2i8.i32.i32(, , i32, i32 immarg, i32) - define @test_vssra_vv_i8mf2( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i8mf2: ; CHECK: # %bb.0: # %entry @@ 
-70,8 +62,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv4i8.nxv4i8.i32(, , , i32 immarg, i32) - define @test_vssra_vx_i8mf2( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i8mf2: ; CHECK: # %bb.0: # %entry @@ -84,8 +74,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv4i8.i32.i32(, , i32, i32 immarg, i32) - define @test_vssra_vv_i8m1( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i8m1: ; CHECK: # %bb.0: # %entry @@ -98,8 +86,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv8i8.nxv8i8.i32(, , , i32 immarg, i32) - define @test_vssra_vx_i8m1( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i8m1: ; CHECK: # %bb.0: # %entry @@ -112,8 +98,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv8i8.i32.i32(, , i32, i32 immarg, i32) - define @test_vssra_vv_i8m2( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i8m2: ; CHECK: # %bb.0: # %entry @@ -126,8 +110,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv16i8.nxv16i8.i32(, , , i32 immarg, i32) - define @test_vssra_vx_i8m2( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i8m2: ; CHECK: # %bb.0: # %entry @@ -140,8 +122,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv16i8.i32.i32(, , i32, i32 immarg, i32) - define @test_vssra_vv_i8m4( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i8m4: ; CHECK: # %bb.0: # %entry @@ -154,8 +134,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv32i8.nxv32i8.i32(, , , i32 immarg, i32) - define @test_vssra_vx_i8m4( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i8m4: ; CHECK: # %bb.0: # %entry @@ -168,8 +146,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv32i8.i32.i32(, , i32, i32 immarg, i32) - define @test_vssra_vv_i8m8( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i8m8: ; CHECK: # %bb.0: # %entry @@ -182,8 +158,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv64i8.nxv64i8.i32(, , , i32 immarg, i32) - define @test_vssra_vx_i8m8( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i8m8: ; CHECK: # %bb.0: 
# %entry @@ -196,8 +170,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv64i8.i32.i32(, , i32, i32 immarg, i32) - define @test_vssra_vv_i16mf4( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i16mf4: ; CHECK: # %bb.0: # %entry @@ -210,8 +182,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv1i16.nxv1i16.i32(, , , i32 immarg, i32) - define @test_vssra_vx_i16mf4( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i16mf4: ; CHECK: # %bb.0: # %entry @@ -224,8 +194,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv1i16.i32.i32(, , i32, i32 immarg, i32) - define @test_vssra_vv_i16mf2( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i16mf2: ; CHECK: # %bb.0: # %entry @@ -238,8 +206,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv2i16.nxv2i16.i32(, , , i32 immarg, i32) - define @test_vssra_vx_i16mf2( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i16mf2: ; CHECK: # %bb.0: # %entry @@ -252,8 +218,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv2i16.i32.i32(, , i32, i32 immarg, i32) - define @test_vssra_vv_i16m1( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i16m1: ; CHECK: # %bb.0: # %entry @@ -266,8 +230,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv4i16.nxv4i16.i32(, , , i32 immarg, i32) - define @test_vssra_vx_i16m1( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i16m1: ; CHECK: # %bb.0: # %entry @@ -280,8 +242,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv4i16.i32.i32(, , i32, i32 immarg, i32) - define @test_vssra_vv_i16m2( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i16m2: ; CHECK: # %bb.0: # %entry @@ -294,8 +254,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv8i16.nxv8i16.i32(, , , i32 immarg, i32) - define @test_vssra_vx_i16m2( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i16m2: ; CHECK: # %bb.0: # %entry @@ -308,8 +266,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv8i16.i32.i32(, , i32, i32 immarg, i32) - define @test_vssra_vv_i16m4( %op1, %shift, i32 %vl) { ; 
CHECK-LABEL: test_vssra_vv_i16m4: ; CHECK: # %bb.0: # %entry @@ -322,8 +278,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv16i16.nxv16i16.i32(, , , i32 immarg, i32) - define @test_vssra_vx_i16m4( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i16m4: ; CHECK: # %bb.0: # %entry @@ -336,8 +290,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv16i16.i32.i32(, , i32, i32 immarg, i32) - define @test_vssra_vv_i16m8( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i16m8: ; CHECK: # %bb.0: # %entry @@ -350,8 +302,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv32i16.nxv32i16.i32(, , , i32 immarg, i32) - define @test_vssra_vx_i16m8( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i16m8: ; CHECK: # %bb.0: # %entry @@ -364,8 +314,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv32i16.i32.i32(, , i32, i32 immarg, i32) - define @test_vssra_vv_i32mf2( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i32mf2: ; CHECK: # %bb.0: # %entry @@ -378,8 +326,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv1i32.nxv1i32.i32(, , , i32 immarg, i32) - define @test_vssra_vx_i32mf2( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i32mf2: ; CHECK: # %bb.0: # %entry @@ -392,8 +338,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv1i32.i32.i32(, , i32, i32 immarg, i32) - define @test_vssra_vv_i32m1( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i32m1: ; CHECK: # %bb.0: # %entry @@ -406,8 +350,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv2i32.nxv2i32.i32(, , , i32 immarg, i32) - define @test_vssra_vx_i32m1( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i32m1: ; CHECK: # %bb.0: # %entry @@ -420,8 +362,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv2i32.i32.i32(, , i32, i32 immarg, i32) - define @test_vssra_vv_i32m2( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i32m2: ; CHECK: # %bb.0: # %entry @@ -434,8 +374,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv4i32.nxv4i32.i32(, , , i32 immarg, i32) - define 
@test_vssra_vx_i32m2( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i32m2: ; CHECK: # %bb.0: # %entry @@ -448,8 +386,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv4i32.i32.i32(, , i32, i32 immarg, i32) - define @test_vssra_vv_i32m4( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i32m4: ; CHECK: # %bb.0: # %entry @@ -462,8 +398,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv8i32.nxv8i32.i32(, , , i32 immarg, i32) - define @test_vssra_vx_i32m4( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i32m4: ; CHECK: # %bb.0: # %entry @@ -476,8 +410,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv8i32.i32.i32(, , i32, i32 immarg, i32) - define @test_vssra_vv_i32m8( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i32m8: ; CHECK: # %bb.0: # %entry @@ -490,8 +422,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv16i32.nxv16i32.i32(, , , i32 immarg, i32) - define @test_vssra_vx_i32m8( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i32m8: ; CHECK: # %bb.0: # %entry @@ -504,8 +434,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv16i32.i32.i32(, , i32, i32 immarg, i32) - define @test_vssra_vv_i64m1( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i64m1: ; CHECK: # %bb.0: # %entry @@ -518,8 +446,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv1i64.nxv1i64.i32(, , , i32 immarg, i32) - define @test_vssra_vx_i64m1( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i64m1: ; CHECK: # %bb.0: # %entry @@ -532,8 +458,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv1i64.i32.i32(, , i32, i32 immarg, i32) - define @test_vssra_vv_i64m2( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i64m2: ; CHECK: # %bb.0: # %entry @@ -546,8 +470,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv2i64.nxv2i64.i32(, , , i32 immarg, i32) - define @test_vssra_vx_i64m2( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i64m2: ; CHECK: # %bb.0: # %entry @@ -560,8 +482,6 @@ entry: ret %0 } -declare 
@llvm.riscv.vssra.nxv2i64.i32.i32(, , i32, i32 immarg, i32) - define @test_vssra_vv_i64m4( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i64m4: ; CHECK: # %bb.0: # %entry @@ -574,8 +494,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv4i64.nxv4i64.i32(, , , i32 immarg, i32) - define @test_vssra_vx_i64m4( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i64m4: ; CHECK: # %bb.0: # %entry @@ -588,8 +506,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv4i64.i32.i32(, , i32, i32 immarg, i32) - define @test_vssra_vv_i64m8( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i64m8: ; CHECK: # %bb.0: # %entry @@ -602,8 +518,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv8i64.nxv8i64.i32(, , , i32 immarg, i32) - define @test_vssra_vx_i64m8( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i64m8: ; CHECK: # %bb.0: # %entry @@ -616,8 +530,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv8i64.i32.i32(, , i32, i32 immarg, i32) - define @test_vssra_vv_i8mf8_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i8mf8_m: ; CHECK: # %bb.0: # %entry @@ -630,8 +542,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssra_vx_i8mf8_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i8mf8_m: ; CHECK: # %bb.0: # %entry @@ -644,8 +554,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv1i8.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssra_vv_i8mf4_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i8mf4_m: ; CHECK: # %bb.0: # %entry @@ -658,8 +566,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssra_vx_i8mf4_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i8mf4_m: ; CHECK: # %bb.0: # %entry @@ -672,8 +578,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv2i8.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define 
@test_vssra_vv_i8mf2_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i8mf2_m: ; CHECK: # %bb.0: # %entry @@ -686,8 +590,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssra_vx_i8mf2_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i8mf2_m: ; CHECK: # %bb.0: # %entry @@ -700,8 +602,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv4i8.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssra_vv_i8m1_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i8m1_m: ; CHECK: # %bb.0: # %entry @@ -714,8 +614,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssra_vx_i8m1_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i8m1_m: ; CHECK: # %bb.0: # %entry @@ -728,8 +626,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv8i8.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssra_vv_i8m2_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i8m2_m: ; CHECK: # %bb.0: # %entry @@ -742,8 +638,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssra_vx_i8m2_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i8m2_m: ; CHECK: # %bb.0: # %entry @@ -756,8 +650,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv16i8.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssra_vv_i8m4_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i8m4_m: ; CHECK: # %bb.0: # %entry @@ -770,8 +662,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssra_vx_i8m4_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i8m4_m: ; CHECK: # %bb.0: # %entry @@ -784,8 +674,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv32i8.i32.i32(, , i32, , i32 
immarg, i32, i32 immarg) - define @test_vssra_vv_i8m8_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i8m8_m: ; CHECK: # %bb.0: # %entry @@ -798,8 +686,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssra_vx_i8m8_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i8m8_m: ; CHECK: # %bb.0: # %entry @@ -812,8 +698,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv64i8.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssra_vv_i16mf4_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i16mf4_m: ; CHECK: # %bb.0: # %entry @@ -826,8 +710,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssra_vx_i16mf4_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i16mf4_m: ; CHECK: # %bb.0: # %entry @@ -840,8 +722,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv1i16.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssra_vv_i16mf2_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i16mf2_m: ; CHECK: # %bb.0: # %entry @@ -854,8 +734,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssra_vx_i16mf2_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i16mf2_m: ; CHECK: # %bb.0: # %entry @@ -868,8 +746,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv2i16.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssra_vv_i16m1_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i16m1_m: ; CHECK: # %bb.0: # %entry @@ -882,8 +758,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssra_vx_i16m1_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i16m1_m: ; CHECK: # %bb.0: # %entry @@ -896,8 +770,6 @@ entry: ret %0 } 
-declare @llvm.riscv.vssra.mask.nxv4i16.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssra_vv_i16m2_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i16m2_m: ; CHECK: # %bb.0: # %entry @@ -910,8 +782,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssra_vx_i16m2_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i16m2_m: ; CHECK: # %bb.0: # %entry @@ -924,8 +794,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv8i16.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssra_vv_i16m4_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i16m4_m: ; CHECK: # %bb.0: # %entry @@ -938,8 +806,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssra_vx_i16m4_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i16m4_m: ; CHECK: # %bb.0: # %entry @@ -952,8 +818,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv16i16.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssra_vv_i16m8_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i16m8_m: ; CHECK: # %bb.0: # %entry @@ -966,8 +830,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssra_vx_i16m8_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i16m8_m: ; CHECK: # %bb.0: # %entry @@ -980,8 +842,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv32i16.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssra_vv_i32mf2_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i32mf2_m: ; CHECK: # %bb.0: # %entry @@ -994,8 +854,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssra_vx_i32mf2_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i32mf2_m: 
; CHECK: # %bb.0: # %entry @@ -1008,8 +866,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv1i32.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssra_vv_i32m1_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i32m1_m: ; CHECK: # %bb.0: # %entry @@ -1022,8 +878,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssra_vx_i32m1_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i32m1_m: ; CHECK: # %bb.0: # %entry @@ -1036,8 +890,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv2i32.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssra_vv_i32m2_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i32m2_m: ; CHECK: # %bb.0: # %entry @@ -1050,8 +902,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssra_vx_i32m2_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i32m2_m: ; CHECK: # %bb.0: # %entry @@ -1064,8 +914,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv4i32.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssra_vv_i32m4_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i32m4_m: ; CHECK: # %bb.0: # %entry @@ -1078,8 +926,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssra_vx_i32m4_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i32m4_m: ; CHECK: # %bb.0: # %entry @@ -1092,8 +938,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv8i32.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssra_vv_i32m8_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i32m8_m: ; CHECK: # %bb.0: # %entry @@ -1106,8 +950,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssra_vx_i32m8_m( %mask, 
%op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i32m8_m: ; CHECK: # %bb.0: # %entry @@ -1120,8 +962,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv16i32.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssra_vv_i64m1_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i64m1_m: ; CHECK: # %bb.0: # %entry @@ -1134,8 +974,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssra_vx_i64m1_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i64m1_m: ; CHECK: # %bb.0: # %entry @@ -1148,8 +986,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv1i64.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssra_vv_i64m2_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i64m2_m: ; CHECK: # %bb.0: # %entry @@ -1162,8 +998,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssra_vx_i64m2_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i64m2_m: ; CHECK: # %bb.0: # %entry @@ -1176,8 +1010,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv2i64.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssra_vv_i64m4_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i64m4_m: ; CHECK: # %bb.0: # %entry @@ -1190,8 +1022,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssra_vx_i64m4_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i64m4_m: ; CHECK: # %bb.0: # %entry @@ -1204,8 +1034,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv4i64.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssra_vv_i64m8_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vv_i64m8_m: ; CHECK: # %bb.0: # %entry @@ -1218,8 +1046,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i32(, , , , i32 
immarg, i32, i32 immarg) - define @test_vssra_vx_i64m8_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssra_vx_i64m8_m: ; CHECK: # %bb.0: # %entry @@ -1232,4 +1058,3 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv8i64.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) diff --git a/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll index b7a84e58e6e61..47abf706562ac 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll @@ -14,8 +14,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv1i8.nxv1i8.i64(, , , i64 immarg, i64) - define @test_vssra_vx_i8mf8( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i8mf8: ; CHECK: # %bb.0: # %entry @@ -28,8 +26,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv1i8.i64.i64(, , i64, i64 immarg, i64) - define @test_vssra_vv_i8mf4( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i8mf4: ; CHECK: # %bb.0: # %entry @@ -42,8 +38,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv2i8.nxv2i8.i64(, , , i64 immarg, i64) - define @test_vssra_vx_i8mf4( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i8mf4: ; CHECK: # %bb.0: # %entry @@ -56,8 +50,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv2i8.i64.i64(, , i64, i64 immarg, i64) - define @test_vssra_vv_i8mf2( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i8mf2: ; CHECK: # %bb.0: # %entry @@ -70,8 +62,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv4i8.nxv4i8.i64(, , , i64 immarg, i64) - define @test_vssra_vx_i8mf2( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i8mf2: ; CHECK: # %bb.0: # %entry @@ -84,8 +74,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv4i8.i64.i64(, , i64, i64 immarg, i64) - define @test_vssra_vv_i8m1( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i8m1: ; CHECK: # %bb.0: # %entry @@ -98,8 +86,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv8i8.nxv8i8.i64(, , , i64 immarg, i64) - define @test_vssra_vx_i8m1( %op1, i64 
%shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i8m1: ; CHECK: # %bb.0: # %entry @@ -112,8 +98,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv8i8.i64.i64(, , i64, i64 immarg, i64) - define @test_vssra_vv_i8m2( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i8m2: ; CHECK: # %bb.0: # %entry @@ -126,8 +110,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv16i8.nxv16i8.i64(, , , i64 immarg, i64) - define @test_vssra_vx_i8m2( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i8m2: ; CHECK: # %bb.0: # %entry @@ -140,8 +122,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv16i8.i64.i64(, , i64, i64 immarg, i64) - define @test_vssra_vv_i8m4( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i8m4: ; CHECK: # %bb.0: # %entry @@ -154,8 +134,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv32i8.nxv32i8.i64(, , , i64 immarg, i64) - define @test_vssra_vx_i8m4( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i8m4: ; CHECK: # %bb.0: # %entry @@ -168,8 +146,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv32i8.i64.i64(, , i64, i64 immarg, i64) - define @test_vssra_vv_i8m8( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i8m8: ; CHECK: # %bb.0: # %entry @@ -182,8 +158,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv64i8.nxv64i8.i64(, , , i64 immarg, i64) - define @test_vssra_vx_i8m8( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i8m8: ; CHECK: # %bb.0: # %entry @@ -196,8 +170,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv64i8.i64.i64(, , i64, i64 immarg, i64) - define @test_vssra_vv_i16mf4( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i16mf4: ; CHECK: # %bb.0: # %entry @@ -210,8 +182,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv1i16.nxv1i16.i64(, , , i64 immarg, i64) - define @test_vssra_vx_i16mf4( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i16mf4: ; CHECK: # %bb.0: # %entry @@ -224,8 +194,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv1i16.i64.i64(, , i64, i64 immarg, i64) - define 
@test_vssra_vv_i16mf2( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i16mf2: ; CHECK: # %bb.0: # %entry @@ -238,8 +206,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv2i16.nxv2i16.i64(, , , i64 immarg, i64) - define @test_vssra_vx_i16mf2( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i16mf2: ; CHECK: # %bb.0: # %entry @@ -252,8 +218,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv2i16.i64.i64(, , i64, i64 immarg, i64) - define @test_vssra_vv_i16m1( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i16m1: ; CHECK: # %bb.0: # %entry @@ -266,8 +230,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv4i16.nxv4i16.i64(, , , i64 immarg, i64) - define @test_vssra_vx_i16m1( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i16m1: ; CHECK: # %bb.0: # %entry @@ -280,8 +242,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv4i16.i64.i64(, , i64, i64 immarg, i64) - define @test_vssra_vv_i16m2( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i16m2: ; CHECK: # %bb.0: # %entry @@ -294,8 +254,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv8i16.nxv8i16.i64(, , , i64 immarg, i64) - define @test_vssra_vx_i16m2( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i16m2: ; CHECK: # %bb.0: # %entry @@ -308,8 +266,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv8i16.i64.i64(, , i64, i64 immarg, i64) - define @test_vssra_vv_i16m4( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i16m4: ; CHECK: # %bb.0: # %entry @@ -322,8 +278,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv16i16.nxv16i16.i64(, , , i64 immarg, i64) - define @test_vssra_vx_i16m4( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i16m4: ; CHECK: # %bb.0: # %entry @@ -336,8 +290,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv16i16.i64.i64(, , i64, i64 immarg, i64) - define @test_vssra_vv_i16m8( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i16m8: ; CHECK: # %bb.0: # %entry @@ -350,8 +302,6 @@ entry: ret %0 } -declare 
@llvm.riscv.vssra.nxv32i16.nxv32i16.i64(, , , i64 immarg, i64) - define @test_vssra_vx_i16m8( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i16m8: ; CHECK: # %bb.0: # %entry @@ -364,8 +314,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv32i16.i64.i64(, , i64, i64 immarg, i64) - define @test_vssra_vv_i32mf2( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i32mf2: ; CHECK: # %bb.0: # %entry @@ -378,8 +326,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv1i32.nxv1i32.i64(, , , i64 immarg, i64) - define @test_vssra_vx_i32mf2( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i32mf2: ; CHECK: # %bb.0: # %entry @@ -392,8 +338,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv1i32.i64.i64(, , i64, i64 immarg, i64) - define @test_vssra_vv_i32m1( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i32m1: ; CHECK: # %bb.0: # %entry @@ -406,8 +350,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv2i32.nxv2i32.i64(, , , i64 immarg, i64) - define @test_vssra_vx_i32m1( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i32m1: ; CHECK: # %bb.0: # %entry @@ -420,8 +362,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv2i32.i64.i64(, , i64, i64 immarg, i64) - define @test_vssra_vv_i32m2( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i32m2: ; CHECK: # %bb.0: # %entry @@ -434,8 +374,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv4i32.nxv4i32.i64(, , , i64 immarg, i64) - define @test_vssra_vx_i32m2( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i32m2: ; CHECK: # %bb.0: # %entry @@ -448,8 +386,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv4i32.i64.i64(, , i64, i64 immarg, i64) - define @test_vssra_vv_i32m4( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i32m4: ; CHECK: # %bb.0: # %entry @@ -462,8 +398,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv8i32.nxv8i32.i64(, , , i64 immarg, i64) - define @test_vssra_vx_i32m4( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i32m4: ; CHECK: # %bb.0: # 
%entry @@ -476,8 +410,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv8i32.i64.i64(, , i64, i64 immarg, i64) - define @test_vssra_vv_i32m8( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i32m8: ; CHECK: # %bb.0: # %entry @@ -490,8 +422,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv16i32.nxv16i32.i64(, , , i64 immarg, i64) - define @test_vssra_vx_i32m8( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i32m8: ; CHECK: # %bb.0: # %entry @@ -504,8 +434,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv16i32.i64.i64(, , i64, i64 immarg, i64) - define @test_vssra_vv_i64m1( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i64m1: ; CHECK: # %bb.0: # %entry @@ -518,8 +446,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv1i64.nxv1i64.i64(, , , i64 immarg, i64) - define @test_vssra_vx_i64m1( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i64m1: ; CHECK: # %bb.0: # %entry @@ -532,8 +458,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv1i64.i64.i64(, , i64, i64 immarg, i64) - define @test_vssra_vv_i64m2( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i64m2: ; CHECK: # %bb.0: # %entry @@ -546,8 +470,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv2i64.nxv2i64.i64(, , , i64 immarg, i64) - define @test_vssra_vx_i64m2( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i64m2: ; CHECK: # %bb.0: # %entry @@ -560,8 +482,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv2i64.i64.i64(, , i64, i64 immarg, i64) - define @test_vssra_vv_i64m4( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i64m4: ; CHECK: # %bb.0: # %entry @@ -574,8 +494,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv4i64.nxv4i64.i64(, , , i64 immarg, i64) - define @test_vssra_vx_i64m4( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i64m4: ; CHECK: # %bb.0: # %entry @@ -588,8 +506,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv4i64.i64.i64(, , i64, i64 immarg, i64) - define @test_vssra_vv_i64m8( %op1, %shift, i64 %vl) { ; CHECK-LABEL: 
test_vssra_vv_i64m8: ; CHECK: # %bb.0: # %entry @@ -602,8 +518,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv8i64.nxv8i64.i64(, , , i64 immarg, i64) - define @test_vssra_vx_i64m8( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i64m8: ; CHECK: # %bb.0: # %entry @@ -616,8 +530,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.nxv8i64.i64.i64(, , i64, i64 immarg, i64) - define @test_vssra_vv_i8mf8_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i8mf8_m: ; CHECK: # %bb.0: # %entry @@ -630,8 +542,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssra_vx_i8mf8_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i8mf8_m: ; CHECK: # %bb.0: # %entry @@ -644,8 +554,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv1i8.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssra_vv_i8mf4_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i8mf4_m: ; CHECK: # %bb.0: # %entry @@ -658,8 +566,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssra_vx_i8mf4_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i8mf4_m: ; CHECK: # %bb.0: # %entry @@ -672,8 +578,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv2i8.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssra_vv_i8mf2_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i8mf2_m: ; CHECK: # %bb.0: # %entry @@ -686,8 +590,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssra_vx_i8mf2_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i8mf2_m: ; CHECK: # %bb.0: # %entry @@ -700,8 +602,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv4i8.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssra_vv_i8m1_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: 
test_vssra_vv_i8m1_m: ; CHECK: # %bb.0: # %entry @@ -714,8 +614,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssra_vx_i8m1_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i8m1_m: ; CHECK: # %bb.0: # %entry @@ -728,8 +626,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv8i8.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssra_vv_i8m2_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i8m2_m: ; CHECK: # %bb.0: # %entry @@ -742,8 +638,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssra_vx_i8m2_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i8m2_m: ; CHECK: # %bb.0: # %entry @@ -756,8 +650,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv16i8.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssra_vv_i8m4_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i8m4_m: ; CHECK: # %bb.0: # %entry @@ -770,8 +662,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssra_vx_i8m4_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i8m4_m: ; CHECK: # %bb.0: # %entry @@ -784,8 +674,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv32i8.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssra_vv_i8m8_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i8m8_m: ; CHECK: # %bb.0: # %entry @@ -798,8 +686,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssra_vx_i8m8_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i8m8_m: ; CHECK: # %bb.0: # %entry @@ -812,8 +698,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv64i8.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssra_vv_i16mf4_m( %mask, 
%op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i16mf4_m: ; CHECK: # %bb.0: # %entry @@ -826,8 +710,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssra_vx_i16mf4_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i16mf4_m: ; CHECK: # %bb.0: # %entry @@ -840,8 +722,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv1i16.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssra_vv_i16mf2_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i16mf2_m: ; CHECK: # %bb.0: # %entry @@ -854,8 +734,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssra_vx_i16mf2_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i16mf2_m: ; CHECK: # %bb.0: # %entry @@ -868,8 +746,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv2i16.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssra_vv_i16m1_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i16m1_m: ; CHECK: # %bb.0: # %entry @@ -882,8 +758,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssra_vx_i16m1_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i16m1_m: ; CHECK: # %bb.0: # %entry @@ -896,8 +770,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv4i16.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssra_vv_i16m2_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i16m2_m: ; CHECK: # %bb.0: # %entry @@ -910,8 +782,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssra_vx_i16m2_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i16m2_m: ; CHECK: # %bb.0: # %entry @@ -924,8 +794,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv8i16.i64.i64(, , i64, , i64 
immarg, i64, i64 immarg) - define @test_vssra_vv_i16m4_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i16m4_m: ; CHECK: # %bb.0: # %entry @@ -938,8 +806,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssra_vx_i16m4_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i16m4_m: ; CHECK: # %bb.0: # %entry @@ -952,8 +818,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv16i16.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssra_vv_i16m8_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i16m8_m: ; CHECK: # %bb.0: # %entry @@ -966,8 +830,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssra_vx_i16m8_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i16m8_m: ; CHECK: # %bb.0: # %entry @@ -980,8 +842,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv32i16.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssra_vv_i32mf2_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i32mf2_m: ; CHECK: # %bb.0: # %entry @@ -994,8 +854,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssra_vx_i32mf2_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i32mf2_m: ; CHECK: # %bb.0: # %entry @@ -1008,8 +866,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv1i32.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssra_vv_i32m1_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i32m1_m: ; CHECK: # %bb.0: # %entry @@ -1022,8 +878,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssra_vx_i32m1_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i32m1_m: ; CHECK: # %bb.0: # %entry @@ -1036,8 +890,6 @@ entry: ret %0 
} -declare @llvm.riscv.vssra.mask.nxv2i32.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssra_vv_i32m2_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i32m2_m: ; CHECK: # %bb.0: # %entry @@ -1050,8 +902,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssra_vx_i32m2_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i32m2_m: ; CHECK: # %bb.0: # %entry @@ -1064,8 +914,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv4i32.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssra_vv_i32m4_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i32m4_m: ; CHECK: # %bb.0: # %entry @@ -1078,8 +926,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssra_vx_i32m4_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i32m4_m: ; CHECK: # %bb.0: # %entry @@ -1092,8 +938,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv8i32.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssra_vv_i32m8_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i32m8_m: ; CHECK: # %bb.0: # %entry @@ -1106,8 +950,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssra_vx_i32m8_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i32m8_m: ; CHECK: # %bb.0: # %entry @@ -1120,8 +962,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv16i32.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssra_vv_i64m1_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i64m1_m: ; CHECK: # %bb.0: # %entry @@ -1134,8 +974,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssra_vx_i64m1_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: 
test_vssra_vx_i64m1_m: ; CHECK: # %bb.0: # %entry @@ -1148,8 +986,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv1i64.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssra_vv_i64m2_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i64m2_m: ; CHECK: # %bb.0: # %entry @@ -1162,8 +998,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssra_vx_i64m2_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i64m2_m: ; CHECK: # %bb.0: # %entry @@ -1176,8 +1010,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv2i64.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssra_vv_i64m4_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i64m4_m: ; CHECK: # %bb.0: # %entry @@ -1190,8 +1022,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssra_vx_i64m4_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i64m4_m: ; CHECK: # %bb.0: # %entry @@ -1204,8 +1034,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv4i64.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssra_vv_i64m8_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vv_i64m8_m: ; CHECK: # %bb.0: # %entry @@ -1218,8 +1046,6 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssra_vx_i64m8_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssra_vx_i64m8_m: ; CHECK: # %bb.0: # %entry @@ -1232,4 +1058,3 @@ entry: ret %0 } -declare @llvm.riscv.vssra.mask.nxv8i64.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) diff --git a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll index 0c2cdff65776e..58703158e58a0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll @@ -14,8 +14,6 @@ entry: ret %0 } 
-declare @llvm.riscv.vssrl.nxv1i8.nxv1i8.i32(, , , i32 immarg, i32) - define @test_vssrl_vx_u8mf8( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8mf8: ; CHECK: # %bb.0: # %entry @@ -28,8 +26,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv1i8.i32.i32(, , i32, i32 immarg, i32) - define @test_vssrl_vv_u8mf4( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8mf4: ; CHECK: # %bb.0: # %entry @@ -42,8 +38,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv2i8.nxv2i8.i32(, , , i32 immarg, i32) - define @test_vssrl_vx_u8mf4( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8mf4: ; CHECK: # %bb.0: # %entry @@ -56,8 +50,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv2i8.i32.i32(, , i32, i32 immarg, i32) - define @test_vssrl_vv_u8mf2( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8mf2: ; CHECK: # %bb.0: # %entry @@ -70,8 +62,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv4i8.nxv4i8.i32(, , , i32 immarg, i32) - define @test_vssrl_vx_u8mf2( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8mf2: ; CHECK: # %bb.0: # %entry @@ -84,8 +74,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv4i8.i32.i32(, , i32, i32 immarg, i32) - define @test_vssrl_vv_u8m1( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8m1: ; CHECK: # %bb.0: # %entry @@ -98,8 +86,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv8i8.nxv8i8.i32(, , , i32 immarg, i32) - define @test_vssrl_vx_u8m1( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8m1: ; CHECK: # %bb.0: # %entry @@ -112,8 +98,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv8i8.i32.i32(, , i32, i32 immarg, i32) - define @test_vssrl_vv_u8m2( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8m2: ; CHECK: # %bb.0: # %entry @@ -126,8 +110,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv16i8.nxv16i8.i32(, , , i32 immarg, i32) - define @test_vssrl_vx_u8m2( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8m2: ; CHECK: # %bb.0: # %entry @@ -140,8 +122,6 @@ entry: 
ret %0 } -declare @llvm.riscv.vssrl.nxv16i8.i32.i32(, , i32, i32 immarg, i32) - define @test_vssrl_vv_u8m4( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8m4: ; CHECK: # %bb.0: # %entry @@ -154,8 +134,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv32i8.nxv32i8.i32(, , , i32 immarg, i32) - define @test_vssrl_vx_u8m4( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8m4: ; CHECK: # %bb.0: # %entry @@ -168,8 +146,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv32i8.i32.i32(, , i32, i32 immarg, i32) - define @test_vssrl_vv_u8m8( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8m8: ; CHECK: # %bb.0: # %entry @@ -182,8 +158,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv64i8.nxv64i8.i32(, , , i32 immarg, i32) - define @test_vssrl_vx_u8m8( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8m8: ; CHECK: # %bb.0: # %entry @@ -196,8 +170,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv64i8.i32.i32(, , i32, i32 immarg, i32) - define @test_vssrl_vv_u16mf4( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u16mf4: ; CHECK: # %bb.0: # %entry @@ -210,8 +182,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv1i16.nxv1i16.i32(, , , i32 immarg, i32) - define @test_vssrl_vx_u16mf4( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u16mf4: ; CHECK: # %bb.0: # %entry @@ -224,8 +194,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv1i16.i32.i32(, , i32, i32 immarg, i32) - define @test_vssrl_vv_u16mf2( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u16mf2: ; CHECK: # %bb.0: # %entry @@ -238,8 +206,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv2i16.nxv2i16.i32(, , , i32 immarg, i32) - define @test_vssrl_vx_u16mf2( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u16mf2: ; CHECK: # %bb.0: # %entry @@ -252,8 +218,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv2i16.i32.i32(, , i32, i32 immarg, i32) - define @test_vssrl_vv_u16m1( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u16m1: ; CHECK: # %bb.0: 
# %entry @@ -266,8 +230,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv4i16.nxv4i16.i32(, , , i32 immarg, i32) - define @test_vssrl_vx_u16m1( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u16m1: ; CHECK: # %bb.0: # %entry @@ -280,8 +242,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv4i16.i32.i32(, , i32, i32 immarg, i32) - define @test_vssrl_vv_u16m2( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u16m2: ; CHECK: # %bb.0: # %entry @@ -294,8 +254,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv8i16.nxv8i16.i32(, , , i32 immarg, i32) - define @test_vssrl_vx_u16m2( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u16m2: ; CHECK: # %bb.0: # %entry @@ -308,8 +266,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv8i16.i32.i32(, , i32, i32 immarg, i32) - define @test_vssrl_vv_u16m4( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u16m4: ; CHECK: # %bb.0: # %entry @@ -322,8 +278,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv16i16.nxv16i16.i32(, , , i32 immarg, i32) - define @test_vssrl_vx_u16m4( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u16m4: ; CHECK: # %bb.0: # %entry @@ -336,8 +290,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv16i16.i32.i32(, , i32, i32 immarg, i32) - define @test_vssrl_vv_u16m8( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u16m8: ; CHECK: # %bb.0: # %entry @@ -350,8 +302,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv32i16.nxv32i16.i32(, , , i32 immarg, i32) - define @test_vssrl_vx_u16m8( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u16m8: ; CHECK: # %bb.0: # %entry @@ -364,8 +314,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv32i16.i32.i32(, , i32, i32 immarg, i32) - define @test_vssrl_vv_u32mf2( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u32mf2: ; CHECK: # %bb.0: # %entry @@ -378,8 +326,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv1i32.nxv1i32.i32(, , , i32 immarg, i32) - define @test_vssrl_vx_u32mf2( %op1, i32 %shift, i32 %vl) { ; 
CHECK-LABEL: test_vssrl_vx_u32mf2: ; CHECK: # %bb.0: # %entry @@ -392,8 +338,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv1i32.i32.i32(, , i32, i32 immarg, i32) - define @test_vssrl_vv_u32m1( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u32m1: ; CHECK: # %bb.0: # %entry @@ -406,8 +350,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv2i32.nxv2i32.i32(, , , i32 immarg, i32) - define @test_vssrl_vx_u32m1( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u32m1: ; CHECK: # %bb.0: # %entry @@ -420,8 +362,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv2i32.i32.i32(, , i32, i32 immarg, i32) - define @test_vssrl_vv_u32m2( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u32m2: ; CHECK: # %bb.0: # %entry @@ -434,8 +374,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv4i32.nxv4i32.i32(, , , i32 immarg, i32) - define @test_vssrl_vx_u32m2( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u32m2: ; CHECK: # %bb.0: # %entry @@ -448,8 +386,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv4i32.i32.i32(, , i32, i32 immarg, i32) - define @test_vssrl_vv_u32m4( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u32m4: ; CHECK: # %bb.0: # %entry @@ -462,8 +398,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv8i32.nxv8i32.i32(, , , i32 immarg, i32) - define @test_vssrl_vx_u32m4( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u32m4: ; CHECK: # %bb.0: # %entry @@ -476,8 +410,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv8i32.i32.i32(, , i32, i32 immarg, i32) - define @test_vssrl_vv_u32m8( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u32m8: ; CHECK: # %bb.0: # %entry @@ -490,8 +422,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv16i32.nxv16i32.i32(, , , i32 immarg, i32) - define @test_vssrl_vx_u32m8( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u32m8: ; CHECK: # %bb.0: # %entry @@ -504,8 +434,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv16i32.i32.i32(, , i32, i32 immarg, i32) - define 
@test_vssrl_vv_u64m1( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u64m1: ; CHECK: # %bb.0: # %entry @@ -518,8 +446,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv1i64.nxv1i64.i32(, , , i32 immarg, i32) - define @test_vssrl_vx_u64m1( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u64m1: ; CHECK: # %bb.0: # %entry @@ -532,8 +458,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv1i64.i32.i32(, , i32, i32 immarg, i32) - define @test_vssrl_vv_u64m2( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u64m2: ; CHECK: # %bb.0: # %entry @@ -546,8 +470,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv2i64.nxv2i64.i32(, , , i32 immarg, i32) - define @test_vssrl_vx_u64m2( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u64m2: ; CHECK: # %bb.0: # %entry @@ -560,8 +482,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv2i64.i32.i32(, , i32, i32 immarg, i32) - define @test_vssrl_vv_u64m4( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u64m4: ; CHECK: # %bb.0: # %entry @@ -574,8 +494,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv4i64.nxv4i64.i32(, , , i32 immarg, i32) - define @test_vssrl_vx_u64m4( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u64m4: ; CHECK: # %bb.0: # %entry @@ -588,8 +506,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv4i64.i32.i32(, , i32, i32 immarg, i32) - define @test_vssrl_vv_u64m8( %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u64m8: ; CHECK: # %bb.0: # %entry @@ -602,8 +518,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv8i64.nxv8i64.i32(, , , i32 immarg, i32) - define @test_vssrl_vx_u64m8( %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u64m8: ; CHECK: # %bb.0: # %entry @@ -616,8 +530,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv8i64.i32.i32(, , i32, i32 immarg, i32) - define @test_vssrl_vv_u8mf8_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8mf8_m: ; CHECK: # %bb.0: # %entry @@ -630,8 +542,6 @@ entry: ret %0 } -declare 
@llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssrl_vx_u8mf8_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8mf8_m: ; CHECK: # %bb.0: # %entry @@ -644,8 +554,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv1i8.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssrl_vv_u8mf4_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8mf4_m: ; CHECK: # %bb.0: # %entry @@ -658,8 +566,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssrl_vx_u8mf4_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8mf4_m: ; CHECK: # %bb.0: # %entry @@ -672,8 +578,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv2i8.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssrl_vv_u8mf2_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8mf2_m: ; CHECK: # %bb.0: # %entry @@ -686,8 +590,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssrl_vx_u8mf2_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8mf2_m: ; CHECK: # %bb.0: # %entry @@ -700,8 +602,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv4i8.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssrl_vv_u8m1_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8m1_m: ; CHECK: # %bb.0: # %entry @@ -714,8 +614,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssrl_vx_u8m1_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8m1_m: ; CHECK: # %bb.0: # %entry @@ -728,8 +626,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv8i8.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssrl_vv_u8m2_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8m2_m: ; CHECK: # %bb.0: # %entry @@ -742,8 
+638,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssrl_vx_u8m2_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8m2_m: ; CHECK: # %bb.0: # %entry @@ -756,8 +650,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv16i8.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssrl_vv_u8m4_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8m4_m: ; CHECK: # %bb.0: # %entry @@ -770,8 +662,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssrl_vx_u8m4_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8m4_m: ; CHECK: # %bb.0: # %entry @@ -784,8 +674,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv32i8.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssrl_vv_u8m8_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8m8_m: ; CHECK: # %bb.0: # %entry @@ -798,8 +686,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssrl_vx_u8m8_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8m8_m: ; CHECK: # %bb.0: # %entry @@ -812,8 +698,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv64i8.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssrl_vv_u16mf4_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u16mf4_m: ; CHECK: # %bb.0: # %entry @@ -826,8 +710,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssrl_vx_u16mf4_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u16mf4_m: ; CHECK: # %bb.0: # %entry @@ -840,8 +722,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv1i16.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssrl_vv_u16mf2_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: 
test_vssrl_vv_u16mf2_m: ; CHECK: # %bb.0: # %entry @@ -854,8 +734,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssrl_vx_u16mf2_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u16mf2_m: ; CHECK: # %bb.0: # %entry @@ -868,8 +746,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv2i16.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssrl_vv_u16m1_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u16m1_m: ; CHECK: # %bb.0: # %entry @@ -882,8 +758,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssrl_vx_u16m1_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u16m1_m: ; CHECK: # %bb.0: # %entry @@ -896,8 +770,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv4i16.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssrl_vv_u16m2_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u16m2_m: ; CHECK: # %bb.0: # %entry @@ -910,8 +782,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssrl_vx_u16m2_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u16m2_m: ; CHECK: # %bb.0: # %entry @@ -924,8 +794,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv8i16.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssrl_vv_u16m4_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u16m4_m: ; CHECK: # %bb.0: # %entry @@ -938,8 +806,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssrl_vx_u16m4_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u16m4_m: ; CHECK: # %bb.0: # %entry @@ -952,8 +818,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv16i16.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define 
@test_vssrl_vv_u16m8_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u16m8_m: ; CHECK: # %bb.0: # %entry @@ -966,8 +830,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssrl_vx_u16m8_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u16m8_m: ; CHECK: # %bb.0: # %entry @@ -980,8 +842,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv32i16.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssrl_vv_u32mf2_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u32mf2_m: ; CHECK: # %bb.0: # %entry @@ -994,8 +854,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssrl_vx_u32mf2_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u32mf2_m: ; CHECK: # %bb.0: # %entry @@ -1008,8 +866,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv1i32.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssrl_vv_u32m1_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u32m1_m: ; CHECK: # %bb.0: # %entry @@ -1022,8 +878,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssrl_vx_u32m1_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u32m1_m: ; CHECK: # %bb.0: # %entry @@ -1036,8 +890,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv2i32.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssrl_vv_u32m2_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u32m2_m: ; CHECK: # %bb.0: # %entry @@ -1050,8 +902,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssrl_vx_u32m2_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u32m2_m: ; CHECK: # %bb.0: # %entry @@ -1064,8 +914,6 @@ entry: ret %0 } -declare 
@llvm.riscv.vssrl.mask.nxv4i32.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssrl_vv_u32m4_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u32m4_m: ; CHECK: # %bb.0: # %entry @@ -1078,8 +926,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssrl_vx_u32m4_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u32m4_m: ; CHECK: # %bb.0: # %entry @@ -1092,8 +938,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv8i32.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssrl_vv_u32m8_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u32m8_m: ; CHECK: # %bb.0: # %entry @@ -1106,8 +950,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssrl_vx_u32m8_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u32m8_m: ; CHECK: # %bb.0: # %entry @@ -1120,8 +962,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv16i32.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssrl_vv_u64m1_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u64m1_m: ; CHECK: # %bb.0: # %entry @@ -1134,8 +974,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssrl_vx_u64m1_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u64m1_m: ; CHECK: # %bb.0: # %entry @@ -1148,8 +986,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv1i64.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssrl_vv_u64m2_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u64m2_m: ; CHECK: # %bb.0: # %entry @@ -1162,8 +998,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssrl_vx_u64m2_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u64m2_m: ; CHECK: 
# %bb.0: # %entry @@ -1176,8 +1010,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv2i64.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssrl_vv_u64m4_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u64m4_m: ; CHECK: # %bb.0: # %entry @@ -1190,8 +1022,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssrl_vx_u64m4_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u64m4_m: ; CHECK: # %bb.0: # %entry @@ -1204,8 +1034,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv4i64.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) - define @test_vssrl_vv_u64m8_m( %mask, %op1, %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vv_u64m8_m: ; CHECK: # %bb.0: # %entry @@ -1218,8 +1046,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i32(, , , , i32 immarg, i32, i32 immarg) - define @test_vssrl_vx_u64m8_m( %mask, %op1, i32 %shift, i32 %vl) { ; CHECK-LABEL: test_vssrl_vx_u64m8_m: ; CHECK: # %bb.0: # %entry @@ -1232,4 +1058,3 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv8i64.i32.i32(, , i32, , i32 immarg, i32, i32 immarg) diff --git a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll index fe80854bb2646..66308dc89844d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll @@ -14,8 +14,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64(, , , i64 immarg, i64) - define @test_vssrl_vx_u8mf8( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8mf8: ; CHECK: # %bb.0: # %entry @@ -28,8 +26,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv1i8.i64.i64(, , i64, i64 immarg, i64) - define @test_vssrl_vv_u8mf4( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8mf4: ; CHECK: # %bb.0: # %entry @@ -42,8 +38,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64(, , , i64 immarg, i64) - define @test_vssrl_vx_u8mf4( %op1, 
i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8mf4: ; CHECK: # %bb.0: # %entry @@ -56,8 +50,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv2i8.i64.i64(, , i64, i64 immarg, i64) - define @test_vssrl_vv_u8mf2( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8mf2: ; CHECK: # %bb.0: # %entry @@ -70,8 +62,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64(, , , i64 immarg, i64) - define @test_vssrl_vx_u8mf2( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8mf2: ; CHECK: # %bb.0: # %entry @@ -84,8 +74,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv4i8.i64.i64(, , i64, i64 immarg, i64) - define @test_vssrl_vv_u8m1( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8m1: ; CHECK: # %bb.0: # %entry @@ -98,8 +86,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64(, , , i64 immarg, i64) - define @test_vssrl_vx_u8m1( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8m1: ; CHECK: # %bb.0: # %entry @@ -112,8 +98,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv8i8.i64.i64(, , i64, i64 immarg, i64) - define @test_vssrl_vv_u8m2( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8m2: ; CHECK: # %bb.0: # %entry @@ -126,8 +110,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64(, , , i64 immarg, i64) - define @test_vssrl_vx_u8m2( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8m2: ; CHECK: # %bb.0: # %entry @@ -140,8 +122,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv16i8.i64.i64(, , i64, i64 immarg, i64) - define @test_vssrl_vv_u8m4( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8m4: ; CHECK: # %bb.0: # %entry @@ -154,8 +134,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64(, , , i64 immarg, i64) - define @test_vssrl_vx_u8m4( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8m4: ; CHECK: # %bb.0: # %entry @@ -168,8 +146,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv32i8.i64.i64(, , i64, i64 immarg, i64) - define 
@test_vssrl_vv_u8m8( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8m8: ; CHECK: # %bb.0: # %entry @@ -182,8 +158,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64(, , , i64 immarg, i64) - define @test_vssrl_vx_u8m8( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8m8: ; CHECK: # %bb.0: # %entry @@ -196,8 +170,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv64i8.i64.i64(, , i64, i64 immarg, i64) - define @test_vssrl_vv_u16mf4( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u16mf4: ; CHECK: # %bb.0: # %entry @@ -210,8 +182,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64(, , , i64 immarg, i64) - define @test_vssrl_vx_u16mf4( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u16mf4: ; CHECK: # %bb.0: # %entry @@ -224,8 +194,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv1i16.i64.i64(, , i64, i64 immarg, i64) - define @test_vssrl_vv_u16mf2( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u16mf2: ; CHECK: # %bb.0: # %entry @@ -238,8 +206,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64(, , , i64 immarg, i64) - define @test_vssrl_vx_u16mf2( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u16mf2: ; CHECK: # %bb.0: # %entry @@ -252,8 +218,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv2i16.i64.i64(, , i64, i64 immarg, i64) - define @test_vssrl_vv_u16m1( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u16m1: ; CHECK: # %bb.0: # %entry @@ -266,8 +230,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64(, , , i64 immarg, i64) - define @test_vssrl_vx_u16m1( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u16m1: ; CHECK: # %bb.0: # %entry @@ -280,8 +242,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv4i16.i64.i64(, , i64, i64 immarg, i64) - define @test_vssrl_vv_u16m2( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u16m2: ; CHECK: # %bb.0: # %entry @@ -294,8 +254,6 @@ entry: ret %0 } -declare 
@llvm.riscv.vssrl.nxv8i16.nxv8i16.i64(, , , i64 immarg, i64) - define @test_vssrl_vx_u16m2( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u16m2: ; CHECK: # %bb.0: # %entry @@ -308,8 +266,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv8i16.i64.i64(, , i64, i64 immarg, i64) - define @test_vssrl_vv_u16m4( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u16m4: ; CHECK: # %bb.0: # %entry @@ -322,8 +278,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64(, , , i64 immarg, i64) - define @test_vssrl_vx_u16m4( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u16m4: ; CHECK: # %bb.0: # %entry @@ -336,8 +290,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv16i16.i64.i64(, , i64, i64 immarg, i64) - define @test_vssrl_vv_u16m8( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u16m8: ; CHECK: # %bb.0: # %entry @@ -350,8 +302,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64(, , , i64 immarg, i64) - define @test_vssrl_vx_u16m8( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u16m8: ; CHECK: # %bb.0: # %entry @@ -364,8 +314,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv32i16.i64.i64(, , i64, i64 immarg, i64) - define @test_vssrl_vv_u32mf2( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u32mf2: ; CHECK: # %bb.0: # %entry @@ -378,8 +326,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64(, , , i64 immarg, i64) - define @test_vssrl_vx_u32mf2( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u32mf2: ; CHECK: # %bb.0: # %entry @@ -392,8 +338,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv1i32.i64.i64(, , i64, i64 immarg, i64) - define @test_vssrl_vv_u32m1( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u32m1: ; CHECK: # %bb.0: # %entry @@ -406,8 +350,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64(, , , i64 immarg, i64) - define @test_vssrl_vx_u32m1( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u32m1: ; CHECK: # %bb.0: # 
%entry @@ -420,8 +362,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv2i32.i64.i64(, , i64, i64 immarg, i64) - define @test_vssrl_vv_u32m2( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u32m2: ; CHECK: # %bb.0: # %entry @@ -434,8 +374,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64(, , , i64 immarg, i64) - define @test_vssrl_vx_u32m2( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u32m2: ; CHECK: # %bb.0: # %entry @@ -448,8 +386,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv4i32.i64.i64(, , i64, i64 immarg, i64) - define @test_vssrl_vv_u32m4( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u32m4: ; CHECK: # %bb.0: # %entry @@ -462,8 +398,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64(, , , i64 immarg, i64) - define @test_vssrl_vx_u32m4( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u32m4: ; CHECK: # %bb.0: # %entry @@ -476,8 +410,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv8i32.i64.i64(, , i64, i64 immarg, i64) - define @test_vssrl_vv_u32m8( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u32m8: ; CHECK: # %bb.0: # %entry @@ -490,8 +422,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64(, , , i64 immarg, i64) - define @test_vssrl_vx_u32m8( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u32m8: ; CHECK: # %bb.0: # %entry @@ -504,8 +434,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv16i32.i64.i64(, , i64, i64 immarg, i64) - define @test_vssrl_vv_u64m1( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u64m1: ; CHECK: # %bb.0: # %entry @@ -518,8 +446,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64(, , , i64 immarg, i64) - define @test_vssrl_vx_u64m1( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u64m1: ; CHECK: # %bb.0: # %entry @@ -532,8 +458,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv1i64.i64.i64(, , i64, i64 immarg, i64) - define @test_vssrl_vv_u64m2( %op1, %shift, i64 %vl) { ; CHECK-LABEL: 
test_vssrl_vv_u64m2: ; CHECK: # %bb.0: # %entry @@ -546,8 +470,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64(, , , i64 immarg, i64) - define @test_vssrl_vx_u64m2( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u64m2: ; CHECK: # %bb.0: # %entry @@ -560,8 +482,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv2i64.i64.i64(, , i64, i64 immarg, i64) - define @test_vssrl_vv_u64m4( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u64m4: ; CHECK: # %bb.0: # %entry @@ -574,8 +494,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64(, , , i64 immarg, i64) - define @test_vssrl_vx_u64m4( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u64m4: ; CHECK: # %bb.0: # %entry @@ -588,8 +506,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv4i64.i64.i64(, , i64, i64 immarg, i64) - define @test_vssrl_vv_u64m8( %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u64m8: ; CHECK: # %bb.0: # %entry @@ -602,8 +518,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64(, , , i64 immarg, i64) - define @test_vssrl_vx_u64m8( %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u64m8: ; CHECK: # %bb.0: # %entry @@ -616,8 +530,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.nxv8i64.i64.i64(, , i64, i64 immarg, i64) - define @test_vssrl_vv_u8mf8_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8mf8_m: ; CHECK: # %bb.0: # %entry @@ -630,8 +542,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssrl_vx_u8mf8_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8mf8_m: ; CHECK: # %bb.0: # %entry @@ -644,8 +554,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv1i8.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssrl_vv_u8mf4_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8mf4_m: ; CHECK: # %bb.0: # %entry @@ -658,8 +566,6 @@ entry: ret %0 } -declare 
@llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssrl_vx_u8mf4_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8mf4_m: ; CHECK: # %bb.0: # %entry @@ -672,8 +578,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv2i8.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssrl_vv_u8mf2_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8mf2_m: ; CHECK: # %bb.0: # %entry @@ -686,8 +590,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssrl_vx_u8mf2_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8mf2_m: ; CHECK: # %bb.0: # %entry @@ -700,8 +602,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv4i8.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssrl_vv_u8m1_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8m1_m: ; CHECK: # %bb.0: # %entry @@ -714,8 +614,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssrl_vx_u8m1_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8m1_m: ; CHECK: # %bb.0: # %entry @@ -728,8 +626,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv8i8.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssrl_vv_u8m2_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8m2_m: ; CHECK: # %bb.0: # %entry @@ -742,8 +638,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssrl_vx_u8m2_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8m2_m: ; CHECK: # %bb.0: # %entry @@ -756,8 +650,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv16i8.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssrl_vv_u8m4_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8m4_m: ; CHECK: # %bb.0: # %entry @@ -770,8 
+662,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssrl_vx_u8m4_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8m4_m: ; CHECK: # %bb.0: # %entry @@ -784,8 +674,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv32i8.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssrl_vv_u8m8_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u8m8_m: ; CHECK: # %bb.0: # %entry @@ -798,8 +686,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssrl_vx_u8m8_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u8m8_m: ; CHECK: # %bb.0: # %entry @@ -812,8 +698,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv64i8.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssrl_vv_u16mf4_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u16mf4_m: ; CHECK: # %bb.0: # %entry @@ -826,8 +710,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssrl_vx_u16mf4_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u16mf4_m: ; CHECK: # %bb.0: # %entry @@ -840,8 +722,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv1i16.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssrl_vv_u16mf2_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u16mf2_m: ; CHECK: # %bb.0: # %entry @@ -854,8 +734,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssrl_vx_u16mf2_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u16mf2_m: ; CHECK: # %bb.0: # %entry @@ -868,8 +746,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv2i16.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssrl_vv_u16m1_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: 
test_vssrl_vv_u16m1_m: ; CHECK: # %bb.0: # %entry @@ -882,8 +758,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssrl_vx_u16m1_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u16m1_m: ; CHECK: # %bb.0: # %entry @@ -896,8 +770,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv4i16.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssrl_vv_u16m2_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u16m2_m: ; CHECK: # %bb.0: # %entry @@ -910,8 +782,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssrl_vx_u16m2_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u16m2_m: ; CHECK: # %bb.0: # %entry @@ -924,8 +794,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv8i16.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssrl_vv_u16m4_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u16m4_m: ; CHECK: # %bb.0: # %entry @@ -938,8 +806,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssrl_vx_u16m4_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u16m4_m: ; CHECK: # %bb.0: # %entry @@ -952,8 +818,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv16i16.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssrl_vv_u16m8_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u16m8_m: ; CHECK: # %bb.0: # %entry @@ -966,8 +830,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssrl_vx_u16m8_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u16m8_m: ; CHECK: # %bb.0: # %entry @@ -980,8 +842,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv32i16.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define 
@test_vssrl_vv_u32mf2_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u32mf2_m: ; CHECK: # %bb.0: # %entry @@ -994,8 +854,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssrl_vx_u32mf2_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u32mf2_m: ; CHECK: # %bb.0: # %entry @@ -1008,8 +866,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv1i32.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssrl_vv_u32m1_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u32m1_m: ; CHECK: # %bb.0: # %entry @@ -1022,8 +878,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssrl_vx_u32m1_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u32m1_m: ; CHECK: # %bb.0: # %entry @@ -1036,8 +890,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv2i32.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssrl_vv_u32m2_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u32m2_m: ; CHECK: # %bb.0: # %entry @@ -1050,8 +902,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssrl_vx_u32m2_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u32m2_m: ; CHECK: # %bb.0: # %entry @@ -1064,8 +914,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv4i32.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssrl_vv_u32m4_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u32m4_m: ; CHECK: # %bb.0: # %entry @@ -1078,8 +926,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssrl_vx_u32m4_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u32m4_m: ; CHECK: # %bb.0: # %entry @@ -1092,8 +938,6 @@ entry: ret %0 } -declare 
@llvm.riscv.vssrl.mask.nxv8i32.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssrl_vv_u32m8_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u32m8_m: ; CHECK: # %bb.0: # %entry @@ -1106,8 +950,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssrl_vx_u32m8_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u32m8_m: ; CHECK: # %bb.0: # %entry @@ -1120,8 +962,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv16i32.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssrl_vv_u64m1_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u64m1_m: ; CHECK: # %bb.0: # %entry @@ -1134,8 +974,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssrl_vx_u64m1_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u64m1_m: ; CHECK: # %bb.0: # %entry @@ -1148,8 +986,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv1i64.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssrl_vv_u64m2_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u64m2_m: ; CHECK: # %bb.0: # %entry @@ -1162,8 +998,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssrl_vx_u64m2_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u64m2_m: ; CHECK: # %bb.0: # %entry @@ -1176,8 +1010,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv2i64.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssrl_vv_u64m4_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u64m4_m: ; CHECK: # %bb.0: # %entry @@ -1190,8 +1022,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssrl_vx_u64m4_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u64m4_m: ; 
CHECK: # %bb.0: # %entry @@ -1204,8 +1034,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv4i64.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) - define @test_vssrl_vv_u64m8_m( %mask, %op1, %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vv_u64m8_m: ; CHECK: # %bb.0: # %entry @@ -1218,8 +1046,6 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64(, , , , i64 immarg, i64, i64 immarg) - define @test_vssrl_vx_u64m8_m( %mask, %op1, i64 %shift, i64 %vl) { ; CHECK-LABEL: test_vssrl_vx_u64m8_m: ; CHECK: # %bb.0: # %entry @@ -1232,4 +1058,3 @@ entry: ret %0 } -declare @llvm.riscv.vssrl.mask.nxv8i64.i64.i64(, , i64, , i64 immarg, i64, i64 immarg) diff --git a/llvm/test/CodeGen/RISCV/rvv/vssseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssseg-rv32.ll index a0a583c046c49..2458312a397e6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vssseg-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssseg-rv32.ll @@ -2,9 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zve64d,+f,+d,+zvfh \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) - define void @test_vssseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t: ; CHECK: # %bb.0: # %entry @@ -27,9 +24,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) - define void @test_vssseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: 
test_vssseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -52,9 +46,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) - define void @test_vssseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -77,9 +68,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) - define void @test_vssseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -102,9 +90,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) - define void @test_vssseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -127,9 +112,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) - define void 
@test_vssseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -152,9 +134,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32) - define void @test_vssseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t: ; CHECK: # %bb.0: # %entry @@ -177,9 +156,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32) - define void @test_vssseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -202,9 +178,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32) - define void @test_vssseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -227,9 +200,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32, i32) -declare void 
@llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32) - define void @test_vssseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -252,9 +222,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32) - define void @test_vssseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -277,9 +244,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32) - define void @test_vssseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t: ; CHECK: # %bb.0: # %entry @@ -302,9 +266,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32) - define void @test_vssseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -327,9 +288,6 @@ entry: ret void } -declare void 
@llvm.riscv.vssseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32) - define void @test_vssseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -352,9 +310,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32) - define void @test_vssseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -377,9 +332,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32) - define void @test_vssseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -402,9 +354,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32) - define void @test_vssseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: 
test_vssseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t: ; CHECK: # %bb.0: # %entry @@ -427,9 +376,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32) - define void @test_vssseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -452,9 +398,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32) - define void @test_vssseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -477,9 +420,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32) - define void @test_vssseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -502,9 +442,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32) - define void 
@test_vssseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t: ; CHECK: # %bb.0: # %entry @@ -527,9 +464,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32) - define void @test_vssseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -552,9 +486,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32) - define void @test_vssseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -577,9 +508,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32) - define void @test_vssseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -602,9 +530,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32, i32) -declare void 
@llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32) - define void @test_vssseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t: ; CHECK: # %bb.0: # %entry @@ -627,9 +552,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32) - define void @test_vssseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -652,9 +574,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32) - define void @test_vssseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -677,9 +596,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32) - define void @test_vssseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -702,9 +618,6 @@ entry: ret void } -declare void 
@llvm.riscv.vssseg8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32) - define void @test_vssseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t: ; CHECK: # %bb.0: # %entry @@ -727,9 +640,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32) - define void @test_vssseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -752,9 +662,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32) - define void @test_vssseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -777,9 +684,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8), ptr, i32, i32, i32) -declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32) - define void @test_vssseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: 
test_vssseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -802,8 +706,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) - define void @test_vssseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -826,8 +728,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) - define void @test_vssseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -850,8 +750,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) - define void @test_vssseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -874,8 +772,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) - define void @test_vssseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -898,8 +794,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) - define void @test_vssseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, 
i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -922,8 +816,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32) - define void @test_vssseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -946,8 +838,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32) - define void @test_vssseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -970,8 +860,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32) - define void @test_vssseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -994,8 +882,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32) - define void @test_vssseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -1018,8 +904,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32) - define void 
@test_vssseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -1042,8 +926,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32) - define void @test_vssseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -1066,8 +948,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32) - define void @test_vssseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -1090,8 +970,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32) - define void @test_vssseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -1114,8 +992,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32) - define void @test_vssseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -1138,8 +1014,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 
5), ptr, i32, , i32, i32) - define void @test_vssseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -1162,8 +1036,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32) - define void @test_vssseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -1186,8 +1058,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32) - define void @test_vssseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -1210,8 +1080,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32) - define void @test_vssseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -1234,8 +1102,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32) - define void @test_vssseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -1258,8 +1124,6 @@ entry: ret void } -declare void 
@llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32) - define void @test_vssseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -1282,8 +1146,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32) - define void @test_vssseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -1306,8 +1168,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32) - define void @test_vssseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -1330,8 +1190,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32) - define void @test_vssseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -1354,8 +1212,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32) - define void @test_vssseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -1378,8 
+1234,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32) - define void @test_vssseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -1402,8 +1256,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) - define void @test_vssseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -1426,8 +1278,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) - define void @test_vssseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -1450,8 +1300,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) - define void @test_vssseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -1474,8 +1322,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) - define void @test_vssseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: 
test_vssseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -1498,8 +1344,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32) - define void @test_vssseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -1522,8 +1366,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32) - define void @test_vssseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -1546,8 +1388,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32) - define void @test_vssseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -1570,8 +1410,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32) - define void @test_vssseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -1594,8 +1432,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32) - define void @test_vssseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr 
%base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -1618,8 +1454,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32) - define void @test_vssseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -1642,8 +1476,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32) - define void @test_vssseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -1666,8 +1498,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32) - define void @test_vssseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -1690,8 +1520,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32) - define void @test_vssseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -1714,8 +1542,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32) - define void 
@test_vssseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -1738,8 +1564,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32) - define void @test_vssseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -1762,8 +1586,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32) - define void @test_vssseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -1786,8 +1608,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32) - define void @test_vssseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -1810,8 +1630,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32) - define void @test_vssseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -1834,8 +1652,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", , 
2), ptr, i32, , i32, i32) - define void @test_vssseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -1858,8 +1674,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) - define void @test_vssseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -1882,8 +1696,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, i32, , i32, i32) - define void @test_vssseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -1906,8 +1718,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32) - define void @test_vssseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -1930,8 +1740,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, i32, , i32, i32) - define void @test_vssseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -1954,8 +1762,6 @@ entry: ret void } -declare void 
@llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32) - define void @test_vssseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -1978,8 +1784,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, i32, , i32, i32) - define void @test_vssseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -2002,8 +1806,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, i32, , i32, i32) - define void @test_vssseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -2026,8 +1828,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, i32, , i32, i32) - define void @test_vssseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -2050,8 +1850,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, i32, , i32, i32) - define void @test_vssseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -2074,8 
+1872,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, i32, , i32, i32) - define void @test_vssseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -2098,7 +1894,6 @@ entry: ret void } - define void @test_vssseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -2121,7 +1916,6 @@ entry: ret void } - define void @test_vssseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -2144,7 +1938,6 @@ entry: ret void } - define void @test_vssseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -2167,7 +1960,6 @@ entry: ret void } - define void @test_vssseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -2190,7 +1982,6 @@ entry: ret void } - define void @test_vssseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -2213,7 +2004,6 @@ entry: ret void } - define void @test_vssseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: 
test_vssseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -2236,7 +2026,6 @@ entry: ret void } - define void @test_vssseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -2259,7 +2048,6 @@ entry: ret void } - define void @test_vssseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -2282,7 +2070,6 @@ entry: ret void } - define void @test_vssseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -2305,7 +2092,6 @@ entry: ret void } - define void @test_vssseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -2328,7 +2114,6 @@ entry: ret void } - define void @test_vssseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -2351,7 +2136,6 @@ entry: ret void } - define void @test_vssseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -2374,7 +2158,6 @@ entry: ret void } - define void @test_vssseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: 
# %entry @@ -2397,7 +2180,6 @@ entry: ret void } - define void @test_vssseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -2420,7 +2202,6 @@ entry: ret void } - define void @test_vssseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -2443,7 +2224,6 @@ entry: ret void } - define void @test_vssseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -2466,7 +2246,6 @@ entry: ret void } - define void @test_vssseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -2489,7 +2268,6 @@ entry: ret void } - define void @test_vssseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -2512,7 +2290,6 @@ entry: ret void } - define void @test_vssseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -2535,7 +2312,6 @@ entry: ret void } - define void @test_vssseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -2558,7 +2334,6 @@ entry: ret void } - define void 
@test_vssseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -2581,7 +2356,6 @@ entry: ret void } - define void @test_vssseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -2604,7 +2378,6 @@ entry: ret void } - define void @test_vssseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -2627,7 +2400,6 @@ entry: ret void } - define void @test_vssseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -2650,7 +2422,6 @@ entry: ret void } - define void @test_vssseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -2673,7 +2444,6 @@ entry: ret void } - define void @test_vssseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -2696,7 +2466,6 @@ entry: ret void } - define void @test_vssseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -2719,7 +2488,6 @@ entry: ret void } - define void 
@test_vssseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -2742,7 +2510,6 @@ entry: ret void } - define void @test_vssseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -2765,7 +2532,6 @@ entry: ret void } - define void @test_vssseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -2788,7 +2554,6 @@ entry: ret void } - define void @test_vssseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -2811,7 +2576,6 @@ entry: ret void } - define void @test_vssseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -2834,7 +2598,6 @@ entry: ret void } - define void @test_vssseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -2857,7 +2620,6 @@ entry: ret void } - define void @test_vssseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -2880,7 +2642,6 @@ entry: ret void } - define void 
@test_vssseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -2903,7 +2664,6 @@ entry: ret void } - define void @test_vssseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -2926,7 +2686,6 @@ entry: ret void } - define void @test_vssseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -2949,7 +2708,6 @@ entry: ret void } - define void @test_vssseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -2972,7 +2730,6 @@ entry: ret void } - define void @test_vssseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -2995,7 +2752,6 @@ entry: ret void } - define void @test_vssseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -3018,7 +2774,6 @@ entry: ret void } - define void @test_vssseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -3041,7 +2796,6 @@ entry: ret void } - define void 
@test_vssseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -3064,7 +2818,6 @@ entry: ret void } - define void @test_vssseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -3087,7 +2840,6 @@ entry: ret void } - define void @test_vssseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -3110,7 +2862,6 @@ entry: ret void } - define void @test_vssseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -3133,7 +2884,6 @@ entry: ret void } - define void @test_vssseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -3156,7 +2906,6 @@ entry: ret void } - define void @test_vssseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -3179,7 +2928,6 @@ entry: ret void } - define void @test_vssseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -3202,7 +2950,6 @@ entry: ret void } - define void 
@test_vssseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -3225,7 +2972,6 @@ entry: ret void } - define void @test_vssseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -3248,7 +2994,6 @@ entry: ret void } - define void @test_vssseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -3271,7 +3016,6 @@ entry: ret void } - define void @test_vssseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -3294,7 +3038,6 @@ entry: ret void } - define void @test_vssseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -3317,7 +3060,6 @@ entry: ret void } - define void @test_vssseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -3340,7 +3082,6 @@ entry: ret void } - define void @test_vssseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -3363,7 +3104,6 @@ entry: ret void } - define void 
@test_vssseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -3386,7 +3126,6 @@ entry: ret void } - define void @test_vssseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -3409,7 +3148,6 @@ entry: ret void } - define void @test_vssseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -3432,7 +3170,6 @@ entry: ret void } - define void @test_vssseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -3455,7 +3192,6 @@ entry: ret void } - define void @test_vssseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -3478,7 +3214,6 @@ entry: ret void } - define void @test_vssseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -3501,7 +3236,6 @@ entry: ret void } - define void @test_vssseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -3524,7 +3258,6 @@ entry: ret void } - define void 
@test_vssseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -3547,7 +3280,6 @@ entry: ret void } - define void @test_vssseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -3570,7 +3302,6 @@ entry: ret void } - define void @test_vssseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -3593,7 +3324,6 @@ entry: ret void } - define void @test_vssseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -3616,7 +3346,6 @@ entry: ret void } - define void @test_vssseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -3639,7 +3368,6 @@ entry: ret void } - define void @test_vssseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -3662,7 +3390,6 @@ entry: ret void } - define void @test_vssseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -3685,7 +3412,6 @@ entry: ret void } - define void 
@test_vssseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -3708,7 +3434,6 @@ entry: ret void } - define void @test_vssseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -3731,7 +3456,6 @@ entry: ret void } - define void @test_vssseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -3754,7 +3478,6 @@ entry: ret void } - define void @test_vssseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -3777,7 +3500,6 @@ entry: ret void } - define void @test_vssseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -3800,7 +3522,6 @@ entry: ret void } - define void @test_vssseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -3823,7 +3544,6 @@ entry: ret void } - define void @test_vssseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -3846,7 +3566,6 @@ entry: ret void } - define void 
@test_vssseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -3869,7 +3588,6 @@ entry: ret void } - define void @test_vssseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -3892,7 +3610,6 @@ entry: ret void } - define void @test_vssseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vssseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vssseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssseg-rv64.ll index bdd809841d2d6..30c8090325845 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vssseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssseg-rv64.ll @@ -2,9 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zve64d,+f,+d,+zvfh \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) - define void @test_vssseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t: ; CHECK: # %bb.0: # %entry @@ -27,9 +24,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) - define void 
@test_vssseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -52,9 +46,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) - define void @test_vssseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -77,9 +68,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) - define void @test_vssseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -102,9 +90,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) - define void @test_vssseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -127,9 +112,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64, i64) -declare void 
@llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) - define void @test_vssseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -152,9 +134,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64) - define void @test_vssseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t: ; CHECK: # %bb.0: # %entry @@ -177,9 +156,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64) - define void @test_vssseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -202,9 +178,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64) - define void @test_vssseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -227,9 +200,6 @@ entry: ret void } -declare void 
@llvm.riscv.vssseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64) - define void @test_vssseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -252,9 +222,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64) - define void @test_vssseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -277,9 +244,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64) - define void @test_vssseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t: ; CHECK: # %bb.0: # %entry @@ -302,9 +266,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64) - define void @test_vssseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: 
test_vssseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -327,9 +288,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64) - define void @test_vssseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -352,9 +310,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64) - define void @test_vssseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -377,9 +332,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64) - define void @test_vssseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -402,9 +354,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64) - define void 
@test_vssseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t: ; CHECK: # %bb.0: # %entry @@ -427,9 +376,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64) - define void @test_vssseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -452,9 +398,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64) - define void @test_vssseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -477,9 +420,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64) - define void @test_vssseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -502,9 +442,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64, i64) -declare void 
@llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64) - define void @test_vssseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t: ; CHECK: # %bb.0: # %entry @@ -527,9 +464,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64) - define void @test_vssseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -552,9 +486,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64) - define void @test_vssseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -577,9 +508,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64) - define void @test_vssseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -602,9 +530,6 @@ entry: ret void } -declare void 
@llvm.riscv.vssseg7.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64) - define void @test_vssseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t: ; CHECK: # %bb.0: # %entry @@ -627,9 +552,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64) - define void @test_vssseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -652,9 +574,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64) - define void @test_vssseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -677,9 +596,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64) - define void @test_vssseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: 
test_vssseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -702,9 +618,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64) - define void @test_vssseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t: ; CHECK: # %bb.0: # %entry @@ -727,9 +640,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64) - define void @test_vssseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -752,9 +662,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64) - define void @test_vssseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -777,9 +684,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64, i64) -declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64) - define void 
@test_vssseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -802,8 +706,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) - define void @test_vssseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -826,8 +728,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) - define void @test_vssseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -850,8 +750,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) - define void @test_vssseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -874,8 +772,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) - define void @test_vssseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -898,8 +794,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", , 2), 
ptr, i64, , i64, i64) - define void @test_vssseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -922,8 +816,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64) - define void @test_vssseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -946,8 +838,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64) - define void @test_vssseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -970,8 +860,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64) - define void @test_vssseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -994,8 +882,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64) - define void @test_vssseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -1018,8 +904,6 @@ entry: ret void } -declare void 
@llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64) - define void @test_vssseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -1042,8 +926,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64) - define void @test_vssseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -1066,8 +948,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64) - define void @test_vssseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -1090,8 +970,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64) - define void @test_vssseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -1114,8 +992,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64) - define void @test_vssseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -1138,8 
+1014,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64) - define void @test_vssseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -1162,8 +1036,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64) - define void @test_vssseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -1186,8 +1058,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64) - define void @test_vssseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -1210,8 +1080,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64) - define void @test_vssseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -1234,8 +1102,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64) - define void @test_vssseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: 
test_vssseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -1258,8 +1124,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64) - define void @test_vssseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -1282,8 +1146,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64) - define void @test_vssseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -1306,8 +1168,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64) - define void @test_vssseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -1330,8 +1190,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64) - define void @test_vssseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -1354,8 +1212,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64) - define void @test_vssseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr 
%base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -1378,8 +1234,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64) - define void @test_vssseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -1402,8 +1256,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) - define void @test_vssseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -1426,8 +1278,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) - define void @test_vssseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -1450,8 +1300,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) - define void @test_vssseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -1474,8 +1322,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) - define void 
@test_vssseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -1498,8 +1344,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64) - define void @test_vssseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -1522,8 +1366,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64) - define void @test_vssseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -1546,8 +1388,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64) - define void @test_vssseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -1570,8 +1410,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64) - define void @test_vssseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -1594,8 +1432,6 @@ entry: ret void } -declare void 
@llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64) - define void @test_vssseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -1618,8 +1454,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64) - define void @test_vssseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -1642,8 +1476,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64) - define void @test_vssseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -1666,8 +1498,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64) - define void @test_vssseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -1690,8 +1520,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64) - define void @test_vssseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -1714,8 
+1542,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64) - define void @test_vssseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -1738,8 +1564,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64) - define void @test_vssseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -1762,8 +1586,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64) - define void @test_vssseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -1786,8 +1608,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64) - define void @test_vssseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -1810,8 +1630,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64) - define void @test_vssseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: 
test_vssseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -1834,8 +1652,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) - define void @test_vssseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -1858,8 +1674,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) - define void @test_vssseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -1882,8 +1696,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) - define void @test_vssseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -1906,8 +1718,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64) - define void @test_vssseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -1930,8 +1740,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, i64, , i64, i64) - define void @test_vssseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, 
ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -1954,8 +1762,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64) - define void @test_vssseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -1978,8 +1784,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, i64, , i64, i64) - define void @test_vssseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -2002,8 +1806,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", , 5), ptr, i64, , i64, i64) - define void @test_vssseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -2026,8 +1828,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", , 6), ptr, i64, , i64, i64) - define void @test_vssseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -2050,8 +1850,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", , 7), ptr, i64, , i64, i64) - define void 
@test_vssseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -2074,8 +1872,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", , 8), ptr, i64, , i64, i64) - define void @test_vssseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -2098,7 +1894,6 @@ entry: ret void } - define void @test_vssseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -2121,7 +1916,6 @@ entry: ret void } - define void @test_vssseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -2144,7 +1938,6 @@ entry: ret void } - define void @test_vssseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -2167,7 +1960,6 @@ entry: ret void } - define void @test_vssseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -2190,7 +1982,6 @@ entry: ret void } - define void @test_vssseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # 
%bb.0: # %entry @@ -2213,7 +2004,6 @@ entry: ret void } - define void @test_vssseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -2236,7 +2026,6 @@ entry: ret void } - define void @test_vssseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -2259,7 +2048,6 @@ entry: ret void } - define void @test_vssseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -2282,7 +2070,6 @@ entry: ret void } - define void @test_vssseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -2305,7 +2092,6 @@ entry: ret void } - define void @test_vssseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -2328,7 +2114,6 @@ entry: ret void } - define void @test_vssseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -2351,7 +2136,6 @@ entry: ret void } - define void @test_vssseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -2374,7 +2158,6 @@ entry: ret void } - define void 
@test_vssseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -2397,7 +2180,6 @@ entry: ret void } - define void @test_vssseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -2420,7 +2202,6 @@ entry: ret void } - define void @test_vssseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -2443,7 +2224,6 @@ entry: ret void } - define void @test_vssseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -2466,7 +2246,6 @@ entry: ret void } - define void @test_vssseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -2489,7 +2268,6 @@ entry: ret void } - define void @test_vssseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -2512,7 +2290,6 @@ entry: ret void } - define void @test_vssseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -2535,7 +2312,6 @@ entry: ret void } - define void 
@test_vssseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -2558,7 +2334,6 @@ entry: ret void } - define void @test_vssseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -2581,7 +2356,6 @@ entry: ret void } - define void @test_vssseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -2604,7 +2378,6 @@ entry: ret void } - define void @test_vssseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -2627,7 +2400,6 @@ entry: ret void } - define void @test_vssseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -2650,7 +2422,6 @@ entry: ret void } - define void @test_vssseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -2673,7 +2444,6 @@ entry: ret void } - define void @test_vssseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -2696,7 +2466,6 @@ entry: ret void } - define void 
@test_vssseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -2719,7 +2488,6 @@ entry: ret void } - define void @test_vssseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -2742,7 +2510,6 @@ entry: ret void } - define void @test_vssseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -2765,7 +2532,6 @@ entry: ret void } - define void @test_vssseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -2788,7 +2554,6 @@ entry: ret void } - define void @test_vssseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -2811,7 +2576,6 @@ entry: ret void } - define void @test_vssseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -2834,7 +2598,6 @@ entry: ret void } - define void @test_vssseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -2857,7 +2620,6 @@ entry: ret void } - define void 
@test_vssseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -2880,7 +2642,6 @@ entry: ret void } - define void @test_vssseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -2903,7 +2664,6 @@ entry: ret void } - define void @test_vssseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -2926,7 +2686,6 @@ entry: ret void } - define void @test_vssseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -2949,7 +2708,6 @@ entry: ret void } - define void @test_vssseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -2972,7 +2730,6 @@ entry: ret void } - define void @test_vssseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -2995,7 +2752,6 @@ entry: ret void } - define void @test_vssseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -3018,7 +2774,6 @@ entry: ret void } - define void 
@test_vssseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -3041,7 +2796,6 @@ entry: ret void } - define void @test_vssseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -3064,7 +2818,6 @@ entry: ret void } - define void @test_vssseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -3087,7 +2840,6 @@ entry: ret void } - define void @test_vssseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -3110,7 +2862,6 @@ entry: ret void } - define void @test_vssseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -3133,7 +2884,6 @@ entry: ret void } - define void @test_vssseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -3156,7 +2906,6 @@ entry: ret void } - define void @test_vssseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -3179,7 +2928,6 @@ entry: ret void } - define void 
@test_vssseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -3202,7 +2950,6 @@ entry: ret void } - define void @test_vssseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -3225,7 +2972,6 @@ entry: ret void } - define void @test_vssseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -3248,7 +2994,6 @@ entry: ret void } - define void @test_vssseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -3271,7 +3016,6 @@ entry: ret void } - define void @test_vssseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -3294,7 +3038,6 @@ entry: ret void } - define void @test_vssseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -3317,7 +3060,6 @@ entry: ret void } - define void @test_vssseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry @@ -3340,7 +3082,6 @@ entry: ret void } - define void 
@test_vssseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t: ; CHECK: # %bb.0: # %entry @@ -3363,7 +3104,6 @@ entry: ret void } - define void @test_vssseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t: ; CHECK: # %bb.0: # %entry @@ -3386,7 +3126,6 @@ entry: ret void } - define void @test_vssseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t: ; CHECK: # %bb.0: # %entry @@ -3409,7 +3148,6 @@ entry: ret void } - define void @test_vssseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t: ; CHECK: # %bb.0: # %entry @@ -3432,7 +3170,6 @@ entry: ret void } - define void @test_vssseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t: ; CHECK: # %bb.0: # %entry @@ -3455,7 +3192,6 @@ entry: ret void } - define void @test_vssseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t: ; CHECK: # %bb.0: # %entry @@ -3478,7 +3214,6 @@ entry: ret void } - define void @test_vssseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t: ; CHECK: # %bb.0: # %entry @@ -3501,7 +3236,6 @@ entry: ret void } - define void 
@test_vssseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t: ; CHECK: # %bb.0: # %entry @@ -3524,7 +3258,6 @@ entry: ret void } - define void @test_vssseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry @@ -3547,7 +3280,6 @@ entry: ret void } - define void @test_vssseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t: ; CHECK: # %bb.0: # %entry @@ -3570,7 +3302,6 @@ entry: ret void } - define void @test_vssseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t: ; CHECK: # %bb.0: # %entry @@ -3593,7 +3324,6 @@ entry: ret void } - define void @test_vssseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t: ; CHECK: # %bb.0: # %entry @@ -3616,7 +3346,6 @@ entry: ret void } - define void @test_vssseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t: ; CHECK: # %bb.0: # %entry @@ -3639,7 +3368,6 @@ entry: ret void } - define void @test_vssseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t: ; CHECK: # %bb.0: # %entry @@ -3662,7 +3390,6 @@ entry: ret void } - define void 
@test_vssseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t: ; CHECK: # %bb.0: # %entry @@ -3685,7 +3412,6 @@ entry: ret void } - define void @test_vssseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t: ; CHECK: # %bb.0: # %entry @@ -3708,7 +3434,6 @@ entry: ret void } - define void @test_vssseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t: ; CHECK: # %bb.0: # %entry @@ -3731,7 +3456,6 @@ entry: ret void } - define void @test_vssseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t: ; CHECK: # %bb.0: # %entry @@ -3754,7 +3478,6 @@ entry: ret void } - define void @test_vssseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t: ; CHECK: # %bb.0: # %entry @@ -3777,7 +3500,6 @@ entry: ret void } - define void @test_vssseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t: ; CHECK: # %bb.0: # %entry @@ -3800,7 +3522,6 @@ entry: ret void } - define void @test_vssseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t: ; CHECK: # %bb.0: # %entry @@ -3823,7 +3544,6 @@ entry: ret void } - define void 
@test_vssseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t: ; CHECK: # %bb.0: # %entry @@ -3846,7 +3566,6 @@ entry: ret void } - define void @test_vssseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t: ; CHECK: # %bb.0: # %entry @@ -3869,7 +3588,6 @@ entry: ret void } - define void @test_vssseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t: ; CHECK: # %bb.0: # %entry @@ -3892,7 +3610,6 @@ entry: ret void } - define void @test_vssseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vssseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-sdnode.ll index 661eca171404f..837016bd41ac4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vssub-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssub-sdnode.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.ssub.sat.nxv1i8(, ) - define @ssub_nxv1i8_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv1i8_vv: ; CHECK: # %bb.0: @@ -39,8 +37,6 @@ define @ssub_nxv1i8_vi( %va) { ret %v } -declare @llvm.ssub.sat.nxv2i8(, ) - define @ssub_nxv2i8_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv2i8_vv: ; CHECK: # %bb.0: @@ -74,8 +70,6 @@ define @ssub_nxv2i8_vi( %va) { ret %v } -declare @llvm.ssub.sat.nxv4i8(, ) - define @ssub_nxv4i8_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv4i8_vv: ; CHECK: # %bb.0: @@ -109,8 +103,6 @@ define @ssub_nxv4i8_vi( %va) { ret %v } -declare 
@llvm.ssub.sat.nxv8i8(, ) - define @ssub_nxv8i8_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv8i8_vv: ; CHECK: # %bb.0: @@ -144,8 +136,6 @@ define @ssub_nxv8i8_vi( %va) { ret %v } -declare @llvm.ssub.sat.nxv16i8(, ) - define @ssub_nxv16i8_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv16i8_vv: ; CHECK: # %bb.0: @@ -179,8 +169,6 @@ define @ssub_nxv16i8_vi( %va) { ret %v } -declare @llvm.ssub.sat.nxv32i8(, ) - define @ssub_nxv32i8_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv32i8_vv: ; CHECK: # %bb.0: @@ -214,8 +202,6 @@ define @ssub_nxv32i8_vi( %va) { ret %v } -declare @llvm.ssub.sat.nxv64i8(, ) - define @ssub_nxv64i8_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv64i8_vv: ; CHECK: # %bb.0: @@ -249,8 +235,6 @@ define @ssub_nxv64i8_vi( %va) { ret %v } -declare @llvm.ssub.sat.nxv1i16(, ) - define @ssub_nxv1i16_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv1i16_vv: ; CHECK: # %bb.0: @@ -284,8 +268,6 @@ define @ssub_nxv1i16_vi( %va) { ret %v } -declare @llvm.ssub.sat.nxv2i16(, ) - define @ssub_nxv2i16_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv2i16_vv: ; CHECK: # %bb.0: @@ -319,8 +301,6 @@ define @ssub_nxv2i16_vi( %va) { ret %v } -declare @llvm.ssub.sat.nxv4i16(, ) - define @ssub_nxv4i16_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv4i16_vv: ; CHECK: # %bb.0: @@ -354,8 +334,6 @@ define @ssub_nxv4i16_vi( %va) { ret %v } -declare @llvm.ssub.sat.nxv8i16(, ) - define @ssub_nxv8i16_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv8i16_vv: ; CHECK: # %bb.0: @@ -389,8 +367,6 @@ define @ssub_nxv8i16_vi( %va) { ret %v } -declare @llvm.ssub.sat.nxv16i16(, ) - define @ssub_nxv16i16_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv16i16_vv: ; CHECK: # %bb.0: @@ -424,8 +400,6 @@ define @ssub_nxv16i16_vi( %va) { ret %v } -declare @llvm.ssub.sat.nxv32i16(, ) - define @ssub_nxv32i16_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv32i16_vv: ; CHECK: # %bb.0: @@ -459,8 +433,6 @@ define @ssub_nxv32i16_vi( %va) { ret %v } -declare @llvm.ssub.sat.nxv1i32(, ) - define @ssub_nxv1i32_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv1i32_vv: ; CHECK: # %bb.0: @@ -494,8 +466,6 @@ define 
@ssub_nxv1i32_vi( %va) { ret %v } -declare @llvm.ssub.sat.nxv2i32(, ) - define @ssub_nxv2i32_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv2i32_vv: ; CHECK: # %bb.0: @@ -529,8 +499,6 @@ define @ssub_nxv2i32_vi( %va) { ret %v } -declare @llvm.ssub.sat.nxv4i32(, ) - define @ssub_nxv4i32_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv4i32_vv: ; CHECK: # %bb.0: @@ -564,8 +532,6 @@ define @ssub_nxv4i32_vi( %va) { ret %v } -declare @llvm.ssub.sat.nxv8i32(, ) - define @ssub_nxv8i32_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv8i32_vv: ; CHECK: # %bb.0: @@ -599,8 +565,6 @@ define @ssub_nxv8i32_vi( %va) { ret %v } -declare @llvm.ssub.sat.nxv16i32(, ) - define @ssub_nxv16i32_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv16i32_vv: ; CHECK: # %bb.0: @@ -634,8 +598,6 @@ define @ssub_nxv16i32_vi( %va) { ret %v } -declare @llvm.ssub.sat.nxv1i64(, ) - define @ssub_nxv1i64_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv1i64_vv: ; CHECK: # %bb.0: @@ -683,8 +645,6 @@ define @ssub_nxv1i64_vi( %va) { ret %v } -declare @llvm.ssub.sat.nxv2i64(, ) - define @ssub_nxv2i64_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv2i64_vv: ; CHECK: # %bb.0: @@ -732,8 +692,6 @@ define @ssub_nxv2i64_vi( %va) { ret %v } -declare @llvm.ssub.sat.nxv4i64(, ) - define @ssub_nxv4i64_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv4i64_vv: ; CHECK: # %bb.0: @@ -781,8 +739,6 @@ define @ssub_nxv4i64_vi( %va) { ret %v } -declare @llvm.ssub.sat.nxv8i64(, ) - define @ssub_nxv8i64_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv8i64_vv: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll index ebf8d5eeb40bc..0ac2ef7e251c0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.ssub.sat.nxv8i7(, , , i32) - define @vssub_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vx_nxv8i7: ; CHECK: # %bb.0: @@ -24,8 
+22,6 @@ define @vssub_vx_nxv8i7( %a, i7 signext %b, < ret %v } -declare @llvm.vp.ssub.sat.nxv1i8(, , , i32) - define @vssub_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_nxv1i8: ; CHECK: # %bb.0: @@ -105,8 +101,6 @@ define @vssub_vi_nxv1i8_unmasked( %va, i32 ze ret %v } -declare @llvm.vp.ssub.sat.nxv2i8(, , , i32) - define @vssub_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_nxv2i8: ; CHECK: # %bb.0: @@ -173,8 +167,6 @@ define @vssub_vi_nxv2i8_unmasked( %va, i32 ze ret %v } -declare @llvm.vp.ssub.sat.nxv3i8(, , , i32) - define @vssub_vv_nxv3i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_nxv3i8: ; CHECK: # %bb.0: @@ -241,8 +233,6 @@ define @vssub_vi_nxv3i8_unmasked( %va, i32 ze ret %v } -declare @llvm.vp.ssub.sat.nxv4i8(, , , i32) - define @vssub_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_nxv4i8: ; CHECK: # %bb.0: @@ -309,8 +299,6 @@ define @vssub_vi_nxv4i8_unmasked( %va, i32 ze ret %v } -declare @llvm.vp.ssub.sat.nxv8i8(, , , i32) - define @vssub_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_nxv8i8: ; CHECK: # %bb.0: @@ -377,8 +365,6 @@ define @vssub_vi_nxv8i8_unmasked( %va, i32 ze ret %v } -declare @llvm.vp.ssub.sat.nxv16i8(, , , i32) - define @vssub_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_nxv16i8: ; CHECK: # %bb.0: @@ -445,8 +431,6 @@ define @vssub_vi_nxv16i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.ssub.sat.nxv32i8(, , , i32) - define @vssub_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_nxv32i8: ; CHECK: # %bb.0: @@ -513,8 +497,6 @@ define @vssub_vi_nxv32i8_unmasked( %va, i32 ret %v } -declare @llvm.vp.ssub.sat.nxv64i8(, , , i32) - define @vssub_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_nxv64i8: ; CHECK: # %bb.0: @@ -583,8 +565,6 @@ define @vssub_vi_nxv64i8_unmasked( %va, i32 ; Test that split-legalization works when the mask itself needs splitting. 
-declare @llvm.vp.ssub.sat.nxv128i8(, , , i32) - define @vssub_vi_nxv128i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vi_nxv128i8: ; CHECK: # %bb.0: @@ -635,8 +615,6 @@ define @vssub_vi_nxv128i8_unmasked( %va, ret %v } -declare @llvm.vp.ssub.sat.nxv1i16(, , , i32) - define @vssub_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_nxv1i16: ; CHECK: # %bb.0: @@ -703,8 +681,6 @@ define @vssub_vi_nxv1i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.ssub.sat.nxv2i16(, , , i32) - define @vssub_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_nxv2i16: ; CHECK: # %bb.0: @@ -771,8 +747,6 @@ define @vssub_vi_nxv2i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.ssub.sat.nxv4i16(, , , i32) - define @vssub_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_nxv4i16: ; CHECK: # %bb.0: @@ -839,8 +813,6 @@ define @vssub_vi_nxv4i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.ssub.sat.nxv8i16(, , , i32) - define @vssub_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_nxv8i16: ; CHECK: # %bb.0: @@ -907,8 +879,6 @@ define @vssub_vi_nxv8i16_unmasked( %va, i32 ret %v } -declare @llvm.vp.ssub.sat.nxv16i16(, , , i32) - define @vssub_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_nxv16i16: ; CHECK: # %bb.0: @@ -975,8 +945,6 @@ define @vssub_vi_nxv16i16_unmasked( %va, ret %v } -declare @llvm.vp.ssub.sat.nxv32i16(, , , i32) - define @vssub_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_nxv32i16: ; CHECK: # %bb.0: @@ -1043,8 +1011,6 @@ define @vssub_vi_nxv32i16_unmasked( %va, ret %v } -declare @llvm.vp.ssub.sat.nxv1i32(, , , i32) - define @vssub_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_nxv1i32: ; CHECK: # %bb.0: @@ -1111,8 +1077,6 @@ define @vssub_vi_nxv1i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.ssub.sat.nxv2i32(, , , i32) - define @vssub_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_nxv2i32: ; CHECK: # 
%bb.0: @@ -1179,8 +1143,6 @@ define @vssub_vi_nxv2i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.ssub.sat.nxv4i32(, , , i32) - define @vssub_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_nxv4i32: ; CHECK: # %bb.0: @@ -1247,8 +1209,6 @@ define @vssub_vi_nxv4i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.ssub.sat.nxv8i32(, , , i32) - define @vssub_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_nxv8i32: ; CHECK: # %bb.0: @@ -1315,8 +1275,6 @@ define @vssub_vi_nxv8i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.ssub.sat.nxv16i32(, , , i32) - define @vssub_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_nxv16i32: ; CHECK: # %bb.0: @@ -1385,8 +1343,6 @@ define @vssub_vi_nxv16i32_unmasked( %va, ; Test that split-legalization works then the mask needs manual splitting. -declare @llvm.vp.ssub.sat.nxv32i32(, , , i32) - define @vssub_vi_nxv32i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vi_nxv32i32: ; CHECK: # %bb.0: @@ -1438,8 +1394,6 @@ define @vssub_vi_nxv32i32_unmasked( %va, ret %v } -declare @llvm.vp.ssub.sat.nxv1i64(, , , i32) - define @vssub_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_nxv1i64: ; CHECK: # %bb.0: @@ -1534,8 +1488,6 @@ define @vssub_vi_nxv1i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.ssub.sat.nxv2i64(, , , i32) - define @vssub_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1630,8 +1582,6 @@ define @vssub_vi_nxv2i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.ssub.sat.nxv4i64(, , , i32) - define @vssub_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1726,8 +1676,6 @@ define @vssub_vi_nxv4i64_unmasked( %va, i32 ret %v } -declare @llvm.vp.ssub.sat.nxv8i64(, , , i32) - define @vssub_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssub_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub.ll 
b/llvm/test/CodeGen/RISCV/rvv/vssub.ll index 0b00f6d801b4b..c0ae21f6e4025 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vssub.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssub.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vssub.nxv1i8.nxv1i8( - , - , - , - iXLen) - define @intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv2i8.nxv2i8( - , - , - , - iXLen) - define @intrinsic_vssub_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv4i8.nxv4i8( - , - , - , - iXLen) - define @intrinsic_vssub_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } 
-declare @llvm.riscv.vssub.nxv8i8.nxv8i8( - , - , - , - iXLen) - define @intrinsic_vssub_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv16i8.nxv16i8( - , - , - , - iXLen) - define @intrinsic_vssub_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv32i8.nxv32i8( - , - , - , - iXLen) - define @intrinsic_vssub_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv64i8.nxv64i8( - , - , - , - iXLen) - define @intrinsic_vssub_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a } -declare 
@llvm.riscv.vssub.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv1i16.nxv1i16( - , - , - , - iXLen) - define @intrinsic_vssub_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv2i16.nxv2i16( - , - , - , - iXLen) - define @intrinsic_vssub_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv4i16.nxv4i16( - , - , - , - iXLen) - define @intrinsic_vssub_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ 
-475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv8i16.nxv8i16( - , - , - , - iXLen) - define @intrinsic_vssub_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv16i16.nxv16i16( - , - , - , - iXLen) - define @intrinsic_vssub_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -544,14 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -569,12 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv32i16.nxv32i16( - , - , - , - iXLen) - define @intrinsic_vssub_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -591,14 +417,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv1i32.nxv1i32( - , - , - , - iXLen) - define @intrinsic_vssub_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vssub_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv2i32.nxv2i32( - , - , - , - iXLen) - define @intrinsic_vssub_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -711,12 +501,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv4i32.nxv4i32( - , - , - , - iXLen) - define @intrinsic_vssub_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -733,14 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -758,12 +534,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv8i32.nxv8i32( - , - , - , - iXLen) - define @intrinsic_vssub_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -780,14 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, 
iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -805,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv16i32.nxv16i32( - , - , - , - iXLen) - define @intrinsic_vssub_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -827,14 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv1i64.nxv1i64( - , - , - , - iXLen) - define @intrinsic_vssub_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv2i64.nxv2i64( - , - , - , - iXLen) - define @intrinsic_vssub_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv4i64.nxv4i64( - , - , - , - iXLen) - define 
@intrinsic_vssub_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv8i64.nxv8i64( - , - , - , - iXLen) - define @intrinsic_vssub_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1042,12 +734,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv1i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vssub_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1064,14 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1089,12 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv2i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vssub_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1111,14 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define 
@intrinsic_vssub_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1136,12 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv4i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vssub_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1158,14 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1183,12 +833,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv8i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vssub_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1205,14 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1230,12 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv16i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vssub_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1252,14 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1277,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv32i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vssub_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) 
nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1299,14 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1324,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv64i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vssub_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1346,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1371,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv1i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vssub_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1393,14 +981,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1418,12 +998,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv2i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vssub_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1440,14 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1465,12 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv4i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vssub_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1487,14 +1047,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1512,12 +1064,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv8i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vssub_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1534,14 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1559,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv16i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vssub_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1581,14 +1113,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1606,12 +1130,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv32i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vssub_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen 
%2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1628,14 +1146,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1653,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv1i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vssub_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1675,14 +1179,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1700,12 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv2i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vssub_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1722,14 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1747,12 +1229,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv4i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vssub_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1769,14 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vx_nxv4i32_nxv4i32_i32( %0, 
%1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1794,12 +1262,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv8i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vssub_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1816,14 +1278,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1841,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv16i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vssub_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1863,14 +1311,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1888,12 +1328,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv1i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vssub_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vssub_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1922,14 +1356,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vssub_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1959,12 +1385,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv2i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vssub_vx_nxv2i64_nxv2i64_i64( 
%0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vssub_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1993,14 +1413,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vssub_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2030,12 +1442,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv4i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vssub_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vssub_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2064,14 +1470,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vssub_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2101,12 +1499,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.nxv8i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vssub_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vssub_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2135,14 +1527,6 @@ entry: ret %a } -declare @llvm.riscv.vssub.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vssub_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vssub_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-sdnode.ll index ac6ae6811ccde..3fa74ab285bb1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vssubu-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-sdnode.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.usub.sat.nxv1i8(, ) - define @usub_nxv1i8_vv( %va, 
%b) { ; CHECK-LABEL: usub_nxv1i8_vv: ; CHECK: # %bb.0: @@ -39,8 +37,6 @@ define @usub_nxv1i8_vi( %va) { ret %v } -declare @llvm.usub.sat.nxv2i8(, ) - define @usub_nxv2i8_vv( %va, %b) { ; CHECK-LABEL: usub_nxv2i8_vv: ; CHECK: # %bb.0: @@ -74,8 +70,6 @@ define @usub_nxv2i8_vi( %va) { ret %v } -declare @llvm.usub.sat.nxv4i8(, ) - define @usub_nxv4i8_vv( %va, %b) { ; CHECK-LABEL: usub_nxv4i8_vv: ; CHECK: # %bb.0: @@ -109,8 +103,6 @@ define @usub_nxv4i8_vi( %va) { ret %v } -declare @llvm.usub.sat.nxv8i8(, ) - define @usub_nxv8i8_vv( %va, %b) { ; CHECK-LABEL: usub_nxv8i8_vv: ; CHECK: # %bb.0: @@ -144,8 +136,6 @@ define @usub_nxv8i8_vi( %va) { ret %v } -declare @llvm.usub.sat.nxv16i8(, ) - define @usub_nxv16i8_vv( %va, %b) { ; CHECK-LABEL: usub_nxv16i8_vv: ; CHECK: # %bb.0: @@ -179,8 +169,6 @@ define @usub_nxv16i8_vi( %va) { ret %v } -declare @llvm.usub.sat.nxv32i8(, ) - define @usub_nxv32i8_vv( %va, %b) { ; CHECK-LABEL: usub_nxv32i8_vv: ; CHECK: # %bb.0: @@ -214,8 +202,6 @@ define @usub_nxv32i8_vi( %va) { ret %v } -declare @llvm.usub.sat.nxv64i8(, ) - define @usub_nxv64i8_vv( %va, %b) { ; CHECK-LABEL: usub_nxv64i8_vv: ; CHECK: # %bb.0: @@ -249,8 +235,6 @@ define @usub_nxv64i8_vi( %va) { ret %v } -declare @llvm.usub.sat.nxv1i16(, ) - define @usub_nxv1i16_vv( %va, %b) { ; CHECK-LABEL: usub_nxv1i16_vv: ; CHECK: # %bb.0: @@ -284,8 +268,6 @@ define @usub_nxv1i16_vi( %va) { ret %v } -declare @llvm.usub.sat.nxv2i16(, ) - define @usub_nxv2i16_vv( %va, %b) { ; CHECK-LABEL: usub_nxv2i16_vv: ; CHECK: # %bb.0: @@ -319,8 +301,6 @@ define @usub_nxv2i16_vi( %va) { ret %v } -declare @llvm.usub.sat.nxv4i16(, ) - define @usub_nxv4i16_vv( %va, %b) { ; CHECK-LABEL: usub_nxv4i16_vv: ; CHECK: # %bb.0: @@ -354,8 +334,6 @@ define @usub_nxv4i16_vi( %va) { ret %v } -declare @llvm.usub.sat.nxv8i16(, ) - define @usub_nxv8i16_vv( %va, %b) { ; CHECK-LABEL: usub_nxv8i16_vv: ; CHECK: # %bb.0: @@ -389,8 +367,6 @@ define @usub_nxv8i16_vi( %va) { ret %v } -declare @llvm.usub.sat.nxv16i16(, ) - define 
@usub_nxv16i16_vv( %va, %b) { ; CHECK-LABEL: usub_nxv16i16_vv: ; CHECK: # %bb.0: @@ -424,8 +400,6 @@ define @usub_nxv16i16_vi( %va) { ret %v } -declare @llvm.usub.sat.nxv32i16(, ) - define @usub_nxv32i16_vv( %va, %b) { ; CHECK-LABEL: usub_nxv32i16_vv: ; CHECK: # %bb.0: @@ -459,8 +433,6 @@ define @usub_nxv32i16_vi( %va) { ret %v } -declare @llvm.usub.sat.nxv1i32(, ) - define @usub_nxv1i32_vv( %va, %b) { ; CHECK-LABEL: usub_nxv1i32_vv: ; CHECK: # %bb.0: @@ -494,8 +466,6 @@ define @usub_nxv1i32_vi( %va) { ret %v } -declare @llvm.usub.sat.nxv2i32(, ) - define @usub_nxv2i32_vv( %va, %b) { ; CHECK-LABEL: usub_nxv2i32_vv: ; CHECK: # %bb.0: @@ -529,8 +499,6 @@ define @usub_nxv2i32_vi( %va) { ret %v } -declare @llvm.usub.sat.nxv4i32(, ) - define @usub_nxv4i32_vv( %va, %b) { ; CHECK-LABEL: usub_nxv4i32_vv: ; CHECK: # %bb.0: @@ -564,8 +532,6 @@ define @usub_nxv4i32_vi( %va) { ret %v } -declare @llvm.usub.sat.nxv8i32(, ) - define @usub_nxv8i32_vv( %va, %b) { ; CHECK-LABEL: usub_nxv8i32_vv: ; CHECK: # %bb.0: @@ -599,8 +565,6 @@ define @usub_nxv8i32_vi( %va) { ret %v } -declare @llvm.usub.sat.nxv16i32(, ) - define @usub_nxv16i32_vv( %va, %b) { ; CHECK-LABEL: usub_nxv16i32_vv: ; CHECK: # %bb.0: @@ -634,8 +598,6 @@ define @usub_nxv16i32_vi( %va) { ret %v } -declare @llvm.usub.sat.nxv1i64(, ) - define @usub_nxv1i64_vv( %va, %b) { ; CHECK-LABEL: usub_nxv1i64_vv: ; CHECK: # %bb.0: @@ -683,8 +645,6 @@ define @usub_nxv1i64_vi( %va) { ret %v } -declare @llvm.usub.sat.nxv2i64(, ) - define @usub_nxv2i64_vv( %va, %b) { ; CHECK-LABEL: usub_nxv2i64_vv: ; CHECK: # %bb.0: @@ -732,8 +692,6 @@ define @usub_nxv2i64_vi( %va) { ret %v } -declare @llvm.usub.sat.nxv4i64(, ) - define @usub_nxv4i64_vv( %va, %b) { ; CHECK-LABEL: usub_nxv4i64_vv: ; CHECK: # %bb.0: @@ -781,8 +739,6 @@ define @usub_nxv4i64_vi( %va) { ret %v } -declare @llvm.usub.sat.nxv8i64(, ) - define @usub_nxv8i64_vv( %va, %b) { ; CHECK-LABEL: usub_nxv8i64_vv: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll 
b/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll index d54901c93d53c..bde279a4d1f2b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.usub.sat.nxv8i7(, , , i32) - define @vssubu_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vx_nxv8i7: ; CHECK: # %bb.0: @@ -22,8 +20,6 @@ define @vssubu_vx_nxv8i7( %a, i7 signext %b, ret %v } -declare @llvm.vp.usub.sat.nxv1i8(, , , i32) - define @vssubu_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_nxv1i8: ; CHECK: # %bb.0: @@ -103,8 +99,6 @@ define @vssubu_vi_nxv1i8_unmasked( %va, i32 z ret %v } -declare @llvm.vp.usub.sat.nxv2i8(, , , i32) - define @vssubu_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_nxv2i8: ; CHECK: # %bb.0: @@ -171,8 +165,6 @@ define @vssubu_vi_nxv2i8_unmasked( %va, i32 z ret %v } -declare @llvm.vp.usub.sat.nxv3i8(, , , i32) - define @vssubu_vv_nxv3i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_nxv3i8: ; CHECK: # %bb.0: @@ -239,8 +231,6 @@ define @vssubu_vi_nxv3i8_unmasked( %va, i32 z ret %v } -declare @llvm.vp.usub.sat.nxv4i8(, , , i32) - define @vssubu_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_nxv4i8: ; CHECK: # %bb.0: @@ -307,8 +297,6 @@ define @vssubu_vi_nxv4i8_unmasked( %va, i32 z ret %v } -declare @llvm.vp.usub.sat.nxv8i8(, , , i32) - define @vssubu_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_nxv8i8: ; CHECK: # %bb.0: @@ -375,8 +363,6 @@ define @vssubu_vi_nxv8i8_unmasked( %va, i32 z ret %v } -declare @llvm.vp.usub.sat.nxv16i8(, , , i32) - define @vssubu_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_nxv16i8: ; CHECK: # %bb.0: @@ -443,8 +429,6 @@ define @vssubu_vi_nxv16i8_unmasked( %va, i3 ret %v } -declare @llvm.vp.usub.sat.nxv32i8(, , , i32) - 
define @vssubu_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_nxv32i8: ; CHECK: # %bb.0: @@ -511,8 +495,6 @@ define @vssubu_vi_nxv32i8_unmasked( %va, i3 ret %v } -declare @llvm.vp.usub.sat.nxv64i8(, , , i32) - define @vssubu_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_nxv64i8: ; CHECK: # %bb.0: @@ -581,8 +563,6 @@ define @vssubu_vi_nxv64i8_unmasked( %va, i3 ; Test that split-legalization works when the mask itself needs splitting. -declare @llvm.vp.usub.sat.nxv128i8(, , , i32) - define @vssubu_vi_nxv128i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vi_nxv128i8: ; CHECK: # %bb.0: @@ -633,8 +613,6 @@ define @vssubu_vi_nxv128i8_unmasked( %va, ret %v } -declare @llvm.vp.usub.sat.nxv1i16(, , , i32) - define @vssubu_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_nxv1i16: ; CHECK: # %bb.0: @@ -701,8 +679,6 @@ define @vssubu_vi_nxv1i16_unmasked( %va, i3 ret %v } -declare @llvm.vp.usub.sat.nxv2i16(, , , i32) - define @vssubu_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_nxv2i16: ; CHECK: # %bb.0: @@ -769,8 +745,6 @@ define @vssubu_vi_nxv2i16_unmasked( %va, i3 ret %v } -declare @llvm.vp.usub.sat.nxv4i16(, , , i32) - define @vssubu_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_nxv4i16: ; CHECK: # %bb.0: @@ -837,8 +811,6 @@ define @vssubu_vi_nxv4i16_unmasked( %va, i3 ret %v } -declare @llvm.vp.usub.sat.nxv8i16(, , , i32) - define @vssubu_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_nxv8i16: ; CHECK: # %bb.0: @@ -905,8 +877,6 @@ define @vssubu_vi_nxv8i16_unmasked( %va, i3 ret %v } -declare @llvm.vp.usub.sat.nxv16i16(, , , i32) - define @vssubu_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_nxv16i16: ; CHECK: # %bb.0: @@ -973,8 +943,6 @@ define @vssubu_vi_nxv16i16_unmasked( %va, ret %v } -declare @llvm.vp.usub.sat.nxv32i16(, , , i32) - define @vssubu_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) { ; 
CHECK-LABEL: vssubu_vv_nxv32i16: ; CHECK: # %bb.0: @@ -1041,8 +1009,6 @@ define @vssubu_vi_nxv32i16_unmasked( %va, ret %v } -declare @llvm.vp.usub.sat.nxv1i32(, , , i32) - define @vssubu_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_nxv1i32: ; CHECK: # %bb.0: @@ -1109,8 +1075,6 @@ define @vssubu_vi_nxv1i32_unmasked( %va, i3 ret %v } -declare @llvm.vp.usub.sat.nxv2i32(, , , i32) - define @vssubu_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_nxv2i32: ; CHECK: # %bb.0: @@ -1177,8 +1141,6 @@ define @vssubu_vi_nxv2i32_unmasked( %va, i3 ret %v } -declare @llvm.vp.usub.sat.nxv4i32(, , , i32) - define @vssubu_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_nxv4i32: ; CHECK: # %bb.0: @@ -1245,8 +1207,6 @@ define @vssubu_vi_nxv4i32_unmasked( %va, i3 ret %v } -declare @llvm.vp.usub.sat.nxv8i32(, , , i32) - define @vssubu_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_nxv8i32: ; CHECK: # %bb.0: @@ -1313,8 +1273,6 @@ define @vssubu_vi_nxv8i32_unmasked( %va, i3 ret %v } -declare @llvm.vp.usub.sat.nxv16i32(, , , i32) - define @vssubu_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_nxv16i32: ; CHECK: # %bb.0: @@ -1383,8 +1341,6 @@ define @vssubu_vi_nxv16i32_unmasked( %va, ; Test that split-legalization works then the mask needs manual splitting. 
-declare @llvm.vp.usub.sat.nxv32i32(, , , i32) - define @vssubu_vi_nxv32i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vi_nxv32i32: ; CHECK: # %bb.0: @@ -1436,8 +1392,6 @@ define @vssubu_vi_nxv32i32_unmasked( %va, ret %v } -declare @llvm.vp.usub.sat.nxv1i64(, , , i32) - define @vssubu_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_nxv1i64: ; CHECK: # %bb.0: @@ -1532,8 +1486,6 @@ define @vssubu_vi_nxv1i64_unmasked( %va, i3 ret %v } -declare @llvm.vp.usub.sat.nxv2i64(, , , i32) - define @vssubu_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1628,8 +1580,6 @@ define @vssubu_vi_nxv2i64_unmasked( %va, i3 ret %v } -declare @llvm.vp.usub.sat.nxv4i64(, , , i32) - define @vssubu_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1724,8 +1674,6 @@ define @vssubu_vi_nxv4i64_unmasked( %va, i3 ret %v } -declare @llvm.vp.usub.sat.nxv8i64(, , , i32) - define @vssubu_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vssubu_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu.ll index 859329e005aff..699a2fd4f528a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vssubu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssubu.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vssubu.nxv1i8.nxv1i8( - , - , - , - iXLen) - define @intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # 
%entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv2i8.nxv2i8( - , - , - , - iXLen) - define @intrinsic_vssubu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv4i8.nxv4i8( - , - , - , - iXLen) - define @intrinsic_vssubu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv8i8.nxv8i8( - , - , - , - iXLen) - define @intrinsic_vssubu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv16i8.nxv16i8( - , - , - , - iXLen) - define @intrinsic_vssubu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ 
entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv32i8.nxv32i8( - , - , - , - iXLen) - define @intrinsic_vssubu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv64i8.nxv64i8( - , - , - , - iXLen) - define @intrinsic_vssubu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv1i16.nxv1i16( - , - , - , - iXLen) - define @intrinsic_vssubu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vssubu_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv2i16.nxv2i16( - , - , - , - iXLen) - define @intrinsic_vssubu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv4i16.nxv4i16( - , - , - , - iXLen) - define @intrinsic_vssubu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv8i16.nxv8i16( - , - , - , - iXLen) - define @intrinsic_vssubu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv16i16.nxv16i16( - , - , - , - iXLen) - define @intrinsic_vssubu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, 
iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -544,14 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -569,12 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv32i16.nxv32i16( - , - , - , - iXLen) - define @intrinsic_vssubu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -591,14 +417,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv1i32.nxv1i32( - , - , - , - iXLen) - define @intrinsic_vssubu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv2i32.nxv2i32( - , - , - , - iXLen) - define @intrinsic_vssubu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, 
- iXLen) - define @intrinsic_vssubu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -711,12 +501,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv4i32.nxv4i32( - , - , - , - iXLen) - define @intrinsic_vssubu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -733,14 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -758,12 +534,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv8i32.nxv8i32( - , - , - , - iXLen) - define @intrinsic_vssubu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -780,14 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -805,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv16i32.nxv16i32( - , - , - , - iXLen) - define @intrinsic_vssubu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -827,14 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret 
%a } -declare @llvm.riscv.vssubu.nxv1i64.nxv1i64( - , - , - , - iXLen) - define @intrinsic_vssubu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv2i64.nxv2i64( - , - , - , - iXLen) - define @intrinsic_vssubu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv4i64.nxv4i64( - , - , - , - iXLen) - define @intrinsic_vssubu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv8i64.nxv8i64( - , - , - , - iXLen) - define @intrinsic_vssubu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ 
-1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1042,12 +734,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv1i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vssubu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1064,14 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1089,12 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv2i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vssubu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1111,14 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1136,12 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv4i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vssubu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1158,14 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1183,12 +833,6 @@ 
entry: ret %a } -declare @llvm.riscv.vssubu.nxv8i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vssubu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1205,14 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1230,12 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv16i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vssubu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1252,14 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1277,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv32i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vssubu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1299,14 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1324,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv64i8.i8( - , - , - i8, - iXLen) - define @intrinsic_vssubu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1346,14 +948,6 @@ entry: ret %a } -declare 
@llvm.riscv.vssubu.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1371,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv1i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vssubu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1393,14 +981,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1418,12 +998,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv2i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vssubu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1440,14 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1465,12 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv4i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vssubu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1487,14 +1047,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1512,12 
+1064,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv8i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vssubu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1534,14 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1559,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv16i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vssubu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1581,14 +1113,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1606,12 +1130,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv32i16.i16( - , - , - i16, - iXLen) - define @intrinsic_vssubu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1628,14 +1146,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1653,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv1i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vssubu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv1i32_nxv1i32_i32: ; CHECK: # 
%bb.0: # %entry @@ -1675,14 +1179,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1700,12 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv2i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vssubu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1722,14 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1747,12 +1229,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv4i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vssubu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1769,14 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1794,12 +1262,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv8i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vssubu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1816,14 +1278,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vssubu_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1841,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv16i32.i32( - , - , - i32, - iXLen) - define @intrinsic_vssubu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1863,14 +1311,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1888,12 +1328,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv1i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1922,14 +1356,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vssubu_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1959,12 +1385,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv2i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vssubu_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vssubu_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1993,14 +1413,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vssubu_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2030,12 +1442,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv4i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vssubu_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; 
RV32-LABEL: intrinsic_vssubu_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2064,14 +1470,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vssubu_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2101,12 +1499,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.nxv8i64.i64( - , - , - i64, - iXLen) - define @intrinsic_vssubu_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vssubu_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2135,14 +1527,6 @@ entry: ret %a } -declare @llvm.riscv.vssubu.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, - iXLen) - define @intrinsic_vssubu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vssubu_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-vp-mask.ll index 0207d0864aab4..3430e56b67eae 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsub-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsub-vp-mask.ll @@ -4,9 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK - -declare @llvm.vp.sub.nxv2i1(, , , i32) - define @vsub_vv_nxv2i1( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv2i1: ; CHECK: # %bb.0: @@ -17,8 +14,6 @@ define @vsub_vv_nxv2i1( %va, %v } -declare @llvm.vp.sub.nxv4i1(, , , i32) - define @vsub_vv_nxv4i1( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv4i1: ; CHECK: # %bb.0: @@ -29,8 +24,6 @@ define @vsub_vv_nxv4i1( %va, %v } -declare @llvm.vp.sub.nxv8i1(, , , i32) - define @vsub_vv_nxv8i1( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv8i1: ; CHECK: # %bb.0: @@ -41,8 +34,6 @@ define @vsub_vv_nxv8i1( %va, %v } -declare @llvm.vp.sub.nxv16i1(, , , i32) - define 
@vsub_vv_nxv16i1( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv16i1: ; CHECK: # %bb.0: @@ -53,8 +44,6 @@ define @vsub_vv_nxv16i1( %va, %v } -declare @llvm.vp.sub.nxv32i1(, , , i32) - define @vsub_vv_nxv32i1( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv32i1: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll index e28da6bc4ec64..92fbe88ae9333 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.sub.nxv8i7(, , , i32) - define @vsub_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_nxv8i7: ; CHECK: # %bb.0: @@ -18,8 +16,6 @@ define @vsub_vx_nxv8i7( %a, i7 signext %b, %v } -declare @llvm.vp.sub.nxv1i8(, , , i32) - define @vsub_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv1i8: ; CHECK: # %bb.0: @@ -64,8 +60,6 @@ define @vsub_vx_nxv1i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.sub.nxv2i8(, , , i32) - define @vsub_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv2i8: ; CHECK: # %bb.0: @@ -110,8 +104,6 @@ define @vsub_vx_nxv2i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.sub.nxv4i8(, , , i32) - define @vsub_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv4i8: ; CHECK: # %bb.0: @@ -156,8 +148,6 @@ define @vsub_vx_nxv4i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.sub.nxv5i8(, , , i32) - define @vsub_vv_nxv5i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv5i8: ; CHECK: # %bb.0: @@ -202,8 +192,6 @@ define @vsub_vx_nxv5i8_unmasked( %va, i8 %b, ret %v } -declare @llvm.vp.sub.nxv8i8(, , , i32) - define @vsub_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv8i8: ; CHECK: # %bb.0: @@ -248,8 +236,6 @@ define @vsub_vx_nxv8i8_unmasked( %va, i8 %b, ret %v } 
-declare @llvm.vp.sub.nxv16i8(, , , i32) - define @vsub_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv16i8: ; CHECK: # %bb.0: @@ -294,8 +280,6 @@ define @vsub_vx_nxv16i8_unmasked( %va, i8 % ret %v } -declare @llvm.vp.sub.nxv32i8(, , , i32) - define @vsub_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv32i8: ; CHECK: # %bb.0: @@ -340,8 +324,6 @@ define @vsub_vx_nxv32i8_unmasked( %va, i8 % ret %v } -declare @llvm.vp.sub.nxv64i8(, , , i32) - define @vsub_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv64i8: ; CHECK: # %bb.0: @@ -386,8 +368,6 @@ define @vsub_vx_nxv64i8_unmasked( %va, i8 % ret %v } -declare @llvm.vp.sub.nxv1i16(, , , i32) - define @vsub_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv1i16: ; CHECK: # %bb.0: @@ -432,8 +412,6 @@ define @vsub_vx_nxv1i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.sub.nxv2i16(, , , i32) - define @vsub_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv2i16: ; CHECK: # %bb.0: @@ -478,8 +456,6 @@ define @vsub_vx_nxv2i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.sub.nxv4i16(, , , i32) - define @vsub_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv4i16: ; CHECK: # %bb.0: @@ -524,8 +500,6 @@ define @vsub_vx_nxv4i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.sub.nxv8i16(, , , i32) - define @vsub_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv8i16: ; CHECK: # %bb.0: @@ -570,8 +544,6 @@ define @vsub_vx_nxv8i16_unmasked( %va, i16 ret %v } -declare @llvm.vp.sub.nxv16i16(, , , i32) - define @vsub_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv16i16: ; CHECK: # %bb.0: @@ -616,8 +588,6 @@ define @vsub_vx_nxv16i16_unmasked( %va, i ret %v } -declare @llvm.vp.sub.nxv32i16(, , , i32) - define @vsub_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv32i16: ; CHECK: # %bb.0: @@ -662,8 +632,6 @@ define @vsub_vx_nxv32i16_unmasked( 
%va, i ret %v } -declare @llvm.vp.sub.nxv1i32(, , , i32) - define @vsub_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv1i32: ; CHECK: # %bb.0: @@ -708,8 +676,6 @@ define @vsub_vx_nxv1i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.sub.nxv2i32(, , , i32) - define @vsub_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv2i32: ; CHECK: # %bb.0: @@ -754,8 +720,6 @@ define @vsub_vx_nxv2i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.sub.nxv4i32(, , , i32) - define @vsub_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv4i32: ; CHECK: # %bb.0: @@ -800,8 +764,6 @@ define @vsub_vx_nxv4i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.sub.nxv8i32(, , , i32) - define @vsub_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv8i32: ; CHECK: # %bb.0: @@ -846,8 +808,6 @@ define @vsub_vx_nxv8i32_unmasked( %va, i32 ret %v } -declare @llvm.vp.sub.nxv16i32(, , , i32) - define @vsub_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv16i32: ; CHECK: # %bb.0: @@ -892,8 +852,6 @@ define @vsub_vx_nxv16i32_unmasked( %va, i ret %v } -declare @llvm.vp.sub.nxv1i64(, , , i32) - define @vsub_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv1i64: ; CHECK: # %bb.0: @@ -966,8 +924,6 @@ define @vsub_vx_nxv1i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.sub.nxv2i64(, , , i32) - define @vsub_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1040,8 +996,6 @@ define @vsub_vx_nxv2i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.sub.nxv4i64(, , , i32) - define @vsub_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv4i64: ; CHECK: # %bb.0: @@ -1114,8 +1068,6 @@ define @vsub_vx_nxv4i64_unmasked( %va, i64 ret %v } -declare @llvm.vp.sub.nxv8i64(, , , i32) - define @vsub_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv8i64: ; CHECK: # %bb.0: diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vsub.ll b/llvm/test/CodeGen/RISCV/rvv/vsub.ll index 6d41d9c2e1c4d..d5b445a4aa233 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsub.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsub.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vsub.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vsub_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vsub_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ 
entry: ret %a } -declare @llvm.riscv.vsub.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vsub_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vsub_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vsub_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -280,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv64i8.nxv64i8( - , - , - , - iXLen); - define @intrinsic_vsub_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -302,13 +218,6 @@ entry: ret %a } -declare 
@llvm.riscv.vsub.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -327,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vsub_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -349,13 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -373,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vsub_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -395,13 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -419,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vsub_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -441,13 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -465,12 +335,6 @@ 
entry: ret %a } -declare @llvm.riscv.vsub.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vsub_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -487,13 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -511,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vsub_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -533,13 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -557,12 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv32i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vsub_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -579,13 +417,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -604,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vsub_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ 
-626,13 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -650,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vsub_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -672,13 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -696,12 +501,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vsub_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -718,13 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -742,12 +534,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vsub_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -764,13 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # 
%bb.0: # %entry @@ -788,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv16i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vsub_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -810,13 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -835,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv1i64.nxv1i64( - , - , - , - iXLen); - define @intrinsic_vsub_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -857,13 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -881,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv2i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vsub_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -903,13 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -927,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv4i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vsub_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: 
# %bb.0: # %entry @@ -949,13 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -973,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv8i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vsub_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -995,13 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv8i64.nxv8i64( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1020,12 +734,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv1i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vsub_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1042,13 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1066,12 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vsub_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1088,13 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1112,12 +800,6 
@@ entry: ret %a } -declare @llvm.riscv.vsub.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vsub_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1134,13 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1158,12 +833,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vsub_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1180,13 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1204,12 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vsub_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1226,13 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1250,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vsub_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1272,13 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, iXLen); 
- define @intrinsic_vsub_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1296,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv64i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vsub_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1318,13 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1342,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vsub_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1364,13 +981,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1388,12 +998,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vsub_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1410,13 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1434,12 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv4i16.i16( - , - , - i16, - iXLen); - define 
@intrinsic_vsub_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1456,13 +1047,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1480,12 +1064,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vsub_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1502,13 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1526,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vsub_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1548,13 +1113,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1572,12 +1130,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv32i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vsub_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1594,13 +1146,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, iXLen); - define 
@intrinsic_vsub_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1618,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vsub_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1640,13 +1179,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1664,12 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vsub_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1686,13 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1710,12 +1229,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vsub_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1732,13 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1756,12 +1262,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv8i32.i32( - , - , - i32, - iXLen); - define 
@intrinsic_vsub_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1778,13 +1278,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1802,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv16i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vsub_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1824,13 +1311,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1848,12 +1328,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv1i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vsub_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vsub_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1882,13 +1356,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vsub_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1918,12 +1385,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv2i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vsub_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vsub_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1952,13 +1413,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, iXLen); - define 
@intrinsic_vsub_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vsub_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1988,12 +1442,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv4i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vsub_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vsub_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2022,13 +1470,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vsub_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2058,12 +1499,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.nxv8i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vsub_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vsub_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2092,13 +1527,6 @@ entry: ret %a } -declare @llvm.riscv.vsub.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, iXLen); - define @intrinsic_vsub_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vsub_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll index 6ba2b405c943e..851bb555116ed 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll @@ -4,12 +4,6 @@ ; The intrinsics are not supported with RV32. 
-declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -67,12 +54,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -89,13 +70,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -113,12 +87,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -135,13 +103,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -159,12 +120,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -181,13 +136,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -205,12 +153,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -227,13 +169,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -251,12 +186,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -273,13 +202,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -297,12 +219,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -319,13 +235,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i64( - , - ptr, - , - , - 
i64); - define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -343,12 +252,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -365,13 +268,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -389,12 +285,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -411,13 +301,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -435,12 +318,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -457,13 +334,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i64: ; 
CHECK: # %bb.0: # %entry @@ -481,12 +351,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -503,13 +367,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -527,12 +384,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -549,13 +400,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -573,12 +417,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -595,13 +433,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -619,12 +450,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i64( - , - ptr, - , - i64); - define void 
@intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -641,13 +466,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -665,12 +483,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -687,13 +499,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -711,12 +516,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -733,13 +532,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -757,12 +549,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -779,13 
+565,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -803,12 +582,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -825,13 +598,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -849,12 +615,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -871,13 +631,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -895,12 +648,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -917,13 +664,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i64( %0, 
ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -941,12 +681,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1bf16.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv1bf16_nxv1bf16_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1bf16_nxv1bf16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -963,13 +697,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1bf16.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv1bf16_nxv1bf16_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1bf16_nxv1bf16_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -987,12 +714,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2bf16.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv2bf16_nxv2bf16_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2bf16_nxv2bf16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1009,13 +730,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2bf16.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv2bf16_nxv2bf16_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2bf16_nxv2bf16_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1033,12 +747,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4bf16.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv4bf16_nxv4bf16_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4bf16_nxv4bf16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1055,13 +763,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4bf16.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv4bf16_nxv4bf16_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4bf16_nxv4bf16_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1079,12 +780,6 
@@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8bf16.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv8bf16_nxv8bf16_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8bf16_nxv8bf16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1101,13 +796,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8bf16.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv8bf16_nxv8bf16_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8bf16_nxv8bf16_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1125,12 +813,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1147,13 +829,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1171,12 +846,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1193,13 +862,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1217,12 +879,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i64( %0, ptr 
%1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1239,13 +895,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1263,12 +912,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1285,13 +928,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1309,12 +945,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1331,13 +961,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1355,12 +978,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1377,13 +994,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxei.mask.nxv2f64.nxv2i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1401,12 +1011,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1423,13 +1027,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1447,12 +1044,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i64( - , - ptr, - , - i64); - define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1469,13 +1060,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i64( - , - ptr, - , - , - i64); - define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsuxei.ll b/llvm/test/CodeGen/RISCV/rvv/vsuxei.ll index 69b1173d9531c..eb178cef9a08f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsuxei.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsuxei.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s -declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i32( 
%0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxei.mask.nxv8i8.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16i8.nxv16i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -280,12 +202,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -302,13 +218,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i32( %0, ptr %1, %2, %3, 
iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -326,12 +235,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -348,13 +251,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -372,12 +268,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -394,13 +284,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -418,12 +301,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16i16.nxv16i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -440,13 +317,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -464,12 +334,6 @@ 
entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -486,13 +350,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -510,12 +367,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -532,13 +383,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -556,12 +400,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -578,13 +416,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -602,12 +433,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i32( 
%0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -624,13 +449,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -648,12 +466,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16i32.nxv16i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -670,13 +482,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -694,12 +499,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -716,13 +515,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -740,12 +532,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -762,13 
+548,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -786,12 +565,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -808,13 +581,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -832,12 +598,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -854,13 +614,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -878,12 +631,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -900,13 +647,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i32( - , - ptr, - , - , - iXLen); - define void 
@intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -924,12 +664,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -946,13 +680,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -970,12 +697,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -992,13 +713,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1016,12 +730,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1038,13 +746,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1062,12 +763,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16f16.nxv16i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1084,13 +779,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1108,12 +796,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1130,13 +812,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1154,12 +829,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1176,13 +845,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1200,12 +862,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxei.nxv4f32.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1222,13 +878,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1246,12 +895,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1268,13 +911,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1292,12 +928,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16f32.nxv16i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1314,13 +944,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1338,12 +961,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i32( %0, ptr %1, 
%2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1360,13 +977,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1384,12 +994,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1406,13 +1010,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1430,12 +1027,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1452,13 +1043,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1476,12 +1060,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1498,13 +1076,6 @@ entry: ret 
void } -declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1522,12 +1093,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1544,13 +1109,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1568,12 +1126,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1590,13 +1142,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1614,12 +1159,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1636,13 +1175,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i16( %0, ptr %1, %2, 
%3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1660,12 +1192,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1682,13 +1208,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1706,12 +1225,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16i8.nxv16i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1728,13 +1241,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1752,12 +1258,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv32i8.nxv32i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1774,13 +1274,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1798,12 +1291,6 @@ 
entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1820,13 +1307,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1844,12 +1324,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1866,13 +1340,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1890,12 +1357,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1912,13 +1373,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1936,12 +1390,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i16( - , - ptr, - , - iXLen); - define void 
@intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1958,13 +1406,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1982,12 +1423,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16i16.nxv16i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2004,13 +1439,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2028,12 +1456,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv32i16.nxv32i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2050,13 +1472,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2074,12 +1489,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2096,13 +1505,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2120,12 +1522,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2142,13 +1538,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2166,12 +1555,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2188,13 +1571,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2212,12 +1588,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2234,13 +1604,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxei.mask.nxv8i32.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2258,12 +1621,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16i32.nxv16i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2280,13 +1637,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2304,12 +1654,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2326,13 +1670,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2350,12 +1687,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2372,13 +1703,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i16( - , - ptr, - , - , - iXLen); - define void 
@intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2396,12 +1720,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2418,13 +1736,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2442,12 +1753,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2464,13 +1769,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2488,12 +1786,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2510,13 +1802,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2534,12 +1819,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2556,13 +1835,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2580,12 +1852,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2602,13 +1868,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2626,12 +1885,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2648,13 +1901,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2672,12 +1918,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxei.nxv16f16.nxv16i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2694,13 +1934,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2718,12 +1951,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv32f16.nxv32i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2740,13 +1967,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -2764,12 +1984,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2786,13 +2000,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2810,12 +2017,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i16( - , - ptr, - , - iXLen); - define void 
@intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2832,13 +2033,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2856,12 +2050,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2878,13 +2066,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2902,12 +2083,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2924,13 +2099,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2948,12 +2116,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16f32.nxv16i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2970,13 +2132,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2994,12 +2149,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3016,13 +2165,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3040,12 +2182,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3062,13 +2198,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3086,12 +2215,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3108,13 +2231,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxei.mask.nxv4f64.nxv4i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3132,12 +2248,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i16( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3154,13 +2264,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i16( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3178,12 +2281,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3200,13 +2297,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3224,12 +2314,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3246,13 +2330,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3270,12 +2347,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3292,13 +2363,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3316,12 +2380,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3338,13 +2396,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3362,12 +2413,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16i8.nxv16i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3384,13 +2429,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3408,12 +2446,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxei.nxv32i8.nxv32i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3430,13 +2462,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3454,12 +2479,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv64i8.nxv64i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv64i8_nxv64i8_nxv64i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -3476,13 +2495,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv64i8.nxv64i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv64i8_nxv64i8_nxv64i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -3500,12 +2512,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3522,13 +2528,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3546,12 +2545,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i8( %0, ptr %1, %2, iXLen %3) 
nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3568,13 +2561,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3592,12 +2578,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3614,13 +2594,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3638,12 +2611,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3660,13 +2627,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3684,12 +2644,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16i16.nxv16i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3706,13 +2660,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxei.mask.nxv16i16.nxv16i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3730,12 +2677,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv32i16.nxv32i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3752,13 +2693,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -3776,12 +2710,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3798,13 +2726,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3822,12 +2743,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3844,13 +2759,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i8( %0, 
ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3868,12 +2776,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3890,13 +2792,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3914,12 +2809,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3936,13 +2825,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3960,12 +2842,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16i32.nxv16i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3982,13 +2858,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4006,12 
+2875,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4028,13 +2891,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4052,12 +2908,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4074,13 +2924,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4098,12 +2941,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4120,13 +2957,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4144,12 +2974,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i8( 
%0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4166,13 +2990,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4190,12 +3007,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4212,13 +3023,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4236,12 +3040,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4258,13 +3056,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4282,12 +3073,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4304,13 +3089,6 @@ entry: ret void 
} -declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4328,12 +3106,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4350,13 +3122,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4374,12 +3139,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16f16.nxv16i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4396,13 +3155,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4420,12 +3172,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv32f16.nxv32i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -4442,13 +3188,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i8( - , - ptr, - , - , - iXLen); - define void 
@intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -4466,12 +3205,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1bf16.nxv1i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1bf16_nxv1bf16_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1bf16_nxv1bf16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4488,13 +3221,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1bf16.nxv1i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1bf16_nxv1bf16_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1bf16_nxv1bf16_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4512,12 +3238,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2bf16.nxv2i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2bf16_nxv2bf16_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2bf16_nxv2bf16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4534,13 +3254,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2bf16.nxv2i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2bf16_nxv2bf16_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2bf16_nxv2bf16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4558,12 +3271,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4bf16.nxv4i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4bf16_nxv4bf16_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4bf16_nxv4bf16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4580,13 +3287,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4bf16.nxv4i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4bf16_nxv4bf16_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsuxei_mask_v_nxv4bf16_nxv4bf16_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4604,12 +3304,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8bf16.nxv8i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8bf16_nxv8bf16_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8bf16_nxv8bf16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -4626,13 +3320,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8bf16.nxv8i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8bf16_nxv8bf16_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8bf16_nxv8bf16_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -4650,12 +3337,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16bf16.nxv16i32( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16bf16_nxv16bf16_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16bf16_nxv16bf16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -4672,13 +3353,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16bf16.nxv16i32( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16bf16_nxv16bf16_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16bf16_nxv16bf16_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -4696,12 +3370,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4718,13 +3386,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4742,12 +3403,6 @@ entry: ret 
void } -declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4764,13 +3419,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4788,12 +3436,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4810,13 +3452,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4834,12 +3469,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4856,13 +3485,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4880,12 +3502,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv16f32.nxv16i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i8( %0, ptr %1, %2, 
iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4902,13 +3518,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -4926,12 +3535,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4948,13 +3551,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4972,12 +3568,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4994,13 +3584,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5018,12 +3601,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5040,13 +3617,6 @@ entry: ret void } 
-declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5064,12 +3634,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i8( - , - ptr, - , - iXLen); - define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -5086,13 +3650,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i8( - , - ptr, - , - , - iXLen); - define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv32.ll index 316c7ccb7e415..afbe2377acdcc 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv32.ll @@ -2,9 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zve64d,+f,+d,+zvfbfmin \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,9 +24,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void 
@llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -52,9 +46,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -77,9 +68,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -102,9 +90,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16: ; CHECK: # %bb.0: # 
%entry @@ -127,9 +112,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -152,9 +134,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -177,9 +156,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -202,9 +178,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void 
@test_vsuxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -227,9 +200,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -252,9 +222,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -277,9 +244,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -302,9 +266,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -327,9 +288,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -352,9 +310,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -377,9 +332,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void 
@test_vsuxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -402,9 +354,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -427,9 +376,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -452,9 +398,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -477,9 +420,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -502,9 +442,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -527,9 +464,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -552,9 +486,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) 
%val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -577,9 +508,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -602,9 +530,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -627,9 +552,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -652,9 +574,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void 
@llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -677,9 +596,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -702,9 +618,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -727,9 +640,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8: ; 
CHECK: # %bb.0: # %entry @@ -752,9 +662,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -777,9 +684,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -802,9 +706,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -827,9 +728,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define 
void @test_vsuxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -852,9 +750,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -877,9 +772,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -902,9 +794,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -927,9 +816,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -952,9 +838,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -977,9 +860,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1002,9 +882,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) 
%val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1027,9 +904,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1052,9 +926,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1077,9 +948,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1102,9 +970,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void 
@llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1127,9 +992,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1152,9 +1014,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1177,9 +1036,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vsuxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1202,9 +1058,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1227,9 +1080,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1252,9 +1102,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1277,9 +1124,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void 
@llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1302,9 +1146,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1327,9 +1168,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1352,9 +1190,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16: ; CHECK: 
# %bb.0: # %entry @@ -1377,9 +1212,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1402,9 +1234,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1427,9 +1256,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1452,9 +1278,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void 
@test_vsuxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1477,9 +1300,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1502,9 +1322,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1527,9 +1344,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1552,9 +1366,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1577,9 +1388,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1602,9 +1410,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1627,9 +1432,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8(target("riscv.vector.tuple", , 
6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1652,9 +1454,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1677,9 +1476,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1702,9 +1498,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1727,9 +1520,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void 
@llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1752,9 +1542,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1777,9 +1564,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1802,9 +1586,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16: ; CHECK: 
# %bb.0: # %entry @@ -1827,9 +1608,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1852,9 +1630,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1877,9 +1652,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1902,9 +1674,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void 
@test_vsuxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1927,9 +1696,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1952,9 +1718,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1977,9 +1740,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -2002,9 +1762,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -2027,9 +1784,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2052,9 +1806,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -2077,9 +1828,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8(target("riscv.vector.tuple", , 
8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2102,9 +1850,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2127,9 +1872,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2152,9 +1894,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -2177,9 +1916,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void 
@llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2202,9 +1938,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -2227,9 +1960,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -2252,9 +1982,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16: ; CHECK: 
# %bb.0: # %entry @@ -2277,9 +2004,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -2302,9 +2026,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -2327,9 +2048,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2352,9 +2070,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void 
@test_vsuxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -2377,9 +2092,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2402,9 +2114,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2427,9 +2136,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2452,9 +2158,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -2477,9 +2180,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2502,9 +2202,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -2527,9 +2224,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void 
@test_vsuxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -2552,9 +2246,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2577,9 +2268,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -2602,9 +2290,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -2627,9 +2312,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2652,9 +2334,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -2677,9 +2356,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -2702,9 +2378,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void 
@test_vsuxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -2727,9 +2400,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -2752,9 +2422,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2777,9 +2444,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2802,9 +2466,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2827,9 +2488,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -2852,9 +2510,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2877,9 +2532,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void 
@test_vsuxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -2902,9 +2554,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -2927,9 +2576,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2952,9 +2598,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -2977,9 +2620,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3002,9 +2642,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3027,9 +2664,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -3052,9 +2686,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void 
@test_vsuxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3077,9 +2708,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3102,9 +2730,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -3127,9 +2752,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3152,9 +2774,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3177,9 +2796,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -3202,9 +2818,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3227,9 +2840,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void 
@test_vsuxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3252,9 +2862,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -3277,9 +2884,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3302,9 +2906,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3327,9 +2928,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -3352,9 +2950,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3377,9 +2972,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3402,9 +2994,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void 
@test_vsuxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -3427,9 +3016,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3452,9 +3038,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3477,9 +3060,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -3502,9 +3082,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3527,9 +3104,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3552,9 +3126,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -3577,9 +3148,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void 
@test_vsuxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3602,9 +3170,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3627,9 +3192,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -3652,9 +3214,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3677,9 +3236,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3702,9 +3258,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -3727,9 +3280,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3752,9 +3302,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void 
@test_vsuxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3777,9 +3324,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -3802,9 +3346,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3827,9 +3368,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3852,9 +3390,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -3877,9 +3412,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3902,9 +3434,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3927,9 +3456,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void 
@test_vsuxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -3952,9 +3478,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3977,9 +3500,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4002,9 +3522,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4027,9 +3544,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4052,9 +3566,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4077,9 +3588,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4102,9 +3610,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void 
@test_vsuxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4127,9 +3632,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4152,9 +3654,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4177,9 +3676,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4202,9 +3698,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4227,9 +3720,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4252,9 +3742,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4277,9 +3764,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void 
@test_vsuxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4302,9 +3786,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4327,9 +3808,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4352,9 +3830,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4377,9 +3852,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4402,9 +3874,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4427,9 +3896,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4452,9 +3918,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void 
@test_vsuxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4477,9 +3940,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4502,9 +3962,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -4527,9 +3984,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -4552,9 +4006,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4577,9 +4028,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4602,9 +4050,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4627,9 +4072,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void 
@test_vsuxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4652,9 +4094,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4677,9 +4116,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4702,9 +4138,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4727,9 +4160,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4752,9 +4182,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4777,9 +4204,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4802,9 +4226,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void 
@test_vsuxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4827,9 +4248,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4852,9 +4270,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4877,9 +4292,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4902,9 +4314,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4927,9 +4336,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4952,9 +4358,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4977,9 +4380,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void 
@test_vsuxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -5002,9 +4402,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5027,9 +4424,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5052,9 +4446,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5077,9 +4468,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5102,9 +4490,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5127,9 +4512,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5152,9 +4534,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void 
@test_vsuxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5177,9 +4556,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5202,9 +4578,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5227,9 +4600,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5252,9 +4622,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5277,9 +4644,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5302,9 +4666,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5327,9 +4688,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void 
@test_vsuxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5352,9 +4710,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5377,9 +4732,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5402,9 +4754,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5427,9 +4776,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5452,9 +4798,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5477,9 +4820,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5502,9 +4842,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void 
@test_vsuxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5527,9 +4864,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5552,9 +4886,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5577,9 +4908,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5602,9 +4930,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5627,9 +4952,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5652,9 +4974,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5677,9 +4996,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void 
@test_vsuxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5702,9 +5018,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5727,9 +5040,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5752,9 +5062,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5777,9 +5084,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -5802,9 +5106,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i32, i32) - define void @test_vsuxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -5827,9 +5128,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5852,9 +5150,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void 
@test_vsuxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5877,9 +5172,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5902,9 +5194,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5927,9 +5216,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5952,9 +5238,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i32, i32) - define void @test_vsuxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5977,9 +5260,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6002,9 +5282,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6027,9 +5304,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void 
@test_vsuxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6052,9 +5326,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6077,9 +5348,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6102,9 +5370,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i32, i32) - define void @test_vsuxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6127,9 +5392,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6152,9 +5414,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6177,9 +5436,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i32, i32) - define void @test_vsuxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6202,9 +5458,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void 
@test_vsuxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6227,9 +5480,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6252,9 +5502,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i32, i32) - define void @test_vsuxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6277,9 +5524,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6302,9 +5546,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6327,9 +5568,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i32, i32) - define void @test_vsuxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6352,9 +5590,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6377,9 +5612,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void 
@test_vsuxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6402,9 +5634,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i32, i32) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i32, i32) - define void @test_vsuxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6427,7 +5656,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6450,7 +5678,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6473,7 +5700,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6496,7 +5722,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6519,7 +5744,6 @@ entry: ret void } - define void 
@test_vsuxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6542,7 +5766,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6565,7 +5788,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -6588,7 +5810,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -6611,7 +5832,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -6634,7 +5854,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -6657,7 +5876,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -6680,7 +5898,6 @@ entry: ret void 
} - define void @test_vsuxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -6703,7 +5920,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -6726,7 +5942,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -6749,7 +5964,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -6772,7 +5986,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6795,7 +6008,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6818,7 +6030,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ 
-6841,7 +6052,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6864,7 +6074,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6887,7 +6096,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6910,7 +6118,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -6933,7 +6140,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -6956,7 +6162,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -6979,7 +6184,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: 
# %entry @@ -7002,7 +6206,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -7025,7 +6228,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -7048,7 +6250,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7071,7 +6272,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7094,7 +6294,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7117,7 +6316,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7140,7 +6338,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; 
CHECK: # %bb.0: # %entry @@ -7163,7 +6360,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7186,7 +6382,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -7209,7 +6404,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -7232,7 +6426,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -7255,7 +6448,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -7278,7 +6470,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -7301,7 +6492,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vsuxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -7324,7 +6514,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7347,7 +6536,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7370,7 +6558,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7393,7 +6580,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7416,7 +6602,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7439,7 +6624,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7462,7 +6646,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; 
CHECK-LABEL: test_vsuxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -7485,7 +6668,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -7508,7 +6690,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -7531,7 +6712,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7554,7 +6734,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7577,7 +6756,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7600,7 +6778,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7623,7 +6800,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, 
i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7646,7 +6822,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7669,7 +6844,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -7692,7 +6866,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -7715,7 +6888,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -7738,7 +6910,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7761,7 +6932,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7784,7 +6954,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr 
%base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7807,7 +6976,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7830,7 +6998,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7853,7 +7020,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7876,7 +7042,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -7899,7 +7064,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -7922,7 +7086,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -7945,7 +7108,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", , 
8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7968,7 +7130,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7991,7 +7152,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8014,7 +7174,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8037,7 +7196,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8060,7 +7218,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8083,7 +7240,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -8106,7 +7262,6 @@ entry: ret void } - define void 
@test_vsuxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -8129,7 +7284,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -8152,7 +7306,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8175,7 +7328,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8198,7 +7350,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8221,7 +7372,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8244,7 +7394,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8267,7 +7416,6 @@ entry: ret void } - 
define void @test_vsuxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8290,7 +7438,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -8313,7 +7460,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -8336,7 +7482,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -8359,7 +7504,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -8382,7 +7526,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -8405,7 +7548,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -8428,7 
+7570,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8451,7 +7592,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8474,7 +7614,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8497,7 +7636,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8520,7 +7658,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8543,7 +7680,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8566,7 +7702,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # 
%entry @@ -8589,7 +7724,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -8612,7 +7746,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -8635,7 +7768,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8658,7 +7790,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8681,7 +7812,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8704,7 +7834,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8727,7 +7856,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; 
CHECK: # %bb.0: # %entry @@ -8750,7 +7878,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8773,7 +7900,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -8796,7 +7922,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -8819,7 +7944,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -8842,7 +7966,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8865,7 +7988,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8888,7 +8010,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vsuxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8911,7 +8032,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8934,7 +8054,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8957,7 +8076,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8980,7 +8098,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9003,7 +8120,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9026,7 +8142,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9049,7 +8164,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; 
CHECK-LABEL: test_vsuxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9072,7 +8186,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9095,7 +8208,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9118,7 +8230,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9141,7 +8252,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9164,7 +8274,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9187,7 +8296,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9210,7 +8318,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, 
i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9233,7 +8340,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9256,7 +8362,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9279,7 +8384,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9302,7 +8406,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9325,7 +8428,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9348,7 +8450,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9371,7 +8472,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr 
%base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9394,7 +8494,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9417,7 +8516,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9440,7 +8538,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9463,7 +8560,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9486,7 +8582,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9509,7 +8604,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9532,7 +8626,6 @@ entry: ret void } - define void 
@test_vsuxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -9555,7 +8648,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -9578,7 +8670,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -9601,7 +8692,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9624,7 +8714,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9647,7 +8736,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9670,7 +8758,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9693,7 +8780,6 @@ entry: ret 
void } - define void @test_vsuxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9716,7 +8802,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9739,7 +8824,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9762,7 +8846,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9785,7 +8868,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9808,7 +8890,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9831,7 +8912,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9854,7 
+8934,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9877,7 +8956,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9900,7 +8978,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9923,7 +9000,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9946,7 +9022,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9969,7 +9044,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9992,7 +9066,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # 
%entry @@ -10015,7 +9088,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10038,7 +9110,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10061,7 +9132,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10084,7 +9154,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10107,7 +9176,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10130,7 +9198,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10153,7 +9220,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: 
; CHECK: # %bb.0: # %entry @@ -10176,7 +9242,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10199,7 +9264,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10222,7 +9286,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10245,7 +9308,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10268,7 +9330,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10291,7 +9352,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -10314,7 +9374,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vsuxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -10337,7 +9396,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -10360,7 +9418,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -10383,7 +9440,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -10406,7 +9462,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -10429,7 +9484,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -10452,7 +9506,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -10475,7 +9528,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", , 
2) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -10498,7 +9550,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10521,7 +9572,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10544,7 +9594,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10567,7 +9616,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10590,7 +9638,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10613,7 +9660,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10636,7 +9682,6 @@ entry: ret void } - define void 
@test_vsuxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -10659,7 +9704,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -10682,7 +9726,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -10705,7 +9748,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -10728,7 +9770,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -10751,7 +9792,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -10774,7 +9814,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10797,7 
+9836,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10820,7 +9858,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10843,7 +9880,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10866,7 +9902,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10889,7 +9924,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10912,7 +9946,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -10935,7 +9968,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; 
CHECK: # %bb.0: # %entry @@ -10958,7 +9990,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -10981,7 +10012,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -11004,7 +10034,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -11027,7 +10056,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -11050,7 +10078,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11073,7 +10100,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11096,7 +10122,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vsuxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11119,7 +10144,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11142,7 +10166,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11165,7 +10188,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11188,7 +10210,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -11211,7 +10232,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -11234,7 +10254,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -11257,7 +10276,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr 
%base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11280,7 +10298,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11303,7 +10320,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11326,7 +10342,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11349,7 +10364,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11372,7 +10386,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11395,7 +10408,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -11418,7 +10430,6 @@ entry: ret void } - define void 
@test_vsuxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -11441,7 +10452,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -11464,7 +10474,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11487,7 +10496,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11510,7 +10518,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11533,7 +10540,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11556,7 +10562,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11579,7 
+10584,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11602,7 +10606,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -11625,7 +10628,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -11648,7 +10650,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -11671,7 +10672,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11694,7 +10694,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11717,7 +10716,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vsuxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11740,7 +10738,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11763,7 +10760,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11786,7 +10782,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11809,7 +10804,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -11832,7 +10826,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -11855,7 +10848,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i32 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv64.ll index b297d33611242..a59b70a0319f8 100644 --- 
a/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv64.ll @@ -2,9 +2,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zve64d,+f,+d,+zvfh,+zvfbfmin \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -38,9 +35,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -63,9 +57,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -88,9 +79,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare 
void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -113,9 +101,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -138,9 +123,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -163,9 +145,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32: ; CHECK: # 
%bb.0: # %entry @@ -188,9 +167,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -213,9 +189,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -238,9 +211,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -263,9 +233,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void 
@test_vsuxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -288,9 +255,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -313,9 +277,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -338,9 +299,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -363,9 +321,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -388,9 +343,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -413,9 +365,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -438,9 +387,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void 
@test_vsuxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -463,9 +409,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -488,9 +431,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -513,9 +453,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -538,9 +475,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -574,9 +508,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -599,9 +530,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -624,9 +552,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) 
%val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -649,9 +574,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -674,9 +596,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -699,9 +618,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -724,9 +640,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void 
@llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -749,9 +662,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -774,9 +684,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -799,9 +706,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32: ; CHECK: # 
%bb.0: # %entry @@ -824,9 +728,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -849,9 +750,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -874,9 +772,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -899,9 +794,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void 
@test_vsuxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -924,9 +816,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -949,9 +838,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -974,9 +860,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -999,9 +882,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1024,9 +904,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1060,9 +937,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1085,9 +959,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void 
@test_vsuxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1110,9 +981,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1135,9 +1003,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -1160,9 +1025,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1185,9 +1047,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1210,9 +1069,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1235,9 +1091,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1260,9 +1113,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16(target("riscv.vector.tuple", 
, 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1285,9 +1135,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1310,9 +1157,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1335,9 +1179,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1360,9 +1201,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void 
@llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1385,9 +1223,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1410,9 +1245,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1435,9 +1267,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vsuxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -1460,9 +1289,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1485,9 +1311,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -1510,9 +1333,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1546,9 +1366,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void 
@llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1571,9 +1388,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1596,9 +1410,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -1621,9 +1432,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8: ; CHECK: 
# %bb.0: # %entry @@ -1646,9 +1454,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1671,9 +1476,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1696,9 +1498,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -1721,9 +1520,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void 
@test_vsuxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -1746,9 +1542,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1771,9 +1564,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1796,9 +1586,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -1821,9 +1608,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -1846,9 +1630,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1871,9 +1652,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1896,9 +1674,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i64(target("riscv.vector.tuple", 
, 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1921,9 +1696,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -1957,9 +1729,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1982,9 +1751,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2007,9 +1773,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void 
@llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -2032,9 +1795,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -2057,9 +1817,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2082,9 +1839,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32: ; CHECK: 
# %bb.0: # %entry @@ -2107,9 +1861,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -2132,9 +1883,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -2157,9 +1905,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2182,9 +1927,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void 
@test_vsuxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -2207,9 +1949,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -2232,9 +1971,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -2257,9 +1993,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2282,9 +2015,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -2307,9 +2037,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -2332,9 +2059,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2368,9 +2092,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16(target("riscv.vector.tuple", 
, 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2393,9 +2114,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2418,9 +2136,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -2443,9 +2158,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -2468,9 +2180,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void 
@llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2493,9 +2202,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -2518,9 +2224,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -2543,9 +2246,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8: ; CHECK: 
# %bb.0: # %entry @@ -2568,9 +2268,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -2593,9 +2290,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -2618,9 +2312,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -2643,9 +2334,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void 
@test_vsuxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -2668,9 +2356,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -2693,9 +2378,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -2718,9 +2400,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -2743,9 +2422,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -2779,9 +2455,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -2804,9 +2477,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -2829,9 +2499,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i64(target("riscv.vector.tuple", 
, 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -2854,9 +2521,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -2879,9 +2543,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -2904,9 +2565,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -2929,9 +2587,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void 
@llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -2954,9 +2609,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -2979,9 +2631,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3004,9 +2653,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32: ; CHECK: 
# %bb.0: # %entry @@ -3029,9 +2675,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -3054,9 +2697,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3079,9 +2719,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3104,9 +2741,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void 
@test_vsuxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -3129,9 +2763,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -3154,9 +2785,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3179,9 +2807,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3204,9 +2829,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -3229,9 +2851,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -3254,9 +2873,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3279,9 +2895,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void 
@test_vsuxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3304,9 +2917,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -3329,9 +2939,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -3354,9 +2961,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3379,9 +2983,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3404,9 +3005,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -3429,9 +3027,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -3454,9 +3049,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void 
@test_vsuxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3479,9 +3071,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3504,9 +3093,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -3529,9 +3115,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -3554,9 +3137,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -3579,9 +3159,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -3604,9 +3181,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -3629,9 +3203,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void 
@test_vsuxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -3654,9 +3225,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -3679,9 +3247,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -3704,9 +3269,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -3729,9 +3291,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -3754,9 +3313,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -3779,9 +3335,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -3804,9 +3357,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void 
@test_vsuxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -3829,9 +3379,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -3854,9 +3401,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -3879,9 +3423,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -3904,9 +3445,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -3929,9 +3467,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -3954,9 +3489,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -3979,9 +3511,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void 
@test_vsuxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -4004,9 +3533,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -4029,9 +3555,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4054,9 +3577,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4079,9 +3599,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4104,9 +3621,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -4129,9 +3643,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4154,9 +3665,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void 
@test_vsuxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4179,9 +3687,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4204,9 +3709,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -4229,9 +3731,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4254,9 +3753,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4279,9 +3775,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4304,9 +3797,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -4329,9 +3819,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void 
@test_vsuxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -4354,9 +3841,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -4379,9 +3863,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -4404,9 +3885,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -4429,9 +3907,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4454,9 +3929,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4479,9 +3951,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4504,9 +3973,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void 
@test_vsuxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -4529,9 +3995,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4554,9 +4017,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4579,9 +4039,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4604,9 +4061,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -4629,9 +4083,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4654,9 +4105,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4679,9 +4127,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void 
@test_vsuxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -4704,9 +4149,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -4729,9 +4171,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -4754,9 +4193,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -4779,9 +4215,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -4804,9 +4237,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -4829,9 +4259,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -4854,9 +4281,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void 
@test_vsuxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -4879,9 +4303,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -4904,9 +4325,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -4929,9 +4347,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -4954,9 +4369,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -4979,9 +4391,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -5004,9 +4413,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -5029,9 +4435,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void 
@test_vsuxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5054,9 +4457,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5079,9 +4479,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5104,9 +4501,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -5129,9 +4523,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5154,9 +4545,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5179,9 +4567,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5204,9 +4589,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void 
@test_vsuxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -5229,9 +4611,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5254,9 +4633,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -5279,9 +4655,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -5304,9 +4677,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -5329,9 +4699,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5354,9 +4721,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5379,9 +4743,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void 
@test_vsuxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5404,9 +4765,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -5429,9 +4787,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5454,9 +4809,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5479,9 +4831,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5504,9 +4853,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -5529,9 +4875,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5554,9 +4897,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void 
@test_vsuxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -5579,9 +4919,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -5604,9 +4941,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -5629,9 +4963,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -5654,9 +4985,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -5679,9 +5007,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -5704,9 +5029,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -5729,9 +5051,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void 
@test_vsuxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -5754,9 +5073,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -5779,9 +5095,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -5804,9 +5117,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -5829,9 +5139,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -5854,9 +5161,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -5879,9 +5183,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -5904,9 +5205,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void 
@test_vsuxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -5929,9 +5227,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -5954,9 +5249,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -5979,9 +5271,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -6004,9 +5293,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -6029,9 +5315,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6054,9 +5337,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6079,9 +5359,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void 
@test_vsuxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6104,9 +5381,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -6129,9 +5403,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6154,9 +5425,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6179,9 +5447,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6204,9 +5469,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -6229,9 +5491,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -6254,9 +5513,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void 
@test_vsuxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -6279,9 +5535,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -6304,9 +5557,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -6329,9 +5579,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6354,9 +5601,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6379,9 +5623,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6404,9 +5645,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -6429,9 +5667,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void 
@test_vsuxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6454,9 +5689,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6479,9 +5711,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6504,9 +5733,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -6529,9 +5755,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -6554,9 +5777,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -6579,9 +5799,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -6604,9 +5821,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void 
@test_vsuxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -6629,9 +5843,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6654,9 +5865,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6679,9 +5887,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6704,9 +5909,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -6729,9 +5931,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6754,9 +5953,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6779,9 +5975,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void 
@test_vsuxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -6804,9 +5997,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -6829,9 +6019,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -6854,9 +6041,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -6879,9 +6063,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -6904,9 +6085,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -6929,9 +6107,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -6954,9 +6129,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void 
@test_vsuxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -6979,9 +6151,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7004,9 +6173,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -7029,9 +6195,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7054,9 +6217,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7079,9 +6239,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7104,9 +6261,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -7129,9 +6283,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void 
@test_vsuxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7154,9 +6305,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7179,9 +6327,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7204,9 +6349,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -7229,9 +6371,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7254,9 +6393,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7279,9 +6415,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7304,9 +6437,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void 
@test_vsuxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -7329,9 +6459,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7354,9 +6481,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7379,9 +6503,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7404,9 +6525,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -7429,9 +6547,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7454,9 +6569,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7479,9 +6591,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void 
@test_vsuxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7504,9 +6613,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -7529,9 +6635,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7554,9 +6657,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7579,9 +6679,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7604,9 +6701,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -7629,9 +6723,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -7654,9 +6745,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void 
@test_vsuxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -7679,9 +6767,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -7704,9 +6789,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -7729,9 +6811,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7754,9 +6833,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7779,9 +6855,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -7804,9 +6877,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -7829,9 +6899,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void 
@test_vsuxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -7854,9 +6921,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -7879,9 +6943,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -7904,9 +6965,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64(target("riscv.vector.tuple", , 3), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) - define void @test_vsuxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -7929,9 +6987,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -7954,9 +7009,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -7979,9 +7031,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8004,9 +7053,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void 
@test_vsuxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -8029,9 +7075,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8054,9 +7097,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8079,9 +7119,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8104,9 +7141,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64(target("riscv.vector.tuple", , 4), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) - define void @test_vsuxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -8129,9 +7163,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8154,9 +7185,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8179,9 +7207,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void 
@test_vsuxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8204,9 +7229,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64(target("riscv.vector.tuple", , 5), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) - define void @test_vsuxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -8229,9 +7251,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8254,9 +7273,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8279,9 +7295,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8304,9 +7317,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64(target("riscv.vector.tuple", , 6), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) - define void @test_vsuxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -8329,9 +7339,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8354,9 +7361,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void 
@test_vsuxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8379,9 +7383,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8404,9 +7405,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64(target("riscv.vector.tuple", , 7), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64) - define void @test_vsuxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -8429,9 +7427,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8454,9 +7449,6 @@ entry: ret void } -declare void 
@llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8479,9 +7471,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8504,9 +7493,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64(target("riscv.vector.tuple", , 8), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64) - define void @test_vsuxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -8529,7 +7515,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8552,7 +7537,6 @@ entry: ret void } - define void 
@test_vsuxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -8575,7 +7559,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -8598,7 +7581,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -8621,7 +7603,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -8644,7 +7625,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -8667,7 +7647,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -8690,7 +7669,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -8713,7 +7691,6 @@ entry: ret void } 
- define void @test_vsuxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -8736,7 +7713,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -8759,7 +7735,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -8782,7 +7757,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -8805,7 +7779,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -8828,7 +7801,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -8851,7 +7823,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -8874,7 +7845,6 @@ 
entry: ret void } - define void @test_vsuxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -8897,7 +7867,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -8920,7 +7889,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -8943,7 +7911,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -8966,7 +7933,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -8989,7 +7955,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9012,7 +7977,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # 
%bb.0: # %entry @@ -9035,7 +7999,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -9058,7 +8021,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9081,7 +8043,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9104,7 +8065,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9127,7 +8087,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -9150,7 +8109,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -9173,7 +8131,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vsuxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -9196,7 +8153,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -9219,7 +8175,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -9242,7 +8197,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -9265,7 +8219,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -9288,7 +8241,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -9311,7 +8263,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -9334,7 +8285,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 
%vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9357,7 +8307,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9380,7 +8329,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9403,7 +8351,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -9426,7 +8373,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9449,7 +8395,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9472,7 +8417,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9495,7 +8439,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr 
%base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -9518,7 +8461,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -9541,7 +8483,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -9564,7 +8505,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -9587,7 +8527,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -9610,7 +8549,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -9633,7 +8571,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -9656,7 +8593,6 @@ entry: ret void } - define void 
@test_vsuxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -9679,7 +8615,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -9702,7 +8637,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -9725,7 +8659,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -9748,7 +8681,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -9771,7 +8703,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -9794,7 +8725,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -9817,7 +8747,6 @@ entry: ret void 
} - define void @test_vsuxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -9840,7 +8769,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -9863,7 +8791,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -9886,7 +8813,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -9909,7 +8835,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -9932,7 +8857,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -9955,7 +8879,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -9978,7 +8901,6 @@ 
entry: ret void } - define void @test_vsuxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10001,7 +8923,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10024,7 +8945,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10047,7 +8967,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -10070,7 +8989,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10093,7 +9011,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10116,7 +9033,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ 
-10139,7 +9055,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -10162,7 +9077,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -10185,7 +9099,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -10208,7 +9121,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -10231,7 +9143,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -10254,7 +9165,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10277,7 +9187,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # 
%bb.0: # %entry @@ -10300,7 +9209,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10323,7 +9231,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -10346,7 +9253,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10369,7 +9275,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10392,7 +9297,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10415,7 +9319,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -10438,7 +9341,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vsuxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -10461,7 +9363,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -10484,7 +9385,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -10507,7 +9407,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -10530,7 +9429,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10553,7 +9451,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10576,7 +9473,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10599,7 +9495,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 
%vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -10622,7 +9517,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10645,7 +9539,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10668,7 +9561,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10691,7 +9583,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -10714,7 +9605,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -10737,7 +9627,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -10760,7 +9649,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr 
%base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -10783,7 +9671,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -10806,7 +9693,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -10829,7 +9715,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -10852,7 +9737,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -10875,7 +9759,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -10898,7 +9781,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -10921,7 +9803,6 @@ entry: ret void } - define void 
@test_vsuxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -10944,7 +9825,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -10967,7 +9847,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -10990,7 +9869,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -11013,7 +9891,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -11036,7 +9913,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -11059,7 +9935,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -11082,7 +9957,6 @@ 
entry: ret void } - define void @test_vsuxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -11105,7 +9979,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -11128,7 +10001,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -11151,7 +10023,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -11174,7 +10045,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11197,7 +10067,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11220,7 +10089,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: ; CHECK: # %bb.0: 
# %entry @@ -11243,7 +10111,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -11266,7 +10133,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11289,7 +10155,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11312,7 +10177,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11335,7 +10199,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -11358,7 +10221,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -11381,7 +10243,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vsuxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -11404,7 +10265,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -11427,7 +10287,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -11450,7 +10309,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11473,7 +10331,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11496,7 +10353,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11519,7 +10375,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -11542,7 +10397,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, 
%index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11565,7 +10419,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11588,7 +10441,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11611,7 +10463,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -11634,7 +10485,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -11657,7 +10507,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -11680,7 +10529,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -11703,7 +10551,6 @@ entry: ret void } - define void 
@test_vsuxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -11726,7 +10573,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11749,7 +10595,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11772,7 +10617,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11795,7 +10639,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -11818,7 +10661,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -11841,7 +10683,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -11864,7 +10705,6 @@ 
entry: ret void } - define void @test_vsuxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -11887,7 +10727,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -11910,7 +10749,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -11933,7 +10771,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -11956,7 +10793,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -11979,7 +10815,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -12002,7 +10837,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: ; CHECK: # %bb.0: # 
%entry @@ -12025,7 +10859,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -12048,7 +10881,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -12071,7 +10903,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -12094,7 +10925,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -12117,7 +10947,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -12140,7 +10969,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -12163,7 +10991,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vsuxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -12186,7 +11013,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -12209,7 +11035,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -12232,7 +11057,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -12255,7 +11079,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -12278,7 +11101,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -12301,7 +11123,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -12324,7 +11145,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, 
i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -12347,7 +11167,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -12370,7 +11189,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -12393,7 +11211,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -12416,7 +11233,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -12439,7 +11255,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -12462,7 +11277,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -12485,7 +11299,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(target("riscv.vector.tuple", , 
2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -12508,7 +11321,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -12531,7 +11343,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -12554,7 +11365,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -12577,7 +11387,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -12600,7 +11409,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -12623,7 +11431,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -12646,7 +11453,6 @@ entry: ret void } - define void 
@test_vsuxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -12669,7 +11475,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -12692,7 +11497,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -12715,7 +11519,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -12738,7 +11541,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -12761,7 +11563,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -12784,7 +11585,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -12807,7 +11607,6 
@@ entry: ret void } - define void @test_vsuxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -12830,7 +11629,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -12853,7 +11651,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -12876,7 +11673,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -12899,7 +11695,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -12922,7 +11717,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -12945,7 +11739,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: ; CHECK: # 
%bb.0: # %entry @@ -12968,7 +11761,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -12991,7 +11783,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -13014,7 +11805,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -13037,7 +11827,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -13060,7 +11849,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -13083,7 +11871,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -13106,7 +11893,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vsuxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13129,7 +11915,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13152,7 +11937,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13175,7 +11959,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -13198,7 +11981,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13221,7 +12003,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13244,7 +12025,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13267,7 +12047,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, 
%index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -13290,7 +12069,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13313,7 +12091,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13336,7 +12113,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13359,7 +12135,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -13382,7 +12157,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13405,7 +12179,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13428,7 +12201,6 @@ entry: ret void } - define void 
@test_vsuxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13451,7 +12223,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -13474,7 +12245,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13497,7 +12267,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13520,7 +12289,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13543,7 +12311,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -13566,7 +12333,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -13589,7 
+12355,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -13612,7 +12377,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -13635,7 +12399,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -13658,7 +12421,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -13681,7 +12443,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -13704,7 +12465,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -13727,7 +12487,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vsuxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -13750,7 +12509,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -13773,7 +12531,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -13796,7 +12553,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -13819,7 +12575,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -13842,7 +12597,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -13865,7 +12619,6 @@ entry: ret void } - define void @test_vsuxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -13888,7 +12641,6 @@ entry: ret void } - define void 
@test_vsuxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -13911,7 +12663,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -13934,7 +12685,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -13957,7 +12707,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -13980,7 +12729,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -14003,7 +12751,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -14026,7 +12773,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ 
-14049,7 +12795,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -14072,7 +12817,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -14095,7 +12839,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -14118,7 +12861,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -14141,7 +12883,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -14164,7 +12905,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -14187,7 +12927,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vsuxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -14210,7 +12949,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -14233,7 +12971,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -14256,7 +12993,6 @@ entry: ret void } - define void @test_vsuxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -14279,7 +13015,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -14302,7 +13037,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -14325,7 +13059,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -14348,7 +13081,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(target("riscv.vector.tuple", , 4) 
%val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -14371,7 +13103,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -14394,7 +13125,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -14417,7 +13147,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -14440,7 +13169,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -14463,7 +13191,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -14486,7 +13213,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -14509,7 +13235,6 @@ entry: ret void } - define void 
@test_vsuxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -14532,7 +13257,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -14555,7 +13279,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -14578,7 +13301,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -14601,7 +13323,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -14624,7 +13345,6 @@ entry: ret void } - define void @test_vsuxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -14647,7 +13367,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ 
-14670,7 +13389,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -14693,7 +13411,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -14716,7 +13433,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -14739,7 +13455,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -14762,7 +13477,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -14785,7 +13499,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -14808,7 +13521,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vsuxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -14831,7 +13543,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -14854,7 +13565,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -14877,7 +13587,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -14900,7 +13609,6 @@ entry: ret void } - define void @test_vsuxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -14923,7 +13631,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -14946,7 +13653,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -14969,7 +13675,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", , 6) %val, ptr 
%base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -14992,7 +13697,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -15015,7 +13719,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -15038,7 +13741,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -15061,7 +13763,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -15084,7 +13785,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -15107,7 +13807,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -15130,7 +13829,6 @@ entry: ret void } - define void 
@test_vsuxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -15153,7 +13851,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -15176,7 +13873,6 @@ entry: ret void } - define void @test_vsuxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -15199,7 +13895,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -15222,7 +13917,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -15245,7 +13939,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -15268,7 +13961,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -15291,7 
+13983,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -15314,7 +14005,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -15337,7 +14027,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -15360,7 +14049,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -15383,7 +14071,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -15406,7 +14093,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -15429,7 +14115,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vsuxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -15452,7 +14137,6 @@ entry: ret void } - define void @test_vsuxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -15475,7 +14159,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -15498,7 +14181,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -15521,7 +14203,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -15544,7 +14225,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -15567,7 +14247,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -15590,7 +14269,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", , 8) %val, ptr 
%base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -15613,7 +14291,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -15636,7 +14313,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -15659,7 +14335,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -15682,7 +14357,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -15705,7 +14379,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -15728,7 +14401,6 @@ entry: ret void } - define void @test_vsuxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl) { ; CHECK-LABEL: test_vsuxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp-mask.ll index 
ad8097631acd3..4b86cc771d617 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp-mask.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s -declare @llvm.vp.trunc.nxv2i1.nxv2i16(, , i32) - define @vtrunc_nxv2i1_nxv2i16( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv2i1_nxv2i16: ; CHECK: # %bb.0: @@ -26,8 +24,6 @@ define @vtrunc_nxv2i1_nxv2i16_unmasked( %a, ret %v } -declare @llvm.vp.trunc.nxv2i1.nxv2i32(, , i32) - define @vtrunc_nxv2i1_nxv2i32( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv2i1_nxv2i32: ; CHECK: # %bb.0: @@ -50,8 +46,6 @@ define @vtrunc_nxv2i1_nxv2i32_unmasked( %a, ret %v } -declare @llvm.vp.trunc.nxv2i1.nxv2i64(, , i32) - define @vtrunc_nxv2i1_nxv2i64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv2i1_nxv2i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll index 1c687ef23bfa8..0c1ca369521f7 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v,+m -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+v,+m -verify-machineinstrs < %s | FileCheck %s -declare @llvm.vp.trunc.nxv2i7.nxv2i16(, , i32) - define @vtrunc_nxv2i7_nxv2i16( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv2i7_nxv2i16: ; CHECK: # %bb.0: @@ -14,8 +12,6 @@ define @vtrunc_nxv2i7_nxv2i16( %a, %v } -declare @llvm.vp.trunc.nxv2i8.nxv2i15(, , i32) - define @vtrunc_nxv2i8_nxv2i15( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv2i8_nxv2i15: ; CHECK: # %bb.0: @@ -26,8 +22,6 @@ define @vtrunc_nxv2i8_nxv2i15( %a, %v } -declare @llvm.vp.trunc.nxv2i8.nxv2i16(, , i32) - define @vtrunc_nxv2i8_nxv2i16( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv2i8_nxv2i16: ; CHECK: # %bb.0: @@ -48,8 +42,6 @@ define 
@vtrunc_nxv2i8_nxv2i16_unmasked( %a, ret %v } -declare @llvm.vp.trunc.nxv2i8.nxv2i32(, , i32) - define @vtrunc_nxv2i8_nxv2i32( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv2i8_nxv2i32: ; CHECK: # %bb.0: @@ -74,8 +66,6 @@ define @vtrunc_nxv2i8_nxv2i32_unmasked( %a, ret %v } -declare @llvm.vp.trunc.nxv2i8.nxv2i64(, , i32) - define @vtrunc_nxv2i8_nxv2i64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv2i8_nxv2i64: ; CHECK: # %bb.0: @@ -104,8 +94,6 @@ define @vtrunc_nxv2i8_nxv2i64_unmasked( %a, ret %v } -declare @llvm.vp.trunc.nxv2i16.nxv2i32(, , i32) - define @vtrunc_nxv2i16_nxv2i32( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv2i16_nxv2i32: ; CHECK: # %bb.0: @@ -126,8 +114,6 @@ define @vtrunc_nxv2i16_nxv2i32_unmasked( %a ret %v } -declare @llvm.vp.trunc.nxv2i16.nxv2i64(, , i32) - define @vtrunc_nxv2i16_nxv2i64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv2i16_nxv2i64: ; CHECK: # %bb.0: @@ -152,8 +138,6 @@ define @vtrunc_nxv2i16_nxv2i64_unmasked( %a ret %v } -declare @llvm.vp.trunc.nxv15i16.nxv15i64(, , i32) - define @vtrunc_nxv15i16_nxv15i64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv15i16_nxv15i64: ; CHECK: # %bb.0: @@ -185,8 +169,6 @@ define @vtrunc_nxv15i16_nxv15i64( %a, %v } -declare @llvm.vp.trunc.nxv2i32.nxv2i64(, , i32) - define @vtrunc_nxv2i32_nxv2i64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv2i32_nxv2i64: ; CHECK: # %bb.0: @@ -209,8 +191,6 @@ define @vtrunc_nxv2i32_nxv2i64_unmasked( %a ret %v } -declare @llvm.vp.trunc.nxv32i7.nxv32i32(, , i32) - define @vtrunc_nxv32i7_nxv32i32( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv32i7_nxv32i32: ; CHECK: # %bb.0: @@ -243,8 +223,6 @@ define @vtrunc_nxv32i7_nxv32i32( %a, %v } -declare @llvm.vp.trunc.nxv32i8.nxv32i32(, , i32) - define @vtrunc_nxv32i8_nxv32i32( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv32i8_nxv32i32: ; CHECK: # %bb.0: @@ -277,8 +255,6 @@ define @vtrunc_nxv32i8_nxv32i32( %a, %v } -declare @llvm.vp.trunc.nxv32i32.nxv32i64(, , i32) - define 
@vtrunc_nxv32i64_nxv32i32( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv32i64_nxv32i32: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp-mask.ll index 616dc697b2847..d82b1576d4cb5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp-mask.ll @@ -31,8 +31,6 @@ define @vuitofp_nxv2bf16_nxv2i1_unmasked( ret %v } -declare @llvm.vp.uitofp.nxv2f16.nxv2i1(, , i32) - define @vuitofp_nxv2f16_nxv2i1( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f16_nxv2i1: ; CHECK: # %bb.0: @@ -58,8 +56,6 @@ define @vuitofp_nxv2f16_nxv2i1_unmasked( %v ret %v } -declare @llvm.vp.uitofp.nxv2f32.nxv2i1(, , i32) - define @vuitofp_nxv2f32_nxv2i1( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f32_nxv2i1: ; CHECK: # %bb.0: @@ -85,8 +81,6 @@ define @vuitofp_nxv2f32_nxv2i1_unmasked( % ret %v } -declare @llvm.vp.uitofp.nxv2f64.nxv2i1(, , i32) - define @vuitofp_nxv2f64_nxv2i1( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f64_nxv2i1: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll index e1edaaadadf1d..c0c749ebf3186 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll @@ -118,8 +118,6 @@ define @vuitofp_nxv2bf16_nxv2i64_unmasked( %v } -declare @llvm.vp.uitofp.nxv2f16.nxv2i7(, , i32) - define @vuitofp_nxv2f16_nxv2i7( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vuitofp_nxv2f16_nxv2i7: ; ZVFH: # %bb.0: @@ -144,8 +142,6 @@ define @vuitofp_nxv2f16_nxv2i7( %va, %v } -declare @llvm.vp.uitofp.nxv2f16.nxv2i8(, , i32) - define @vuitofp_nxv2f16_nxv2i8( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vuitofp_nxv2f16_nxv2i8: ; ZVFH: # %bb.0: @@ -186,8 +182,6 @@ define @vuitofp_nxv2f16_nxv2i8_unmasked( %v ret %v } -declare @llvm.vp.uitofp.nxv2f16.nxv2i16(, , i32) - define @vuitofp_nxv2f16_nxv2i16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: 
vuitofp_nxv2f16_nxv2i16: ; ZVFH: # %bb.0: @@ -224,8 +218,6 @@ define @vuitofp_nxv2f16_nxv2i16_unmasked( ret %v } -declare @llvm.vp.uitofp.nxv2f16.nxv2i32(, , i32) - define @vuitofp_nxv2f16_nxv2i32( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vuitofp_nxv2f16_nxv2i32: ; ZVFH: # %bb.0: @@ -264,8 +256,6 @@ define @vuitofp_nxv2f16_nxv2i32_unmasked( ret %v } -declare @llvm.vp.uitofp.nxv2f16.nxv2i64(, , i32) - define @vuitofp_nxv2f16_nxv2i64( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vuitofp_nxv2f16_nxv2i64: ; ZVFH: # %bb.0: @@ -306,8 +296,6 @@ define @vuitofp_nxv2f16_nxv2i64_unmasked( ret %v } -declare @llvm.vp.uitofp.nxv2f32.nxv2i8(, , i32) - define @vuitofp_nxv2f32_nxv2i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f32_nxv2i8: ; CHECK: # %bb.0: @@ -330,8 +318,6 @@ define @vuitofp_nxv2f32_nxv2i8_unmasked( % ret %v } -declare @llvm.vp.uitofp.nxv2f32.nxv2i16(, , i32) - define @vuitofp_nxv2f32_nxv2i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f32_nxv2i16: ; CHECK: # %bb.0: @@ -354,8 +340,6 @@ define @vuitofp_nxv2f32_nxv2i16_unmasked( ret %v } -declare @llvm.vp.uitofp.nxv2f32.nxv2i32(, , i32) - define @vuitofp_nxv2f32_nxv2i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f32_nxv2i32: ; CHECK: # %bb.0: @@ -376,8 +360,6 @@ define @vuitofp_nxv2f32_nxv2i32_unmasked( ret %v } -declare @llvm.vp.uitofp.nxv2f32.nxv2i64(, , i32) - define @vuitofp_nxv2f32_nxv2i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f32_nxv2i64: ; CHECK: # %bb.0: @@ -400,8 +382,6 @@ define @vuitofp_nxv2f32_nxv2i64_unmasked( ret %v } -declare @llvm.vp.uitofp.nxv2f64.nxv2i8(, , i32) - define @vuitofp_nxv2f64_nxv2i8( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f64_nxv2i8: ; CHECK: # %bb.0: @@ -424,8 +404,6 @@ define @vuitofp_nxv2f64_nxv2i8_unmasked( ret %v } -declare @llvm.vp.uitofp.nxv2f64.nxv2i16(, , i32) - define @vuitofp_nxv2f64_nxv2i16( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f64_nxv2i16: ; CHECK: # %bb.0: @@ -448,8 +426,6 @@ 
define @vuitofp_nxv2f64_nxv2i16_unmasked( %v } -declare @llvm.vp.uitofp.nxv2f64.nxv2i32(, , i32) - define @vuitofp_nxv2f64_nxv2i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f64_nxv2i32: ; CHECK: # %bb.0: @@ -472,8 +448,6 @@ define @vuitofp_nxv2f64_nxv2i32_unmasked( %v } -declare @llvm.vp.uitofp.nxv2f64.nxv2i64(, , i32) - define @vuitofp_nxv2f64_nxv2i64( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f64_nxv2i64: ; CHECK: # %bb.0: @@ -494,8 +468,6 @@ define @vuitofp_nxv2f64_nxv2i64_unmasked( %v } -declare @llvm.vp.uitofp.nxv32f16.nxv32i32(, , i32) - define @vuitofp_nxv32f16_nxv32i32( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vuitofp_nxv32f16_nxv32i32: ; ZVFH: # %bb.0: @@ -552,8 +524,6 @@ define @vuitofp_nxv32f16_nxv32i32( %va, ret %v } -declare @llvm.vp.uitofp.nxv32f32.nxv32i32(, , i32) - define @vuitofp_nxv32f32_nxv32i32( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv32f32_nxv32i32: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd-vp.ll index 433f5d2717e48..4fca25d0178ee 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwadd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwadd-vp.ll @@ -37,11 +37,6 @@ bb: ret %tmp4 } -declare @llvm.vp.sext.nxv2i32.nxv2i8(, , i32) -declare @llvm.vp.zext.nxv2i32.nxv2i8(, , i32) -declare @llvm.vp.add.nxv2i32(, , , i32) -declare @llvm.vp.merge.nxv2i32(, , , i32) - define @vwadd_vv_vpnxv2i32_vpnxv2i16_vpnxv2i16( %x, %y, %m, i32 signext %evl) { ; CHECK-LABEL: vwadd_vv_vpnxv2i32_vpnxv2i16_vpnxv2i16: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd.ll index 44742b71f3dcc..94d685e993ae0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwadd.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwadd.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8( - , - , - , - iXLen); - define 
@intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,14 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -52,12 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vwadd_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,14 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -100,12 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vwadd_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -123,14 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -148,12 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vwadd_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -172,14 +124,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8( - , - , 
- , - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -197,12 +141,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vwadd_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -221,14 +159,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -246,12 +176,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vwadd_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -270,14 +194,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -295,12 +211,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vwadd_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -318,14 +228,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # 
%bb.0: # %entry @@ -343,12 +245,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vwadd_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -366,14 +262,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -391,12 +279,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vwadd_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -415,14 +297,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -440,12 +314,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vwadd_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -464,14 +332,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -489,12 +349,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vwadd_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, iXLen %2) 
nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -513,14 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -538,12 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vwadd_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -561,14 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -586,12 +418,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vwadd_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -610,14 +436,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -635,12 +453,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vwadd_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -659,14 +471,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.nxv4i32( - , - , 
- , - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -684,12 +488,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vwadd_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -708,14 +506,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -733,12 +523,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwadd_vx_nxv1i16_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -756,14 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -781,12 +557,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwadd_vx_nxv2i16_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -804,14 +574,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -829,12 +591,6 @@ entry: ret %a } -declare 
@llvm.riscv.vwadd.nxv4i16.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwadd_vx_nxv4i16_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -852,14 +608,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -877,12 +625,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwadd_vx_nxv8i16_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -900,14 +642,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -925,12 +659,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwadd_vx_nxv16i16_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -948,14 +676,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -973,12 +693,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwadd_vx_nxv32i16_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -996,14 +710,6 @@ entry: ret %a } -declare 
@llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1021,12 +727,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwadd_vx_nxv1i32_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1044,14 +744,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1069,12 +761,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwadd_vx_nxv2i32_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1092,14 +778,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1117,12 +795,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwadd_vx_nxv4i32_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1140,14 +812,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv4i32_nxv4i16_i16: 
; CHECK: # %bb.0: # %entry @@ -1165,12 +829,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwadd_vx_nxv8i32_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1188,14 +846,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1213,12 +863,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwadd_vx_nxv16i32_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1236,14 +880,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1261,12 +897,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwadd_vx_nxv1i64_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1284,14 +914,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vx_nxv1i64_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1309,12 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwadd_vx_nxv2i64_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vwadd_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1332,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vx_nxv2i64_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1357,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwadd_vx_nxv4i64_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1380,14 +982,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vx_nxv4i64_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1405,12 +999,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwadd_vx_nxv8i64_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1428,14 +1016,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwadd_mask_vx_nxv8i64_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd.w.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd.w.ll index 578e558aba5ab..05a3e5eac4e44 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwadd.w.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwadd.w.ll @@ -8,12 +8,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs -early-live-intervals | FileCheck %s -declare @llvm.riscv.vwadd.w.nxv1i16.nxv1i8( - , - , - , 
- iXLen); - define @intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -30,14 +24,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -55,12 +41,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv2i16.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vwadd.w_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -77,14 +57,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -102,12 +74,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv4i16.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vwadd.w_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -124,14 +90,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -149,12 +107,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv8i16.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vwadd.w_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -171,14 +123,6 @@ entry: ret %a } -declare 
@llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -196,12 +140,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv16i16.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vwadd.w_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -218,14 +156,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -243,12 +173,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv32i16.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vwadd.w_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -265,14 +189,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -291,12 +207,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv1i32.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vwadd.w_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -313,14 +223,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vwadd.w_mask_wv_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -338,12 +240,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv2i32.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vwadd.w_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -360,14 +256,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -385,12 +273,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv4i32.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vwadd.w_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -407,14 +289,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -432,12 +306,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv8i32.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vwadd.w_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -454,14 +322,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -479,12 +339,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv16i32.nxv16i16( - , - , - , - iXLen); - define 
@intrinsic_vwadd.w_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -501,14 +355,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -527,12 +373,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv1i64.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vwadd.w_wv_nxv1i64_nxv1i64_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -549,14 +389,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wv_nxv1i64_nxv1i64_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -574,12 +406,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv2i64.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vwadd.w_wv_nxv2i64_nxv2i64_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -596,14 +422,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wv_nxv2i64_nxv2i64_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -621,12 +439,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv4i64.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vwadd.w_wv_nxv4i64_nxv4i64_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -643,14 +455,6 @@ entry: ret %a } -declare 
@llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wv_nxv4i64_nxv4i64_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -668,12 +472,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv8i64.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vwadd.w_wv_nxv8i64_nxv8i64_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -690,14 +488,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wv_nxv8i64_nxv8i64_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -716,12 +506,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv1i16.i8( - , - , - i8, - iXLen); - define @intrinsic_vwadd.w_wx_nxv1i16_nxv1i16_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv1i16_nxv1i16_i8: ; CHECK: # %bb.0: # %entry @@ -738,14 +522,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv1i16.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wx_nxv1i16_nxv1i16_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv1i16_nxv1i16_i8: ; CHECK: # %bb.0: # %entry @@ -763,12 +539,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv2i16.i8( - , - , - i8, - iXLen); - define @intrinsic_vwadd.w_wx_nxv2i16_nxv2i16_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv2i16_nxv2i16_i8: ; CHECK: # %bb.0: # %entry @@ -785,14 +555,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv2i16.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wx_nxv2i16_nxv2i16_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv2i16_nxv2i16_i8: ; CHECK: # %bb.0: # 
%entry @@ -810,12 +572,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv4i16.i8( - , - , - i8, - iXLen); - define @intrinsic_vwadd.w_wx_nxv4i16_nxv4i16_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv4i16_nxv4i16_i8: ; CHECK: # %bb.0: # %entry @@ -832,14 +588,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv4i16.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wx_nxv4i16_nxv4i16_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv4i16_nxv4i16_i8: ; CHECK: # %bb.0: # %entry @@ -857,12 +605,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv8i16.i8( - , - , - i8, - iXLen); - define @intrinsic_vwadd.w_wx_nxv8i16_nxv8i16_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv8i16_nxv8i16_i8: ; CHECK: # %bb.0: # %entry @@ -879,14 +621,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv8i16.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wx_nxv8i16_nxv8i16_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv8i16_nxv8i16_i8: ; CHECK: # %bb.0: # %entry @@ -904,12 +638,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv16i16.i8( - , - , - i8, - iXLen); - define @intrinsic_vwadd.w_wx_nxv16i16_nxv16i16_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv16i16_nxv16i16_i8: ; CHECK: # %bb.0: # %entry @@ -926,14 +654,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv16i16.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wx_nxv16i16_nxv16i16_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv16i16_nxv16i16_i8: ; CHECK: # %bb.0: # %entry @@ -951,12 +671,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv32i16.i8( - , - , - i8, - iXLen); - define @intrinsic_vwadd.w_wx_nxv32i16_nxv32i16_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv32i16_nxv32i16_i8: ; CHECK: # %bb.0: # 
%entry @@ -973,14 +687,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv32i16.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wx_nxv32i16_nxv32i16_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv32i16_nxv32i16_i8: ; CHECK: # %bb.0: # %entry @@ -998,12 +704,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv1i32.i16( - , - , - i16, - iXLen); - define @intrinsic_vwadd.w_wx_nxv1i32_nxv1i32_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv1i32_nxv1i32_i16: ; CHECK: # %bb.0: # %entry @@ -1020,14 +720,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv1i32.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wx_nxv1i32_nxv1i32_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv1i32_nxv1i32_i16: ; CHECK: # %bb.0: # %entry @@ -1045,12 +737,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv2i32.i16( - , - , - i16, - iXLen); - define @intrinsic_vwadd.w_wx_nxv2i32_nxv2i32_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv2i32_nxv2i32_i16: ; CHECK: # %bb.0: # %entry @@ -1067,14 +753,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv2i32.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wx_nxv2i32_nxv2i32_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv2i32_nxv2i32_i16: ; CHECK: # %bb.0: # %entry @@ -1092,12 +770,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv4i32.i16( - , - , - i16, - iXLen); - define @intrinsic_vwadd.w_wx_nxv4i32_nxv4i32_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv4i32_nxv4i32_i16: ; CHECK: # %bb.0: # %entry @@ -1114,14 +786,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv4i32.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wx_nxv4i32_nxv4i32_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vwadd.w_mask_wx_nxv4i32_nxv4i32_i16: ; CHECK: # %bb.0: # %entry @@ -1139,12 +803,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv8i32.i16( - , - , - i16, - iXLen); - define @intrinsic_vwadd.w_wx_nxv8i32_nxv8i32_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv8i32_nxv8i32_i16: ; CHECK: # %bb.0: # %entry @@ -1161,14 +819,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv8i32.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wx_nxv8i32_nxv8i32_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv8i32_nxv8i32_i16: ; CHECK: # %bb.0: # %entry @@ -1186,12 +836,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv16i32.i16( - , - , - i16, - iXLen); - define @intrinsic_vwadd.w_wx_nxv16i32_nxv16i32_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv16i32_nxv16i32_i16: ; CHECK: # %bb.0: # %entry @@ -1208,14 +852,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv16i32.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wx_nxv16i32_nxv16i32_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv16i32_nxv16i32_i16: ; CHECK: # %bb.0: # %entry @@ -1233,12 +869,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv1i64.i32( - , - , - i32, - iXLen); - define @intrinsic_vwadd.w_wx_nxv1i64_nxv1i64_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv1i64_nxv1i64_i32: ; CHECK: # %bb.0: # %entry @@ -1255,14 +885,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv1i64.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wx_nxv1i64_nxv1i64_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv1i64_nxv1i64_i32: ; CHECK: # %bb.0: # %entry @@ -1280,12 +902,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv2i64.i32( - , - , - i32, - iXLen); - define @intrinsic_vwadd.w_wx_nxv2i64_nxv2i64_i32( %0, 
i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv2i64_nxv2i64_i32: ; CHECK: # %bb.0: # %entry @@ -1302,14 +918,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv2i64.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wx_nxv2i64_nxv2i64_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv2i64_nxv2i64_i32: ; CHECK: # %bb.0: # %entry @@ -1327,12 +935,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv4i64.i32( - , - , - i32, - iXLen); - define @intrinsic_vwadd.w_wx_nxv4i64_nxv4i64_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv4i64_nxv4i64_i32: ; CHECK: # %bb.0: # %entry @@ -1349,14 +951,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv4i64.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wx_nxv4i64_nxv4i64_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv4i64_nxv4i64_i32: ; CHECK: # %bb.0: # %entry @@ -1374,12 +968,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.nxv8i64.i32( - , - , - i32, - iXLen); - define @intrinsic_vwadd.w_wx_nxv8i64_nxv8i64_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv8i64_nxv8i64_i32: ; CHECK: # %bb.0: # %entry @@ -1396,14 +984,6 @@ entry: ret %a } -declare @llvm.riscv.vwadd.w.mask.nxv8i64.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwadd.w_mask_wx_nxv8i64_nxv8i64_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv8i64_nxv8i64_i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu.ll index a4060cb026b70..0c17e46a6db8e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwaddu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8( - , - , - 
, - iXLen); - define @intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,14 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -52,12 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vwaddu_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,14 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -100,12 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vwaddu_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -123,14 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -148,12 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vwaddu_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -172,14 +124,6 @@ entry: ret %a } -declare 
@llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -197,12 +141,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vwaddu_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -221,14 +159,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -246,12 +176,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vwaddu_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -270,14 +194,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -295,12 +211,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vwaddu_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -318,14 +228,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -343,12 +245,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vwaddu_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -366,14 +262,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -391,12 +279,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vwaddu_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -415,14 +297,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -440,12 +314,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vwaddu_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -464,14 +332,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -489,12 +349,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16( - , - , - 
, - iXLen); - define @intrinsic_vwaddu_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -513,14 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -538,12 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vwaddu_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -561,14 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -586,12 +418,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vwaddu_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -610,14 +436,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -635,12 +453,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vwaddu_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # 
%entry @@ -659,14 +471,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -684,12 +488,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vwaddu_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -708,14 +506,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -733,12 +523,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwaddu_vx_nxv1i16_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -756,14 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -781,12 +557,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwaddu_vx_nxv2i16_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -804,14 +574,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -829,12 +591,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwaddu_vx_nxv4i16_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -852,14 +608,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -877,12 +625,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwaddu_vx_nxv8i16_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -900,14 +642,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -925,12 +659,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwaddu_vx_nxv16i16_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -948,14 +676,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -973,12 +693,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwaddu_vx_nxv32i16_nxv32i8_i8( %0, i8 %1, 
iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -996,14 +710,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1021,12 +727,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwaddu_vx_nxv1i32_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1044,14 +744,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1069,12 +761,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwaddu_vx_nxv2i32_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1092,14 +778,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1117,12 +795,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwaddu_vx_nxv4i32_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1140,14 +812,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16( - , - , - i16, - , - 
iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1165,12 +829,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwaddu_vx_nxv8i32_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1188,14 +846,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1213,12 +863,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwaddu_vx_nxv16i32_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1236,14 +880,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1261,12 +897,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwaddu_vx_nxv1i64_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1284,14 +914,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vx_nxv1i64_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ 
-1309,12 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwaddu_vx_nxv2i64_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1332,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vx_nxv2i64_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1357,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwaddu_vx_nxv4i64_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1380,14 +982,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vx_nxv4i64_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1405,12 +999,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwaddu_vx_nxv8i64_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1428,14 +1016,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwaddu_mask_vx_nxv8i64_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w.ll index ab9a038236f80..e19a212b37ac7 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vwaddu.w_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vwaddu.w_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vwaddu.w_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8( - , - , - , - 
iXLen); - define @intrinsic_vwaddu.w_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vwaddu.w_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vwaddu.w_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -287,12 +203,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vwaddu.w_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ 
-309,14 +219,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vwaddu.w_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vwaddu.w_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vwaddu.w_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, %2, %3, iXLen 
%4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vwaddu.w_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -523,12 +369,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vwaddu.w_wv_nxv1i64_nxv1i64_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -545,14 +385,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wv_nxv1i64_nxv1i64_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -570,12 +402,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vwaddu.w_wv_nxv2i64_nxv2i64_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -592,14 +418,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv2i64.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wv_nxv2i64_nxv2i64_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare 
@llvm.riscv.vwaddu.w.nxv4i64.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vwaddu.w_wv_nxv4i64_nxv4i64_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv4i64.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wv_nxv4i64_nxv4i64_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vwaddu.w_wv_nxv8i64_nxv8i64_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv8i64.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -712,12 +502,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv1i16.i8( - , - , - i8, - iXLen); - define @intrinsic_vwaddu.w_wx_nxv1i16_nxv1i16_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv1i16_nxv1i16_i8: ; CHECK: # %bb.0: # %entry @@ -734,14 +518,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv1i16.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wx_nxv1i16_nxv1i16_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv1i16_nxv1i16_i8: ; CHECK: # %bb.0: # %entry @@ -759,12 +535,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv2i16.i8( - , - , - i8, - iXLen); - define @intrinsic_vwaddu.w_wx_nxv2i16_nxv2i16_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv2i16_nxv2i16_i8: ; CHECK: # %bb.0: # %entry 
@@ -781,14 +551,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv2i16.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wx_nxv2i16_nxv2i16_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv2i16_nxv2i16_i8: ; CHECK: # %bb.0: # %entry @@ -806,12 +568,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv4i16.i8( - , - , - i8, - iXLen); - define @intrinsic_vwaddu.w_wx_nxv4i16_nxv4i16_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv4i16_nxv4i16_i8: ; CHECK: # %bb.0: # %entry @@ -828,14 +584,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv4i16.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wx_nxv4i16_nxv4i16_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv4i16_nxv4i16_i8: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv8i16.i8( - , - , - i8, - iXLen); - define @intrinsic_vwaddu.w_wx_nxv8i16_nxv8i16_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv8i16_nxv8i16_i8: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv8i16.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wx_nxv8i16_nxv8i16_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv8i16_nxv8i16_i8: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv16i16.i8( - , - , - i8, - iXLen); - define @intrinsic_vwaddu.w_wx_nxv16i16_nxv16i16_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv16i16_nxv16i16_i8: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv16i16.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wx_nxv16i16_nxv16i16_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vwaddu.w_mask_wx_nxv16i16_nxv16i16_i8: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv32i16.i8( - , - , - i8, - iXLen); - define @intrinsic_vwaddu.w_wx_nxv32i16_nxv32i16_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv32i16_nxv32i16_i8: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv32i16.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wx_nxv32i16_nxv32i16_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv32i16_nxv32i16_i8: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv1i32.i16( - , - , - i16, - iXLen); - define @intrinsic_vwaddu.w_wx_nxv1i32_nxv1i32_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv1i32_nxv1i32_i16: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv1i32.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wx_nxv1i32_nxv1i32_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv1i32_nxv1i32_i16: ; CHECK: # %bb.0: # %entry @@ -1041,12 +733,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv2i32.i16( - , - , - i16, - iXLen); - define @intrinsic_vwaddu.w_wx_nxv2i32_nxv2i32_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv2i32_nxv2i32_i16: ; CHECK: # %bb.0: # %entry @@ -1063,14 +749,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv2i32.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wx_nxv2i32_nxv2i32_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv2i32_nxv2i32_i16: ; CHECK: # %bb.0: # %entry @@ -1088,12 +766,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv4i32.i16( - , - , - i16, - iXLen); - define 
@intrinsic_vwaddu.w_wx_nxv4i32_nxv4i32_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv4i32_nxv4i32_i16: ; CHECK: # %bb.0: # %entry @@ -1110,14 +782,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv4i32.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wx_nxv4i32_nxv4i32_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv4i32_nxv4i32_i16: ; CHECK: # %bb.0: # %entry @@ -1135,12 +799,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv8i32.i16( - , - , - i16, - iXLen); - define @intrinsic_vwaddu.w_wx_nxv8i32_nxv8i32_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv8i32_nxv8i32_i16: ; CHECK: # %bb.0: # %entry @@ -1157,14 +815,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv8i32.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wx_nxv8i32_nxv8i32_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv8i32_nxv8i32_i16: ; CHECK: # %bb.0: # %entry @@ -1182,12 +832,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv16i32.i16( - , - , - i16, - iXLen); - define @intrinsic_vwaddu.w_wx_nxv16i32_nxv16i32_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv16i32_nxv16i32_i16: ; CHECK: # %bb.0: # %entry @@ -1204,14 +848,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv16i32.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wx_nxv16i32_nxv16i32_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv16i32_nxv16i32_i16: ; CHECK: # %bb.0: # %entry @@ -1229,12 +865,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv1i64.i32( - , - , - i32, - iXLen); - define @intrinsic_vwaddu.w_wx_nxv1i64_nxv1i64_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv1i64_nxv1i64_i32: ; CHECK: # %bb.0: # %entry @@ -1251,14 +881,6 @@ entry: ret %a } -declare 
@llvm.riscv.vwaddu.w.mask.nxv1i64.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wx_nxv1i64_nxv1i64_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv1i64_nxv1i64_i32: ; CHECK: # %bb.0: # %entry @@ -1276,12 +898,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv2i64.i32( - , - , - i32, - iXLen); - define @intrinsic_vwaddu.w_wx_nxv2i64_nxv2i64_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv2i64_nxv2i64_i32: ; CHECK: # %bb.0: # %entry @@ -1298,14 +914,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv2i64.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wx_nxv2i64_nxv2i64_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv2i64_nxv2i64_i32: ; CHECK: # %bb.0: # %entry @@ -1323,12 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv4i64.i32( - , - , - i32, - iXLen); - define @intrinsic_vwaddu.w_wx_nxv4i64_nxv4i64_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv4i64_nxv4i64_i32: ; CHECK: # %bb.0: # %entry @@ -1345,14 +947,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv4i64.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wx_nxv4i64_nxv4i64_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv4i64_nxv4i64_i32: ; CHECK: # %bb.0: # %entry @@ -1370,12 +964,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.nxv8i64.i32( - , - , - i32, - iXLen); - define @intrinsic_vwaddu.w_wx_nxv8i64_nxv8i64_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv8i64_nxv8i64_i32: ; CHECK: # %bb.0: # %entry @@ -1392,14 +980,6 @@ entry: ret %a } -declare @llvm.riscv.vwaddu.w.mask.nxv8i64.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwaddu.w_mask_wx_nxv8i64_nxv8i64_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vwaddu.w_mask_wx_nxv8i64_nxv8i64_i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmacc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vwmacc-vp.ll index 02bc8d2731153..a869b5d8117b3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwmacc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmacc-vp.ll @@ -4,11 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -target-abi=lp64 \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare @llvm.vp.sext.nxv1i32.nxv1i16(, , i32) -declare @llvm.vp.mul.nxv1i32(, , , i32) -declare @llvm.vp.add.nxv1i32(, , , i32) -declare @llvm.vp.merge.nxv1i32(, , , i32) - define @vwmacc_vv_nxv1i32_unmasked_tu( %a, ; CHECK-LABEL: vwmacc_vv_nxv1i32_unmasked_tu: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmacc.ll b/llvm/test/CodeGen/RISCV/rvv/vwmacc.ll index 58f4e8262b3d1..8b5aa1217cce4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwmacc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmacc.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+d \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vwmacc.nxv1i16.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,13 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv2i16.nxv2i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -74,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv2i16.nxv2i8( - , - , - , - , - 
iXLen, iXLen); - define @intrinsic_vwmacc_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,13 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv4i16.nxv4i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -121,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv4i16.nxv4i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,13 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv8i16.nxv8i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -168,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv8i16.nxv8i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv16i16.nxv16i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -215,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv16i16.nxv16i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,13 +169,6 @@ 
entry: ret %a } -declare @llvm.riscv.vwmacc.nxv32i16.nxv32i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -262,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv32i16.nxv32i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,13 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv1i32.nxv1i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -309,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv1i32.nxv1i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -333,13 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv2i32.nxv2i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -356,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv2i32.nxv2i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -380,13 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv4i32.nxv4i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vwmacc_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -403,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv4i32.nxv4i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -427,13 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv8i32.nxv8i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -450,13 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv8i32.nxv8i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -474,13 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv16i32.nxv16i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -497,13 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv16i32.nxv16i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -521,13 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv1i64.nxv1i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -544,13 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32( - , - , - , - , - iXLen, iXLen); 
- define @intrinsic_vwmacc_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -568,13 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv2i64.nxv2i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -591,13 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv2i64.nxv2i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -615,13 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv4i64.nxv4i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -638,13 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv4i64.nxv4i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -662,13 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv8i64.nxv8i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -685,13 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv8i64.nxv8i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -709,13 +499,6 
@@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv1i16.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv1i16_i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -732,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv1i16.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv1i16_i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -756,13 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv2i16.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv2i16_i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -779,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv2i16.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv2i16_i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -803,13 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv4i16.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv4i16_i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -826,13 +581,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv4i16.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv4i16_i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -850,13 +598,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv8i16.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv8i16_i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -873,13 +614,6 @@ 
entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv8i16.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv8i16_i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -897,13 +631,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv16i16.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv16i16_i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -920,13 +647,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv16i16.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv16i16_i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -944,13 +664,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv32i16.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vx_nxv32i16_i8_nxv32i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv32i16_i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -967,13 +680,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv32i16.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vx_nxv32i16_i8_nxv32i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv32i16_i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -991,13 +697,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv1i32.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv1i32_i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1014,13 +713,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv1i32.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vwmacc_mask_vx_nxv1i32_i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1038,13 +730,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv2i32.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv2i32_i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1061,13 +746,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv2i32.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv2i32_i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1085,13 +763,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv4i32.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv4i32_i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1108,13 +779,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv4i32.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv4i32_i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1132,13 +796,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv8i32.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv8i32_i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1155,13 +812,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv8i32.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv8i32_i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1179,13 +829,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv16i32.i16( - , - i16, - , - iXLen, - iXLen); - define 
@intrinsic_vwmacc_vx_nxv16i32_i16_nxv16i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv16i32_i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1202,13 +845,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv16i32.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vx_nxv16i32_i16_nxv16i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv16i32_i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1226,13 +862,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv1i64.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vx_nxv1i64_i32_nxv1i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv1i64_i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1249,13 +878,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv1i64.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vx_nxv1i64_i32_nxv1i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv1i64_i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1273,13 +895,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv2i64.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vx_nxv2i64_i32_nxv2i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv2i64_i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1296,13 +911,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv2i64.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vx_nxv2i64_i32_nxv2i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv2i64_i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1320,13 +928,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv4i64.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vx_nxv4i64_i32_nxv4i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv4i64_i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1343,13 +944,6 @@ entry: ret %a } -declare 
@llvm.riscv.vwmacc.mask.nxv4i64.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vx_nxv4i64_i32_nxv4i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv4i64_i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1367,13 +961,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.nxv8i64.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmacc_vx_nxv8i64_i32_nxv8i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv8i64_i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1390,13 +977,6 @@ entry: ret %a } -declare @llvm.riscv.vwmacc.mask.nxv8i64.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vwmacc_mask_vx_nxv8i64_i32_nxv8i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv8i64_i32_nxv8i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-vp.ll index 486a5b09b677c..84d8587e64eca 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-vp.ll @@ -4,12 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -target-abi=lp64 \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare @llvm.vp.sext.nxv1i32.nxv1i16(, , i32) -declare @llvm.vp.zext.nxv1i32.nxv1i16(, , i32) -declare @llvm.vp.mul.nxv1i32(, , , i32) -declare @llvm.vp.add.nxv1i32(, , , i32) -declare @llvm.vp.merge.nxv1i32(, , , i32) - define @vwmacc_vv_nxv1i32_unmasked_tu( %a, ; CHECK-LABEL: vwmacc_vv_nxv1i32_unmasked_tu: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccsu.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccsu.ll index 108ec3d49f36f..b8123735dcf5d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwmaccsu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccsu.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+d \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8( - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv1i16.nxv1i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,13 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -74,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv2i16.nxv2i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,13 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -121,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv4i16.nxv4i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,13 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -168,13 +119,6 @@ entry: ret %a } -declare 
@llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -215,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv16i16.nxv16i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,13 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv32i16.nxv32i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -262,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv32i16.nxv32i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,13 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -309,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv1i32.nxv1i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -333,13 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -356,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv2i32.nxv2i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -380,13 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -403,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv4i32.nxv4i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -427,13 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -450,13 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv8i32.nxv8i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -474,13 +334,6 @@ entry: ret %a } -declare 
@llvm.riscv.vwmaccsu.nxv16i32.nxv16i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -497,13 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv16i32.nxv16i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -521,13 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -544,13 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -568,13 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv2i64.nxv2i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -591,13 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv2i64.nxv2i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -615,13 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv4i64.nxv4i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind 
{ ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -638,13 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv4i64.nxv4i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -662,13 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv8i64.nxv8i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -685,13 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv8i64.nxv8i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -709,13 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv1i16.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv1i16_i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -732,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv1i16.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv1i16_i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -756,13 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv2i16.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv2i16_i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -779,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv2i16.i8( - , - i8, - , - , - iXLen, 
iXLen); - define @intrinsic_vwmaccsu_mask_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv2i16_i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -803,13 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv4i16.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv4i16_i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -826,13 +581,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv4i16.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv4i16_i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -850,13 +598,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv8i16.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv8i16_i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -873,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv8i16.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv8i16_i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -897,13 +631,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv16i16.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv16i16_i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -920,13 +647,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv16i16.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv16i16_i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -944,13 +664,6 @@ entry: ret %a } 
-declare @llvm.riscv.vwmaccsu.nxv32i16.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vx_nxv32i16_i8_nxv32i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv32i16_i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -967,13 +680,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv32i16.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vx_nxv32i16_i8_nxv32i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv32i16_i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -991,13 +697,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv1i32.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv1i32_i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1014,13 +713,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv1i32.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv1i32_i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1038,13 +730,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv2i32.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv2i32_i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1061,13 +746,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv2i32.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv2i32_i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1085,13 +763,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv4i32.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vwmaccsu_vx_nxv4i32_i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1108,13 +779,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv4i32.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv4i32_i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1132,13 +796,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv8i32.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv8i32_i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1155,13 +812,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv8i32.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv8i32_i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1179,13 +829,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv16i32.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vx_nxv16i32_i16_nxv16i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv16i32_i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1202,13 +845,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv16i32.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vx_nxv16i32_i16_nxv16i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv16i32_i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1226,13 +862,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv1i64.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vx_nxv1i64_i32_nxv1i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv1i64_i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1249,13 +878,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv1i64.i32( - , - i32, - , - , - iXLen, 
iXLen); - define @intrinsic_vwmaccsu_mask_vx_nxv1i64_i32_nxv1i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv1i64_i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1273,13 +895,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv2i64.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vx_nxv2i64_i32_nxv2i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv2i64_i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1296,13 +911,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv2i64.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vx_nxv2i64_i32_nxv2i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv2i64_i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1320,13 +928,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv4i64.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vx_nxv4i64_i32_nxv4i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv4i64_i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1343,13 +944,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv4i64.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vx_nxv4i64_i32_nxv4i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv4i64_i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1367,13 +961,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.nxv8i64.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmaccsu_vx_nxv8i64_i32_nxv8i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv8i64_i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1390,13 +977,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccsu.mask.nxv8i64.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccsu_mask_vx_nxv8i64_i32_nxv8i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv8i64_i32_nxv8i32: ; CHECK: # %bb.0: # %entry 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccu-vp.ll index 125270be4fc85..f9ea353a37bc8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwmaccu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccu-vp.ll @@ -4,11 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -target-abi=lp64 \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare @llvm.vp.zext.nxv1i32.nxv1i16(, , i32) -declare @llvm.vp.mul.nxv1i32(, , , i32) -declare @llvm.vp.add.nxv1i32(, , , i32) -declare @llvm.vp.merge.nxv1i32(, , , i32) - define @vwmacc_vv_nxv1i32_unmasked_tu( %a, ; CHECK-LABEL: vwmacc_vv_nxv1i32_unmasked_tu: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccu.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccu.ll index a308695d315b6..efe60d18b26fc 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwmaccu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccu.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+d \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vwmaccu.nxv1i16.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,13 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv2i16.nxv2i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -74,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8( - , - , - , - , - iXLen, iXLen); - define 
@intrinsic_vwmaccu_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,13 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv4i16.nxv4i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -121,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,13 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv8i16.nxv8i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -168,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv16i16.nxv16i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -215,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,13 +169,6 @@ entry: 
ret %a } -declare @llvm.riscv.vwmaccu.nxv32i16.nxv32i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -262,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,13 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv1i32.nxv1i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -309,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -333,13 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv2i32.nxv2i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -356,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -380,13 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv4i32.nxv4i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; 
CHECK-LABEL: intrinsic_vwmaccu_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -403,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -427,13 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv8i32.nxv8i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -450,13 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -474,13 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv16i32.nxv16i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -497,13 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -521,13 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv1i64.nxv1i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -544,13 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32( 
- , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -568,13 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv2i64.nxv2i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -591,13 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -615,13 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv4i64.nxv4i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -638,13 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -662,13 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv8i64.nxv8i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -685,13 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32( - , - , - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vwmaccu_mask_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -709,13 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv1i16.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv1i16_i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -732,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv1i16.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv1i16_i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -756,13 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv2i16.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv2i16_i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -779,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv2i16.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv2i16_i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -803,13 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv4i16.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv4i16_i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -826,13 +581,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv4i16.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv4i16_i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -850,13 +598,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv8i16.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, iXLen 
%3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv8i16_i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -873,13 +614,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv8i16.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv8i16_i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -897,13 +631,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv16i16.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv16i16_i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -920,13 +647,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv16i16.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv16i16_i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -944,13 +664,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv32i16.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vx_nxv32i16_i8_nxv32i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv32i16_i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -967,13 +680,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv32i16.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vx_nxv32i16_i8_nxv32i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv32i16_i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -991,13 +697,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv1i32.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv1i32_i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1014,13 +713,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv1i32.i16( - , - i16, - , - , - iXLen, iXLen); - define 
@intrinsic_vwmaccu_mask_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv1i32_i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1038,13 +730,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv2i32.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv2i32_i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1061,13 +746,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv2i32.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv2i32_i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1085,13 +763,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv4i32.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv4i32_i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1108,13 +779,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv4i32.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv4i32_i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1132,13 +796,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv8i32.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv8i32_i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1155,13 +812,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv8i32.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv8i32_i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1179,13 +829,6 @@ entry: ret %a 
} -declare @llvm.riscv.vwmaccu.nxv16i32.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vx_nxv16i32_i16_nxv16i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv16i32_i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1202,13 +845,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv16i32.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vx_nxv16i32_i16_nxv16i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv16i32_i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1226,13 +862,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv1i64.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vx_nxv1i64_i32_nxv1i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv1i64_i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1249,13 +878,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv1i64.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vx_nxv1i64_i32_nxv1i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv1i64_i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1273,13 +895,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv2i64.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vx_nxv2i64_i32_nxv2i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv2i64_i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1296,13 +911,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv2i64.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vx_nxv2i64_i32_nxv2i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv2i64_i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1320,13 +928,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv4i64.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vx_nxv4i64_i32_nxv4i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vwmaccu_vx_nxv4i64_i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1343,13 +944,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv4i64.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vx_nxv4i64_i32_nxv4i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv4i64_i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1367,13 +961,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.nxv8i64.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmaccu_vx_nxv8i64_i32_nxv8i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv8i64_i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1390,13 +977,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccu.mask.nxv8i64.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccu_mask_vx_nxv8i64_i32_nxv8i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv8i64_i32_nxv8i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccus.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccus.ll index 18c69b9d92b1b..20346c2097e35 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwmaccus.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccus.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+d \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vwmaccus.nxv1i16.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.mask.nxv1i16.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccus_mask_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv1i16_i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,13 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.nxv2i16.i8( - , - i8, - , - iXLen, - iXLen); - define 
@intrinsic_vwmaccus_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv2i16_i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -74,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.mask.nxv2i16.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccus_mask_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv2i16_i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,13 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.nxv4i16.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmaccus_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv4i16_i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -121,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.mask.nxv4i16.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccus_mask_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv4i16_i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,13 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.nxv8i16.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmaccus_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv8i16_i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -168,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.mask.nxv8i16.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccus_mask_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv8i16_i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,13 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.nxv16i16.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmaccus_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv16i16_i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -215,13 +152,6 @@ entry: ret %a } -declare 
@llvm.riscv.vwmaccus.mask.nxv16i16.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccus_mask_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv16i16_i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,13 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.nxv32i16.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmaccus_vx_nxv32i16_i8_nxv32i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv32i16_i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -262,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.mask.nxv32i16.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccus_mask_vx_nxv32i16_i8_nxv32i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv32i16_i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,13 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.nxv1i32.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmaccus_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv1i32_i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -309,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.mask.nxv1i32.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccus_mask_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv1i32_i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -333,13 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.nxv2i32.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmaccus_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv2i32_i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -356,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.mask.nxv2i32.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccus_mask_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vwmaccus_mask_vx_nxv2i32_i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -380,13 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.nxv4i32.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmaccus_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv4i32_i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -403,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.mask.nxv4i32.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccus_mask_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv4i32_i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -427,13 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.nxv8i32.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmaccus_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv8i32_i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -450,13 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.mask.nxv8i32.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccus_mask_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv8i32_i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -474,13 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.nxv16i32.i16( - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmaccus_vx_nxv16i32_i16_nxv16i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv16i32_i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -497,13 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.mask.nxv16i32.i16( - , - i16, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccus_mask_vx_nxv16i32_i16_nxv16i16( %0, i16 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv16i32_i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -521,13 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.nxv1i64.i32( - , - i32, - , - iXLen, - iXLen); - 
define @intrinsic_vwmaccus_vx_nxv1i64_i32_nxv1i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv1i64_i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -544,13 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.mask.nxv1i64.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccus_mask_vx_nxv1i64_i32_nxv1i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv1i64_i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -568,13 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.nxv2i64.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmaccus_vx_nxv2i64_i32_nxv2i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv2i64_i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -591,13 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.mask.nxv2i64.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccus_mask_vx_nxv2i64_i32_nxv2i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv2i64_i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -615,13 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.nxv4i64.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmaccus_vx_nxv4i64_i32_nxv4i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv4i64_i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -638,13 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.mask.nxv4i64.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccus_mask_vx_nxv4i64_i32_nxv4i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv4i64_i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -662,13 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vwmaccus.nxv8i64.i32( - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmaccus_vx_nxv8i64_i32_nxv8i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv8i64_i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -685,13 +482,6 @@ entry: 
ret %a } -declare @llvm.riscv.vwmaccus.mask.nxv8i64.i32( - , - i32, - , - , - iXLen, iXLen); - define @intrinsic_vwmaccus_mask_vx_nxv8i64_i32_nxv8i32( %0, i32 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv8i64_i32_nxv8i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmul.ll b/llvm/test/CodeGen/RISCV/rvv/vwmul.ll index c1dad7f662140..687ecf5f83ba8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwmul.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmul.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,14 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -52,12 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vwmul_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,14 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -100,12 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vwmul_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv4i16_nxv4i8_nxv4i8: ; 
CHECK: # %bb.0: # %entry @@ -123,14 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -148,12 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vwmul_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -172,14 +124,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -197,12 +141,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vwmul_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -221,14 +159,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -246,12 +176,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vwmul_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -270,14 +194,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, 
%1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -295,12 +211,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vwmul_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -318,14 +228,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -343,12 +245,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vwmul_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -366,14 +262,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -391,12 +279,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vwmul_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -415,14 +297,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -440,12 +314,6 @@ entry: ret %a } -declare 
@llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vwmul_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -464,14 +332,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -489,12 +349,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vwmul_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -513,14 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -538,12 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vwmul_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -561,14 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -586,12 +418,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vwmul_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vwmul_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -610,14 +436,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -635,12 +453,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vwmul_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -659,14 +471,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -684,12 +488,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vwmul_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -708,14 +506,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -733,12 +523,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwmul_vx_nxv1i16_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -756,14 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define 
@intrinsic_vwmul_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -781,12 +557,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwmul_vx_nxv2i16_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -804,14 +574,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -829,12 +591,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwmul_vx_nxv4i16_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -852,14 +608,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -877,12 +625,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwmul_vx_nxv8i16_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -900,14 +642,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -925,12 +659,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8( - , - , - i8, - iXLen); - define 
@intrinsic_vwmul_vx_nxv16i16_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -948,14 +676,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -973,12 +693,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwmul_vx_nxv32i16_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -996,14 +710,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1021,12 +727,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwmul_vx_nxv1i32_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1044,14 +744,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1069,12 +761,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwmul_vx_nxv2i32_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1092,14 +778,6 @@ entry: ret %a } -declare 
@llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1117,12 +795,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwmul_vx_nxv4i32_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1140,14 +812,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1165,12 +829,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwmul_vx_nxv8i32_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1188,14 +846,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1213,12 +863,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwmul_vx_nxv16i32_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1236,14 +880,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vwmul_mask_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1261,12 +897,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwmul_vx_nxv1i64_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1284,14 +914,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vx_nxv1i64_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1309,12 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwmul_vx_nxv2i64_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1332,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vx_nxv2i64_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1357,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwmul_vx_nxv4i64_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1380,14 +982,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vx_nxv4i64_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1405,12 +999,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwmul_vx_nxv8i64_nxv8i32_i32( 
%0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1428,14 +1016,6 @@ entry: ret %a } -declare @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmul_mask_vx_nxv8i64_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulsu.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulsu.ll index 5553a0dca4ca3..01e7a47f11ed6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwmulsu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmulsu.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,14 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -52,12 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vwmulsu_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,14 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -100,12 +72,6 @@ entry: ret %a } -declare 
@llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vwmulsu_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -123,14 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -148,12 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vwmulsu_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -172,14 +124,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -197,12 +141,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vwmulsu_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -221,14 +159,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -246,12 +176,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vwmulsu_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vwmulsu_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -270,14 +194,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -295,12 +211,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vwmulsu_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -318,14 +228,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -343,12 +245,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vwmulsu_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -366,14 +262,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -391,12 +279,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vwmulsu_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -415,14 +297,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.nxv4i16( - 
, - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -440,12 +314,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vwmulsu_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -464,14 +332,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -489,12 +349,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vwmulsu_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -513,14 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -538,12 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vwmulsu_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -561,14 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vwmulsu_mask_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -586,12 +418,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vwmulsu_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -610,14 +436,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -635,12 +453,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vwmulsu_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -659,14 +471,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -684,12 +488,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vwmulsu_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -708,14 +506,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -733,12 +523,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8( - , - , - 
i8, - iXLen); - define @intrinsic_vwmulsu_vx_nxv1i16_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -756,14 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -781,12 +557,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwmulsu_vx_nxv2i16_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -804,14 +574,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -829,12 +591,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwmulsu_vx_nxv4i16_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -852,14 +608,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -877,12 +625,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwmulsu_vx_nxv8i16_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -900,14 +642,6 @@ entry: ret %a } -declare 
@llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -925,12 +659,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwmulsu_vx_nxv16i16_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -948,14 +676,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -973,12 +693,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwmulsu_vx_nxv32i16_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -996,14 +710,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1021,12 +727,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwmulsu_vx_nxv1i32_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1044,14 +744,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vwmulsu_mask_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1069,12 +761,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwmulsu_vx_nxv2i32_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1092,14 +778,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1117,12 +795,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwmulsu_vx_nxv4i32_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1140,14 +812,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1165,12 +829,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwmulsu_vx_nxv8i32_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1188,14 +846,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1213,12 +863,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16( - , - , - i16, - iXLen); - define 
@intrinsic_vwmulsu_vx_nxv16i32_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1236,14 +880,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1261,12 +897,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwmulsu_vx_nxv1i64_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1284,14 +914,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vx_nxv1i64_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1309,12 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwmulsu_vx_nxv2i64_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1332,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vx_nxv2i64_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1357,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwmulsu_vx_nxv4i64_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1380,14 +982,6 @@ entry: 
ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vx_nxv4i64_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1405,12 +999,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwmulsu_vx_nxv8i64_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1428,14 +1016,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmulsu_mask_vx_nxv8i64_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulu.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulu.ll index 4302562d44eab..5e4ac670e8830 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwmulu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmulu.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,14 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -52,12 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vwmulu_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vwmulu_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,14 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -100,12 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vwmulu_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -123,14 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -148,12 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vwmulu_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -172,14 +124,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -197,12 +141,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vwmulu_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -221,14 +159,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vwmulu_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -246,12 +176,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vwmulu_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -270,14 +194,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -295,12 +211,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vwmulu_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -318,14 +228,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -343,12 +245,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vwmulu_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -366,14 +262,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ 
-391,12 +279,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vwmulu_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -415,14 +297,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -440,12 +314,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vwmulu_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -464,14 +332,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -489,12 +349,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vwmulu_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -513,14 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -538,12 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vwmulu_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, 
iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -561,14 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -586,12 +418,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vwmulu_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -610,14 +436,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -635,12 +453,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vwmulu_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -659,14 +471,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -684,12 +488,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vwmulu_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -708,14 +506,6 @@ entry: ret %a } -declare 
@llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -733,12 +523,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwmulu_vx_nxv1i16_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -756,14 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -781,12 +557,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwmulu_vx_nxv2i16_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -804,14 +574,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -829,12 +591,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwmulu_vx_nxv4i16_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -852,14 +608,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # 
%entry @@ -877,12 +625,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwmulu_vx_nxv8i16_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -900,14 +642,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -925,12 +659,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwmulu_vx_nxv16i16_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -948,14 +676,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -973,12 +693,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwmulu_vx_nxv32i16_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -996,14 +710,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1021,12 +727,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwmulu_vx_nxv1i32_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vwmulu_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1044,14 +744,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1069,12 +761,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwmulu_vx_nxv2i32_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1092,14 +778,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1117,12 +795,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwmulu_vx_nxv4i32_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1140,14 +812,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1165,12 +829,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwmulu_vx_nxv8i32_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1188,14 +846,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define 
@intrinsic_vwmulu_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1213,12 +863,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwmulu_vx_nxv16i32_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1236,14 +880,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1261,12 +897,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwmulu_vx_nxv1i64_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1284,14 +914,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vx_nxv1i64_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1309,12 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwmulu_vx_nxv2i64_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1332,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vx_nxv2i64_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1357,12 +965,6 @@ entry: 
ret %a } -declare @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwmulu_vx_nxv4i64_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1380,14 +982,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vx_nxv4i64_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1405,12 +999,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwmulu_vx_nxv8i64_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1428,14 +1016,6 @@ entry: ret %a } -declare @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwmulu_mask_vx_nxv8i64_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vwredsum.ll b/llvm/test/CodeGen/RISCV/rvv/vwredsum.ll index fb46f61581a9c..53b49286a869f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwredsum.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwredsum.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vwredsum.nxv4i16.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vwredsum_vs_nxv4i16_nxv1i8_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv1i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vwredsum_mask_vs_nxv4i16_nxv1i8_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vwredsum_mask_vs_nxv4i16_nxv1i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.nxv4i16.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vwredsum_vs_nxv4i16_nxv2i8_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv2i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vwredsum_mask_vs_nxv4i16_nxv2i8_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv2i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.nxv4i16.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vwredsum_vs_nxv4i16_nxv4i8_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vwredsum_mask_vs_nxv4i16_nxv4i8_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.nxv4i16.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vwredsum_vs_nxv4i16_nxv8i8_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv8i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vwredsum_mask_vs_nxv4i16_nxv8i8_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv8i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.nxv4i16.nxv16i8( - , - , - , - iXLen); - define 
@intrinsic_vwredsum_vs_nxv4i16_nxv16i8_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv16i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vwredsum_mask_vs_nxv4i16_nxv16i8_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv16i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.nxv4i16.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vwredsum_vs_nxv4i16_nxv32i8_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv32i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -256,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vwredsum_mask_vs_nxv4i16_nxv32i8_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv32i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -280,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.nxv4i16.nxv64i8( - , - , - , - iXLen); - define @intrinsic_vwredsum_vs_nxv4i16_nxv64i8_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv64i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -302,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vwredsum_mask_vs_nxv4i16_nxv64i8_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv64i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -326,12 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.nxv2i32.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vwredsum_vs_nxv2i32_nxv1i16_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv1i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -348,13 +251,6 @@ 
entry: ret %a } -declare @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vwredsum_mask_vs_nxv2i32_nxv1i16_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv1i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -372,12 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.nxv2i32.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vwredsum_vs_nxv2i32_nxv2i16_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -394,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vwredsum_mask_vs_nxv2i32_nxv2i16_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -418,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.nxv2i32.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vwredsum_vs_nxv2i32_nxv4i16_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv4i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -440,13 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vwredsum_mask_vs_nxv2i32_nxv4i16_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv4i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -464,12 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.nxv2i32.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vwredsum_vs_nxv2i32_nxv8i16_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv8i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -486,13 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vwredsum_mask_vs_nxv2i32_nxv8i16_nxv2i32( %0, %1, %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv8i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -510,12 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.nxv2i32.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vwredsum_vs_nxv2i32_nxv16i16_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv16i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -532,13 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vwredsum_mask_vs_nxv2i32_nxv16i16_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv16i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -556,12 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.nxv2i32.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vwredsum_vs_nxv2i32_nxv32i16_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv32i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -578,13 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vwredsum_mask_vs_nxv2i32_nxv32i16_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv32i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -602,12 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.nxv1i64.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vwredsum_vs_nxv1i64_nxv1i32_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -624,13 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vwredsum_mask_vs_nxv1i64_nxv1i32_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -648,12 +466,6 @@ entry: ret %a } -declare 
@llvm.riscv.vwredsum.nxv1i64.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vwredsum_vs_nxv1i64_nxv2i32_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv2i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -670,13 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vwredsum_mask_vs_nxv1i64_nxv2i32_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv2i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -694,12 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.nxv1i64.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vwredsum_vs_nxv1i64_nxv4i32_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv4i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -716,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vwredsum_mask_vs_nxv1i64_nxv4i32_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv4i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -740,12 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.nxv1i64.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vwredsum_vs_nxv1i64_nxv8i32_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv8i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -762,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vwredsum_mask_vs_nxv1i64_nxv8i32_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv8i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -786,12 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.nxv1i64.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vwredsum_vs_nxv1i64_nxv16i32_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vwredsum_vs_nxv1i64_nxv16i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -808,13 +581,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vwredsum_mask_vs_nxv1i64_nxv16i32_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv16i32_nxv1i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vwredsumu.ll b/llvm/test/CodeGen/RISCV/rvv/vwredsumu.ll index 87d1c6113fbf7..20cc5271ffb6b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwredsumu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwredsumu.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vwredsumu.nxv4i16.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vwredsumu_vs_nxv4i16_nxv1i8_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv1i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -26,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv1i8_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv1i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -50,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.nxv4i16.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vwredsumu_vs_nxv4i16_nxv2i8_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv2i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -72,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv2i8_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv2i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -96,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.nxv4i16.nxv4i8( - , - 
, - , - iXLen); - define @intrinsic_vwredsumu_vs_nxv4i16_nxv4i8_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -118,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv4i8_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -142,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.nxv4i16.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vwredsumu_vs_nxv4i16_nxv8i8_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv8i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -164,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv8i8_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv8i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -188,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.nxv4i16.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vwredsumu_vs_nxv4i16_nxv16i8_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv16i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -210,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv16i8_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv16i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -234,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.nxv4i16.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vwredsumu_vs_nxv4i16_nxv32i8_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv32i8_nxv4i16: ; CHECK: # %bb.0: # 
%entry @@ -256,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv32i8_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv32i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -280,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.nxv4i16.nxv64i8( - , - , - , - iXLen); - define @intrinsic_vwredsumu_vs_nxv4i16_nxv64i8_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv64i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -302,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.nxv4i16( - , - , - , - , - iXLen); - define @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv64i8_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv64i8_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -326,12 +235,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.nxv2i32.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vwredsumu_vs_nxv2i32_nxv1i16_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv1i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -348,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv1i16_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv1i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -372,12 +268,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.nxv2i32.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vwredsumu_vs_nxv2i32_nxv2i16_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -394,13 +284,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.nxv2i32( - , - , - , - , - iXLen); - define 
@intrinsic_vwredsumu_mask_vs_nxv2i32_nxv2i16_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -418,12 +301,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.nxv2i32.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vwredsumu_vs_nxv2i32_nxv4i16_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv4i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -440,13 +317,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv4i16_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv4i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -464,12 +334,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.nxv2i32.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vwredsumu_vs_nxv2i32_nxv8i16_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv8i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -486,13 +350,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv8i16_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv8i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -510,12 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.nxv2i32.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vwredsumu_vs_nxv2i32_nxv16i16_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv16i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -532,13 +383,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv16i16_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv16i16_nxv2i32: ; CHECK: # 
%bb.0: # %entry @@ -556,12 +400,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.nxv2i32.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vwredsumu_vs_nxv2i32_nxv32i16_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv32i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -578,13 +416,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.nxv2i32( - , - , - , - , - iXLen); - define @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv32i16_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv32i16_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -602,12 +433,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.nxv1i64.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vwredsumu_vs_nxv1i64_nxv1i32_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -624,13 +449,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv1i32_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -648,12 +466,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.nxv1i64.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vwredsumu_vs_nxv1i64_nxv2i32_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv2i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -670,13 +482,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv2i32_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv2i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -694,12 +499,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.nxv1i64.nxv4i32( - , - , - , - iXLen); - define 
@intrinsic_vwredsumu_vs_nxv1i64_nxv4i32_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv4i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -716,13 +515,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv4i32_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv4i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -740,12 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.nxv1i64.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vwredsumu_vs_nxv1i64_nxv8i32_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv8i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -762,13 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv8i32_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv8i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -786,12 +565,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.nxv1i64.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vwredsumu_vs_nxv1i64_nxv16i32_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv16i32_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -808,13 +581,6 @@ entry: ret %a } -declare @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.nxv1i64( - , - , - , - , - iXLen); - define @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv16i32_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv16i32_nxv1i64: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsll-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vwsll-vp.ll index 87bb5fa0238ce..1853465767637 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwsll-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsll-vp.ll @@ -8,8 +8,6 @@ ; i32 -> i64 ; 
============================================================================== -declare @llvm.vp.shl.nxv2i64(, , , i32) - define @vwsll_vv_nxv2i64_sext( %a, %b, %m, i32 zeroext %vl) { ; CHECK-LABEL: vwsll_vv_nxv2i64_sext: ; CHECK: # %bb.0: @@ -263,8 +261,6 @@ define @vwsll_vi_nxv2i64( %a, i32 ; ============================================================================== -declare @llvm.vp.shl.nxv4i32(, , , i32) - define @vwsll_vv_nxv4i32_sext( %a, %b, %m, i32 zeroext %vl) { ; CHECK-LABEL: vwsll_vv_nxv4i32_sext: ; CHECK: # %bb.0: @@ -486,13 +482,10 @@ define @vwsll_vi_nxv4i32( %a, %z } - ; ============================================================================== ; i8 -> i16 ; ============================================================================== -declare @llvm.vp.shl.nxv8i16(, , , i32) - define @vwsll_vv_nxv8i16_sext( %a, %b, %m, i32 zeroext %vl) { ; CHECK-LABEL: vwsll_vv_nxv8i16_sext: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsll.ll b/llvm/test/CodeGen/RISCV/rvv/vwsll.ll index e51efd15d48ab..85b05c491ea7a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwsll.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsll.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvbb \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vwsll.nxv1i16.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vwsll_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,14 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv1i16.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -52,12 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv2i16.nxv2i8.nxv2i8( - , - , - , - iXLen); - define 
@intrinsic_vwsll_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,14 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv2i16.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -100,12 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv4i16.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vwsll_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -123,14 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv4i16.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -148,12 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv8i16.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vwsll_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -172,14 +124,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv8i16.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -197,12 +141,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv16i16.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vwsll_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -221,14 +159,6 @@ entry: ret %a } -declare 
@llvm.riscv.vwsll.mask.nxv16i16.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -246,12 +176,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv32i16.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vwsll_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -270,14 +194,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv32i16.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -295,12 +211,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv1i32.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vwsll_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -318,14 +228,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv1i32.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -343,12 +245,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv2i32.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vwsll_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -366,14 +262,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv2i32.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vwsll_mask_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -391,12 +279,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv4i32.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vwsll_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -415,14 +297,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv4i32.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -440,12 +314,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv8i32.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vwsll_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -464,14 +332,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv8i32.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -489,12 +349,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv16i32.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vwsll_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -513,14 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv16i32.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -538,12 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv1i64.nxv1i32.nxv1i32( - , - , - , - iXLen); - 
define @intrinsic_vwsll_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -561,14 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv1i64.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -586,12 +418,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv2i64.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vwsll_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -610,14 +436,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv2i64.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -635,12 +453,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv4i64.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vwsll_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -659,14 +471,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv4i64.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -684,12 +488,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv8i64.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vwsll_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -708,14 +506,6 @@ entry: ret %a } 
-declare @llvm.riscv.vwsll.mask.nxv8i64.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -733,12 +523,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv1i16.nxv1i8( - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_vx_nxv1i16_nxv1i8( %0, iXLen%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vx_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -756,14 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv1i16.nxv1i8.i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vx_nxv1i16_nxv1i8( %0, %1, iXLen%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -781,12 +557,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv2i16.nxv2i8( - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_vx_nxv2i16_nxv2i8( %0, iXLen%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vx_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -804,14 +574,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv2i16.nxv2i8.i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vx_nxv2i16_nxv2i8( %0, %1, iXLen%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -829,12 +591,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv4i16.nxv4i8( - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_vx_nxv4i16_nxv4i8( %0, iXLen%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vx_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -852,14 +608,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv4i16.nxv4i8.i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vx_nxv4i16_nxv4i8( %0, %1, iXLen%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -877,12 +625,6 @@ 
entry: ret %a } -declare @llvm.riscv.vwsll.nxv8i16.nxv8i8( - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_vx_nxv8i16_nxv8i8( %0, iXLen%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vx_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -900,14 +642,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv8i16.nxv8i8.i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vx_nxv8i16_nxv8i8( %0, %1, iXLen%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -925,12 +659,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv16i16.nxv16i8( - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_vx_nxv16i16_nxv16i8( %0, iXLen%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vx_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -948,14 +676,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv16i16.nxv16i8.i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vx_nxv16i16_nxv16i8( %0, %1, iXLen%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -973,12 +693,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv32i16.nxv32i8( - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_vx_nxv32i16_nxv32i8( %0, iXLen%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vx_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -996,14 +710,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv32i16.nxv32i8.i8( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vx_nxv32i16_nxv32i8( %0, %1, iXLen%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -1021,12 +727,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv1i32.nxv1i16( - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_vx_nxv1i32_nxv1i16( %0, iXLen%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vx_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1044,14 +744,6 @@ entry: ret 
%a } -declare @llvm.riscv.vwsll.mask.nxv1i32.nxv1i16.i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vx_nxv1i32_nxv1i16( %0, %1, iXLen%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1069,12 +761,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv2i32.nxv2i16( - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_vx_nxv2i32_nxv2i16( %0, iXLen%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vx_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1092,14 +778,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv2i32.nxv2i16.i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vx_nxv2i32_nxv2i16( %0, %1, iXLen%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1117,12 +795,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv4i32.nxv4i16( - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_vx_nxv4i32_nxv4i16( %0, iXLen%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vx_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1140,14 +812,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv4i32.nxv4i16.i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vx_nxv4i32_nxv4i16( %0, %1, iXLen%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1165,12 +829,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv8i32.nxv8i16( - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_vx_nxv8i32_nxv8i16( %0, iXLen%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vx_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1188,14 +846,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv8i32.nxv8i16.i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vx_nxv8i32_nxv8i16( %0, %1, iXLen%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ 
-1213,12 +863,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv16i32.nxv16i16( - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_vx_nxv16i32_nxv16i16( %0, iXLen%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vx_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1236,14 +880,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv16i32.nxv16i16.i16( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vx_nxv16i32_nxv16i16( %0, %1, iXLen%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1261,12 +897,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv1i64.nxv1i32( - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_vx_nxv1i64_nxv1i32( %0, iXLen%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vx_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1284,14 +914,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv1i64.nxv1i32.i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vx_nxv1i64_nxv1i32( %0, %1, iXLen%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -1309,12 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv2i64.nxv2i32( - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_vx_nxv2i64_nxv2i32( %0, iXLen%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vx_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1332,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv2i64.nxv2i32.i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vx_nxv2i64_nxv2i32( %0, %1, iXLen%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -1357,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv4i64.nxv4i32( - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_vx_nxv4i64_nxv4i32( %0, iXLen%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vx_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry 
@@ -1380,14 +982,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv4i64.nxv4i32.i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vx_nxv4i64_nxv4i32( %0, %1, iXLen%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -1405,12 +999,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.nxv8i64.nxv8i32( - , - , - iXLen, - iXLen); - define @intrinsic_vwsll_vx_nxv8i64_nxv8i32( %0, iXLen%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsll_vx_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -1428,14 +1016,6 @@ entry: ret %a } -declare @llvm.riscv.vwsll.mask.nxv8i64.nxv8i32.i32( - , - , - iXLen, - , - iXLen, - iXLen); - define @intrinsic_vwsll_mask_vx_nxv8i64_nxv8i32( %0, %1, iXLen%2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub.ll index 8b5cba68576e0..52f36b19de102 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwsub.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsub.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,14 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -52,12 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vwsub_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vwsub_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,14 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -100,12 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vwsub_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -123,14 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -148,12 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vwsub_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -172,14 +124,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -197,12 +141,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vwsub_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -221,14 +159,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vwsub_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -246,12 +176,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vwsub_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -270,14 +194,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -295,12 +211,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vwsub_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -318,14 +228,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -343,12 +245,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vwsub_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -366,14 +262,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -391,12 +279,6 @@ 
entry: ret %a } -declare @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vwsub_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -415,14 +297,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -440,12 +314,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vwsub_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -464,14 +332,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -489,12 +349,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vwsub_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -513,14 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -538,12 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vwsub_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vwsub_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -561,14 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -586,12 +418,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vwsub_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -610,14 +436,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -635,12 +453,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vwsub_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -659,14 +471,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -684,12 +488,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vwsub_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -708,14 +506,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vwsub_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -733,12 +523,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwsub_vx_nxv1i16_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -756,14 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -781,12 +557,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwsub_vx_nxv2i16_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -804,14 +574,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -829,12 +591,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwsub_vx_nxv4i16_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -852,14 +608,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -877,12 +625,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8( - , - , - i8, - iXLen); - 
define @intrinsic_vwsub_vx_nxv8i16_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -900,14 +642,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -925,12 +659,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwsub_vx_nxv16i16_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -948,14 +676,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -973,12 +693,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwsub_vx_nxv32i16_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -996,14 +710,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1021,12 +727,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwsub_vx_nxv1i32_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1044,14 +744,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.i16( - , - , 
- i16, - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1069,12 +761,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwsub_vx_nxv2i32_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1092,14 +778,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1117,12 +795,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwsub_vx_nxv4i32_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1140,14 +812,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1165,12 +829,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwsub_vx_nxv8i32_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1188,14 +846,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1213,12 +863,6 @@ 
entry: ret %a } -declare @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwsub_vx_nxv16i32_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1236,14 +880,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1261,12 +897,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwsub_vx_nxv1i64_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1284,14 +914,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vx_nxv1i64_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1309,12 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwsub_vx_nxv2i64_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1332,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vx_nxv2i64_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1357,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwsub_vx_nxv4i64_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vwsub_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1380,14 +982,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vx_nxv4i64_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1405,12 +999,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwsub_vx_nxv8i64_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1428,14 +1016,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwsub_mask_vx_nxv8i64_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub.w.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub.w.ll index d5d712ab3b2d7..9fdbe2edf017e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwsub.w.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsub.w.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vwsub.w.nxv1i16.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv2i16.nxv2i8( - , - , - , - iXLen); - define 
@intrinsic_vwsub.w_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv4i16.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vwsub.w_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv8i16.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vwsub.w_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv16i16.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vwsub.w_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare 
@llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv32i16.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vwsub.w_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -287,12 +203,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv1i32.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vwsub.w_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -309,14 +219,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv2i32.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vwsub.w_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vwsub.w_mask_wv_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv4i32.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vwsub.w_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv8i32.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vwsub.w_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv16i32.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vwsub.w_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -523,12 +369,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv1i64.nxv1i32( - , - , - , - iXLen); - define 
@intrinsic_vwsub.w_wv_nxv1i64_nxv1i64_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -545,14 +385,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wv_nxv1i64_nxv1i64_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -570,12 +402,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv2i64.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vwsub.w_wv_nxv2i64_nxv2i64_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -592,14 +418,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wv_nxv2i64_nxv2i64_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv4i64.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vwsub.w_wv_nxv4i64_nxv4i64_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wv_nxv4i64_nxv4i64_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv8i64.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vwsub.w_wv_nxv8i64_nxv8i64_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare 
@llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wv_nxv8i64_nxv8i64_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -712,12 +502,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv1i16.i8( - , - , - i8, - iXLen); - define @intrinsic_vwsub.w_wx_nxv1i16_nxv1i16_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv1i16_nxv1i16_i8: ; CHECK: # %bb.0: # %entry @@ -734,14 +518,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv1i16.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wx_nxv1i16_nxv1i16_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv1i16_nxv1i16_i8: ; CHECK: # %bb.0: # %entry @@ -759,12 +535,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv2i16.i8( - , - , - i8, - iXLen); - define @intrinsic_vwsub.w_wx_nxv2i16_nxv2i16_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv2i16_nxv2i16_i8: ; CHECK: # %bb.0: # %entry @@ -781,14 +551,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv2i16.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wx_nxv2i16_nxv2i16_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv2i16_nxv2i16_i8: ; CHECK: # %bb.0: # %entry @@ -806,12 +568,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv4i16.i8( - , - , - i8, - iXLen); - define @intrinsic_vwsub.w_wx_nxv4i16_nxv4i16_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv4i16_nxv4i16_i8: ; CHECK: # %bb.0: # %entry @@ -828,14 +584,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv4i16.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wx_nxv4i16_nxv4i16_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv4i16_nxv4i16_i8: ; CHECK: # %bb.0: # %entry @@ -853,12 
+601,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv8i16.i8( - , - , - i8, - iXLen); - define @intrinsic_vwsub.w_wx_nxv8i16_nxv8i16_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv8i16_nxv8i16_i8: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv8i16.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wx_nxv8i16_nxv8i16_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv8i16_nxv8i16_i8: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv16i16.i8( - , - , - i8, - iXLen); - define @intrinsic_vwsub.w_wx_nxv16i16_nxv16i16_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv16i16_nxv16i16_i8: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv16i16.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wx_nxv16i16_nxv16i16_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv16i16_nxv16i16_i8: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv32i16.i8( - , - , - i8, - iXLen); - define @intrinsic_vwsub.w_wx_nxv32i16_nxv32i16_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv32i16_nxv32i16_i8: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv32i16.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wx_nxv32i16_nxv32i16_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv32i16_nxv32i16_i8: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv1i32.i16( - , - , - i16, - iXLen); - define @intrinsic_vwsub.w_wx_nxv1i32_nxv1i32_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv1i32_nxv1i32_i16: ; CHECK: # %bb.0: # %entry 
@@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv1i32.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wx_nxv1i32_nxv1i32_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv1i32_nxv1i32_i16: ; CHECK: # %bb.0: # %entry @@ -1041,12 +733,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv2i32.i16( - , - , - i16, - iXLen); - define @intrinsic_vwsub.w_wx_nxv2i32_nxv2i32_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv2i32_nxv2i32_i16: ; CHECK: # %bb.0: # %entry @@ -1063,14 +749,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv2i32.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wx_nxv2i32_nxv2i32_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv2i32_nxv2i32_i16: ; CHECK: # %bb.0: # %entry @@ -1088,12 +766,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv4i32.i16( - , - , - i16, - iXLen); - define @intrinsic_vwsub.w_wx_nxv4i32_nxv4i32_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv4i32_nxv4i32_i16: ; CHECK: # %bb.0: # %entry @@ -1110,14 +782,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv4i32.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wx_nxv4i32_nxv4i32_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv4i32_nxv4i32_i16: ; CHECK: # %bb.0: # %entry @@ -1135,12 +799,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv8i32.i16( - , - , - i16, - iXLen); - define @intrinsic_vwsub.w_wx_nxv8i32_nxv8i32_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv8i32_nxv8i32_i16: ; CHECK: # %bb.0: # %entry @@ -1157,14 +815,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv8i32.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wx_nxv8i32_nxv8i32_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vwsub.w_mask_wx_nxv8i32_nxv8i32_i16: ; CHECK: # %bb.0: # %entry @@ -1182,12 +832,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv16i32.i16( - , - , - i16, - iXLen); - define @intrinsic_vwsub.w_wx_nxv16i32_nxv16i32_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv16i32_nxv16i32_i16: ; CHECK: # %bb.0: # %entry @@ -1204,14 +848,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv16i32.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wx_nxv16i32_nxv16i32_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv16i32_nxv16i32_i16: ; CHECK: # %bb.0: # %entry @@ -1229,12 +865,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv1i64.i32( - , - , - i32, - iXLen); - define @intrinsic_vwsub.w_wx_nxv1i64_nxv1i64_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv1i64_nxv1i64_i32: ; CHECK: # %bb.0: # %entry @@ -1251,14 +881,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv1i64.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wx_nxv1i64_nxv1i64_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv1i64_nxv1i64_i32: ; CHECK: # %bb.0: # %entry @@ -1276,12 +898,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv2i64.i32( - , - , - i32, - iXLen); - define @intrinsic_vwsub.w_wx_nxv2i64_nxv2i64_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv2i64_nxv2i64_i32: ; CHECK: # %bb.0: # %entry @@ -1298,14 +914,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv2i64.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wx_nxv2i64_nxv2i64_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv2i64_nxv2i64_i32: ; CHECK: # %bb.0: # %entry @@ -1323,12 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv4i64.i32( - , - , - i32, - iXLen); - define @intrinsic_vwsub.w_wx_nxv4i64_nxv4i64_i32( %0, 
i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv4i64_nxv4i64_i32: ; CHECK: # %bb.0: # %entry @@ -1345,14 +947,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv4i64.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wx_nxv4i64_nxv4i64_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv4i64_nxv4i64_i32: ; CHECK: # %bb.0: # %entry @@ -1370,12 +964,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.nxv8i64.i32( - , - , - i32, - iXLen); - define @intrinsic_vwsub.w_wx_nxv8i64_nxv8i64_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv8i64_nxv8i64_i32: ; CHECK: # %bb.0: # %entry @@ -1392,14 +980,6 @@ entry: ret %a } -declare @llvm.riscv.vwsub.w.mask.nxv8i64.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwsub.w_mask_wx_nxv8i64_nxv8i64_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv8i64_nxv8i64_i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu.ll index ef4ac20fd1ee3..e7e6f2019013a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwsubu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -27,14 +21,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -52,12 +38,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8( - , - , 
- , - iXLen); - define @intrinsic_vwsubu_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -75,14 +55,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -100,12 +72,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vwsubu_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -123,14 +89,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -148,12 +106,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vwsubu_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -172,14 +124,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -197,12 +141,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vwsubu_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -221,14 +159,6 @@ entry: ret %a } 
-declare @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -246,12 +176,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vwsubu_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -270,14 +194,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -295,12 +211,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vwsubu_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -318,14 +228,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -343,12 +245,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vwsubu_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -366,14 +262,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -391,12 +279,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vwsubu_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -415,14 +297,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -440,12 +314,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vwsubu_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -464,14 +332,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -489,12 +349,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vwsubu_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -513,14 +367,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -538,12 +384,6 @@ entry: ret %a } -declare 
@llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vwsubu_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -561,14 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -586,12 +418,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vwsubu_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -610,14 +436,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -635,12 +453,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vwsubu_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -659,14 +471,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -684,12 +488,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vwsubu_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vwsubu_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -708,14 +506,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -733,12 +523,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwsubu_vx_nxv1i16_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -756,14 +540,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -781,12 +557,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwsubu_vx_nxv2i16_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -804,14 +574,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -829,12 +591,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwsubu_vx_nxv4i16_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -852,14 +608,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vx_nxv4i16_nxv4i8_i8( %0, 
%1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -877,12 +625,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwsubu_vx_nxv8i16_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -900,14 +642,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -925,12 +659,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwsubu_vx_nxv16i16_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -948,14 +676,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -973,12 +693,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vwsubu_vx_nxv32i16_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -996,14 +710,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1021,12 +727,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16( - , - , - i16, - iXLen); - define 
@intrinsic_vwsubu_vx_nxv1i32_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1044,14 +744,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1069,12 +761,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwsubu_vx_nxv2i32_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1092,14 +778,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1117,12 +795,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwsubu_vx_nxv4i32_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1140,14 +812,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1165,12 +829,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwsubu_vx_nxv8i32_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1188,14 +846,6 @@ entry: ret %a } -declare 
@llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1213,12 +863,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vwsubu_vx_nxv16i32_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1236,14 +880,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1261,12 +897,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwsubu_vx_nxv1i64_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1284,14 +914,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vx_nxv1i64_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1309,12 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwsubu_vx_nxv2i64_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1332,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vx_nxv2i64_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vwsubu_mask_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1357,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwsubu_vx_nxv4i64_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1380,14 +982,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vx_nxv4i64_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1405,12 +999,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vwsubu_vx_nxv8i64_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1428,14 +1016,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwsubu_mask_vx_nxv8i64_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w.ll index 22688b1e58002..50977a4e35b8e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vwsubu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vwsubu.w_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vwsubu.w_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vwsubu.w_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare 
@llvm.riscv.vwsubu.w.nxv16i16.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vwsubu.w_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vwsubu.w_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -287,12 +203,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vwsubu.w_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -309,14 +219,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vwsubu.w_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vwsubu.w_wv_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vwsubu.w_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vwsubu.w_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vwsubu.w_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16( - , - , - , - , - iXLen, - 
iXLen); - define @intrinsic_vwsubu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -523,12 +369,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vwsubu.w_wv_nxv1i64_nxv1i64_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -545,14 +385,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wv_nxv1i64_nxv1i64_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -570,12 +402,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vwsubu.w_wv_nxv2i64_nxv2i64_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -592,14 +418,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv2i64.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wv_nxv2i64_nxv2i64_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vwsubu.w_wv_nxv4i64_nxv4i64_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv4i64.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wv_nxv4i64_nxv4i64_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # 
%entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vwsubu.w_wv_nxv8i64_nxv8i64_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv8i64.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -712,12 +502,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv1i16.i8( - , - , - i8, - iXLen); - define @intrinsic_vwsubu.w_wx_nxv1i16_nxv1i16_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv1i16_nxv1i16_i8: ; CHECK: # %bb.0: # %entry @@ -734,14 +518,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv1i16.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wx_nxv1i16_nxv1i16_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv1i16_nxv1i16_i8: ; CHECK: # %bb.0: # %entry @@ -759,12 +535,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv2i16.i8( - , - , - i8, - iXLen); - define @intrinsic_vwsubu.w_wx_nxv2i16_nxv2i16_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv2i16_nxv2i16_i8: ; CHECK: # %bb.0: # %entry @@ -781,14 +551,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv2i16.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wx_nxv2i16_nxv2i16_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv2i16_nxv2i16_i8: ; CHECK: # %bb.0: # %entry @@ -806,12 +568,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv4i16.i8( - , - , - i8, - iXLen); - define @intrinsic_vwsubu.w_wx_nxv4i16_nxv4i16_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vwsubu.w_wx_nxv4i16_nxv4i16_i8: ; CHECK: # %bb.0: # %entry @@ -828,14 +584,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv4i16.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wx_nxv4i16_nxv4i16_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv4i16_nxv4i16_i8: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv8i16.i8( - , - , - i8, - iXLen); - define @intrinsic_vwsubu.w_wx_nxv8i16_nxv8i16_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv8i16_nxv8i16_i8: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv8i16.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wx_nxv8i16_nxv8i16_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv8i16_nxv8i16_i8: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv16i16.i8( - , - , - i8, - iXLen); - define @intrinsic_vwsubu.w_wx_nxv16i16_nxv16i16_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv16i16_nxv16i16_i8: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv16i16.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wx_nxv16i16_nxv16i16_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv16i16_nxv16i16_i8: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv32i16.i8( - , - , - i8, - iXLen); - define @intrinsic_vwsubu.w_wx_nxv32i16_nxv32i16_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv32i16_nxv32i16_i8: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv32i16.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wx_nxv32i16_nxv32i16_i8( 
%0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv32i16_nxv32i16_i8: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv1i32.i16( - , - , - i16, - iXLen); - define @intrinsic_vwsubu.w_wx_nxv1i32_nxv1i32_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv1i32_nxv1i32_i16: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv1i32.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wx_nxv1i32_nxv1i32_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv1i32_nxv1i32_i16: ; CHECK: # %bb.0: # %entry @@ -1041,12 +733,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv2i32.i16( - , - , - i16, - iXLen); - define @intrinsic_vwsubu.w_wx_nxv2i32_nxv2i32_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv2i32_nxv2i32_i16: ; CHECK: # %bb.0: # %entry @@ -1063,14 +749,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv2i32.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wx_nxv2i32_nxv2i32_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv2i32_nxv2i32_i16: ; CHECK: # %bb.0: # %entry @@ -1088,12 +766,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv4i32.i16( - , - , - i16, - iXLen); - define @intrinsic_vwsubu.w_wx_nxv4i32_nxv4i32_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv4i32_nxv4i32_i16: ; CHECK: # %bb.0: # %entry @@ -1110,14 +782,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv4i32.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wx_nxv4i32_nxv4i32_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv4i32_nxv4i32_i16: ; CHECK: # %bb.0: # %entry @@ -1135,12 +799,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv8i32.i16( - , - , - i16, - 
iXLen); - define @intrinsic_vwsubu.w_wx_nxv8i32_nxv8i32_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv8i32_nxv8i32_i16: ; CHECK: # %bb.0: # %entry @@ -1157,14 +815,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv8i32.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wx_nxv8i32_nxv8i32_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv8i32_nxv8i32_i16: ; CHECK: # %bb.0: # %entry @@ -1182,12 +832,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv16i32.i16( - , - , - i16, - iXLen); - define @intrinsic_vwsubu.w_wx_nxv16i32_nxv16i32_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv16i32_nxv16i32_i16: ; CHECK: # %bb.0: # %entry @@ -1204,14 +848,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv16i32.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wx_nxv16i32_nxv16i32_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv16i32_nxv16i32_i16: ; CHECK: # %bb.0: # %entry @@ -1229,12 +865,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv1i64.i32( - , - , - i32, - iXLen); - define @intrinsic_vwsubu.w_wx_nxv1i64_nxv1i64_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv1i64_nxv1i64_i32: ; CHECK: # %bb.0: # %entry @@ -1251,14 +881,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv1i64.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wx_nxv1i64_nxv1i64_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv1i64_nxv1i64_i32: ; CHECK: # %bb.0: # %entry @@ -1276,12 +898,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv2i64.i32( - , - , - i32, - iXLen); - define @intrinsic_vwsubu.w_wx_nxv2i64_nxv2i64_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv2i64_nxv2i64_i32: ; CHECK: # %bb.0: # %entry @@ -1298,14 +914,6 @@ entry: ret %a } 
-declare @llvm.riscv.vwsubu.w.mask.nxv2i64.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wx_nxv2i64_nxv2i64_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv2i64_nxv2i64_i32: ; CHECK: # %bb.0: # %entry @@ -1323,12 +931,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv4i64.i32( - , - , - i32, - iXLen); - define @intrinsic_vwsubu.w_wx_nxv4i64_nxv4i64_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv4i64_nxv4i64_i32: ; CHECK: # %bb.0: # %entry @@ -1345,14 +947,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv4i64.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wx_nxv4i64_nxv4i64_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv4i64_nxv4i64_i32: ; CHECK: # %bb.0: # %entry @@ -1370,12 +964,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.nxv8i64.i32( - , - , - i32, - iXLen); - define @intrinsic_vwsubu.w_wx_nxv8i64_nxv8i64_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv8i64_nxv8i64_i32: ; CHECK: # %bb.0: # %entry @@ -1392,14 +980,6 @@ entry: ret %a } -declare @llvm.riscv.vwsubu.w.mask.nxv8i64.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vwsubu.w_mask_wx_nxv8i64_nxv8i64_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv8i64_nxv8i64_i32: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll index 1694a7af0a0b9..9287ffa4794eb 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.vp.xor.nxv8i7(, , , i32) - define @vxor_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_nxv8i7: ; CHECK: # %bb.0: @@ -18,8 +16,6 @@ define 
@vxor_vx_nxv8i7( %a, i7 signext %b, %v } -declare @llvm.vp.xor.nxv1i8(, , , i32) - define @vxor_vv_nxv1i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv1i8: ; CHECK: # %bb.0: @@ -104,8 +100,6 @@ define @vxor_vi_nxv1i8_unmasked_1( %va, i32 z ret %v } -declare @llvm.vp.xor.nxv2i8(, , , i32) - define @vxor_vv_nxv2i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv2i8: ; CHECK: # %bb.0: @@ -190,8 +184,6 @@ define @vxor_vi_nxv2i8_unmasked_1( %va, i32 z ret %v } -declare @llvm.vp.xor.nxv4i8(, , , i32) - define @vxor_vv_nxv4i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv4i8: ; CHECK: # %bb.0: @@ -276,8 +268,6 @@ define @vxor_vi_nxv4i8_unmasked_1( %va, i32 z ret %v } -declare @llvm.vp.xor.nxv8i8(, , , i32) - define @vxor_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv8i8: ; CHECK: # %bb.0: @@ -362,8 +352,6 @@ define @vxor_vi_nxv8i8_unmasked_1( %va, i32 z ret %v } -declare @llvm.vp.xor.nxv15i8(, , , i32) - define @vxor_vv_nxv15i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv15i8: ; CHECK: # %bb.0: @@ -448,8 +436,6 @@ define @vxor_vi_nxv15i8_unmasked_1( %va, i3 ret %v } -declare @llvm.vp.xor.nxv16i8(, , , i32) - define @vxor_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv16i8: ; CHECK: # %bb.0: @@ -534,8 +520,6 @@ define @vxor_vi_nxv16i8_unmasked_1( %va, i3 ret %v } -declare @llvm.vp.xor.nxv32i8(, , , i32) - define @vxor_vv_nxv32i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv32i8: ; CHECK: # %bb.0: @@ -620,8 +604,6 @@ define @vxor_vi_nxv32i8_unmasked_1( %va, i3 ret %v } -declare @llvm.vp.xor.nxv64i8(, , , i32) - define @vxor_vv_nxv64i8( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv64i8: ; CHECK: # %bb.0: @@ -706,8 +688,6 @@ define @vxor_vi_nxv64i8_unmasked_1( %va, i3 ret %v } -declare @llvm.vp.xor.nxv1i16(, , , i32) - define @vxor_vv_nxv1i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv1i16: ; CHECK: # %bb.0: @@ -804,8 +784,6 @@ 
define @vxor_vi_nxv1i16_unmasked_1( %va, i3 ret %v } -declare @llvm.vp.xor.nxv2i16(, , , i32) - define @vxor_vv_nxv2i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv2i16: ; CHECK: # %bb.0: @@ -890,8 +868,6 @@ define @vxor_vi_nxv2i16_unmasked_1( %va, i3 ret %v } -declare @llvm.vp.xor.nxv4i16(, , , i32) - define @vxor_vv_nxv4i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv4i16: ; CHECK: # %bb.0: @@ -976,8 +952,6 @@ define @vxor_vi_nxv4i16_unmasked_1( %va, i3 ret %v } -declare @llvm.vp.xor.nxv8i16(, , , i32) - define @vxor_vv_nxv8i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv8i16: ; CHECK: # %bb.0: @@ -1062,8 +1036,6 @@ define @vxor_vi_nxv8i16_unmasked_1( %va, i3 ret %v } -declare @llvm.vp.xor.nxv16i16(, , , i32) - define @vxor_vv_nxv16i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv16i16: ; CHECK: # %bb.0: @@ -1148,8 +1120,6 @@ define @vxor_vi_nxv16i16_unmasked_1( %va, ret %v } -declare @llvm.vp.xor.nxv32i16(, , , i32) - define @vxor_vv_nxv32i16( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv32i16: ; CHECK: # %bb.0: @@ -1234,8 +1204,6 @@ define @vxor_vi_nxv32i16_unmasked_1( %va, ret %v } -declare @llvm.vp.xor.nxv1i32(, , , i32) - define @vxor_vv_nxv1i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv1i32: ; CHECK: # %bb.0: @@ -1320,8 +1288,6 @@ define @vxor_vi_nxv1i32_unmasked_1( %va, i3 ret %v } -declare @llvm.vp.xor.nxv2i32(, , , i32) - define @vxor_vv_nxv2i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv2i32: ; CHECK: # %bb.0: @@ -1406,8 +1372,6 @@ define @vxor_vi_nxv2i32_unmasked_1( %va, i3 ret %v } -declare @llvm.vp.xor.nxv4i32(, , , i32) - define @vxor_vv_nxv4i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv4i32: ; CHECK: # %bb.0: @@ -1492,8 +1456,6 @@ define @vxor_vi_nxv4i32_unmasked_1( %va, i3 ret %v } -declare @llvm.vp.xor.nxv8i32(, , , i32) - define @vxor_vv_nxv8i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv8i32: ; 
CHECK: # %bb.0: @@ -1578,8 +1540,6 @@ define @vxor_vi_nxv8i32_unmasked_1( %va, i3 ret %v } -declare @llvm.vp.xor.nxv16i32(, , , i32) - define @vxor_vv_nxv16i32( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv16i32: ; CHECK: # %bb.0: @@ -1664,8 +1624,6 @@ define @vxor_vi_nxv16i32_unmasked_1( %va, ret %v } -declare @llvm.vp.xor.nxv1i64(, , , i32) - define @vxor_vv_nxv1i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv1i64: ; CHECK: # %bb.0: @@ -1778,8 +1736,6 @@ define @vxor_vi_nxv1i64_unmasked_1( %va, i3 ret %v } -declare @llvm.vp.xor.nxv2i64(, , , i32) - define @vxor_vv_nxv2i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv2i64: ; CHECK: # %bb.0: @@ -1892,8 +1848,6 @@ define @vxor_vi_nxv2i64_unmasked_1( %va, i3 ret %v } -declare @llvm.vp.xor.nxv4i64(, , , i32) - define @vxor_vv_nxv4i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv4i64: ; CHECK: # %bb.0: @@ -2006,8 +1960,6 @@ define @vxor_vi_nxv4i64_unmasked_1( %va, i3 ret %v } -declare @llvm.vp.xor.nxv8i64(, , , i32) - define @vxor_vv_nxv8i64( %va, %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv8i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor.ll b/llvm/test/CodeGen/RISCV/rvv/vxor.ll index 05fdbac438743..da78f57df69ea 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vxor.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vxor.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vxor.nxv1i8.nxv1i8( - , - , - , - iXLen); - define @intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -26,14 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv1i8.nxv1i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vxor_mask_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry @@ -51,12 +37,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv2i8.nxv2i8( - , - , - , - iXLen); - define @intrinsic_vxor_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -73,14 +53,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv2i8.nxv2i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry @@ -98,12 +70,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv4i8.nxv4i8( - , - , - , - iXLen); - define @intrinsic_vxor_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -120,14 +86,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv4i8.nxv4i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -145,12 +103,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv8i8.nxv8i8( - , - , - , - iXLen); - define @intrinsic_vxor_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -167,14 +119,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv8i8.nxv8i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -192,12 +136,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv16i8.nxv16i8( - , - , - , - iXLen); - define @intrinsic_vxor_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: 
# %entry @@ -214,14 +152,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv16i8.nxv16i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -239,12 +169,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv32i8.nxv32i8( - , - , - , - iXLen); - define @intrinsic_vxor_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -261,14 +185,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv32i8.nxv32i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -286,12 +202,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv64i8.nxv64i8( - , - , - , - iXLen); - define @intrinsic_vxor_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -308,14 +218,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv64i8.nxv64i8( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry @@ -334,12 +236,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv1i16.nxv1i16( - , - , - , - iXLen); - define @intrinsic_vxor_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -356,14 +252,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv1i16.nxv1i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vxor_mask_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -381,12 +269,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv2i16.nxv2i16( - , - , - , - iXLen); - define @intrinsic_vxor_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -403,14 +285,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv2i16.nxv2i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -428,12 +302,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv4i16.nxv4i16( - , - , - , - iXLen); - define @intrinsic_vxor_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -450,14 +318,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv4i16.nxv4i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -475,12 +335,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv8i16.nxv8i16( - , - , - , - iXLen); - define @intrinsic_vxor_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -497,14 +351,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv8i16.nxv8i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -522,12 +368,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv16i16.nxv16i16( - , - , - , - iXLen); - define @intrinsic_vxor_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vxor_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -544,14 +384,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv16i16.nxv16i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -569,12 +401,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv32i16.nxv32i16( - , - , - , - iXLen); - define @intrinsic_vxor_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -591,14 +417,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv32i16.nxv32i16( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -617,12 +435,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv1i32.nxv1i32( - , - , - , - iXLen); - define @intrinsic_vxor_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -639,14 +451,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv1i32.nxv1i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -664,12 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv2i32.nxv2i32( - , - , - , - iXLen); - define @intrinsic_vxor_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -686,14 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv2i32.nxv2i32( - , - , - , - , - iXLen, - iXLen); - define 
@intrinsic_vxor_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -711,12 +501,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv4i32.nxv4i32( - , - , - , - iXLen); - define @intrinsic_vxor_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -733,14 +517,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv4i32.nxv4i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -758,12 +534,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv8i32.nxv8i32( - , - , - , - iXLen); - define @intrinsic_vxor_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -780,14 +550,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv8i32.nxv8i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -805,12 +567,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv16i32.nxv16i32( - , - , - , - iXLen); - define @intrinsic_vxor_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -827,14 +583,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv16i32.nxv16i32( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -853,12 +601,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv1i64.nxv1i64( - , - 
, - , - iXLen); - define @intrinsic_vxor_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -875,14 +617,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv1i64.nxv1i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -900,12 +634,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv2i64.nxv2i64( - , - , - , - iXLen); - define @intrinsic_vxor_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -922,14 +650,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv2i64.nxv2i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -947,12 +667,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv4i64.nxv4i64( - , - , - , - iXLen); - define @intrinsic_vxor_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -969,14 +683,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv4i64.nxv4i64( - , - , - , - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -994,12 +700,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv8i64.nxv8i64( - , - , - , - iXLen); - define @intrinsic_vxor_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1016,14 +716,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv8i64.nxv8i64( - , - , 
- , - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -1042,12 +734,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv1i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vxor_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1064,14 +750,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv1i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry @@ -1089,12 +767,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv2i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vxor_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1111,14 +783,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv2i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry @@ -1136,12 +800,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv4i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vxor_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1158,14 +816,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv4i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry @@ -1183,12 +833,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv8i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vxor_vx_nxv8i8_nxv8i8_i8( %0, 
i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1205,14 +849,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv8i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry @@ -1230,12 +866,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv16i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vxor_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1252,14 +882,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv16i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry @@ -1277,12 +899,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv32i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vxor_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1299,14 +915,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv32i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry @@ -1324,12 +932,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv64i8.i8( - , - , - i8, - iXLen); - define @intrinsic_vxor_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1346,14 +948,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv64i8.i8( - , - , - i8, - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vxor_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry @@ -1371,12 +965,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv1i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vxor_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1393,14 +981,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv1i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry @@ -1418,12 +998,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv2i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vxor_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1440,14 +1014,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv2i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry @@ -1465,12 +1031,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv4i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vxor_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1487,14 +1047,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv4i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry @@ -1512,12 +1064,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv8i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vxor_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vxor_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1534,14 +1080,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv8i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry @@ -1559,12 +1097,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv16i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vxor_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1581,14 +1113,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv16i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry @@ -1606,12 +1130,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv32i16.i16( - , - , - i16, - iXLen); - define @intrinsic_vxor_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1628,14 +1146,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv32i16.i16( - , - , - i16, - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry @@ -1653,12 +1163,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv1i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vxor_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1675,14 +1179,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv1i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) 
nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry @@ -1700,12 +1196,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv2i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vxor_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1722,14 +1212,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv2i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry @@ -1747,12 +1229,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv4i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vxor_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1769,14 +1245,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv4i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry @@ -1794,12 +1262,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv8i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vxor_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1816,14 +1278,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv8i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry @@ -1841,12 +1295,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv16i32.i32( - , - , - i32, - iXLen); - define @intrinsic_vxor_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vxor_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1863,14 +1311,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv16i32.i32( - , - , - i32, - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry @@ -1888,12 +1328,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv1i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vxor_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vxor_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1922,14 +1356,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv1i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vxor_mask_vx_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry @@ -1959,12 +1385,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv2i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vxor_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vxor_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -1993,14 +1413,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv2i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vxor_mask_vx_nxv2i64_nxv2i64_i64: ; RV32: # %bb.0: # %entry @@ -2030,12 +1442,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv4i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vxor_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vxor_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2064,14 +1470,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv4i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; 
RV32-LABEL: intrinsic_vxor_mask_vx_nxv4i64_nxv4i64_i64: ; RV32: # %bb.0: # %entry @@ -2101,12 +1499,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.nxv8i64.i64( - , - , - i64, - iXLen); - define @intrinsic_vxor_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vxor_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry @@ -2135,14 +1527,6 @@ entry: ret %a } -declare @llvm.riscv.vxor.mask.nxv8i64.i64( - , - , - i64, - , - iXLen, - iXLen); - define @intrinsic_vxor_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { ; RV32-LABEL: intrinsic_vxor_mask_vx_nxv8i64_nxv8i64_i64: ; RV32: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/vxrm-insert.ll b/llvm/test/CodeGen/RISCV/rvv/vxrm-insert.ll index 34461c7a7a312..46609758391a1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vxrm-insert.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vxrm-insert.ll @@ -5,17 +5,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 -declare @llvm.riscv.vaadd.nxv1i8.nxv1i8( - , - , - , - iXLen, iXLen); -declare @llvm.riscv.vasub.nxv1i8.nxv1i8( - , - , - , - iXLen, iXLen); - ; Test same rounding mode in one block. define @test1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: test1: @@ -417,10 +406,6 @@ for.end: ret void } -declare iXLen @llvm.riscv.vsetvli.iXLen(iXLen, iXLen immarg, iXLen immarg) -declare @llvm.riscv.vle.nxv1i8.iXLen(, ptr nocapture, iXLen) -declare void @llvm.riscv.vse.nxv1i8.iXLen(, ptr nocapture, iXLen) - ; Test loop with dominating vxrm write. Make sure there is no write in the loop. 
define void @test11(ptr nocapture %ptr_dest, ptr nocapture readonly %ptr_op1, ptr nocapture readonly %ptr_op2, iXLen %n) { ; CHECK-LABEL: test11: diff --git a/llvm/test/CodeGen/RISCV/rvv/vzext-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vzext-vp-mask.ll index e14236c0258c4..e5741bcdc82ad 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vzext-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vzext-vp-mask.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+v < %s | FileCheck %s -declare @llvm.vp.zext.nxv2i16.nxv2i1(, , i32) - define @vzext_nxv2i1_nxv2i16( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vzext_nxv2i1_nxv2i16: ; CHECK: # %bb.0: @@ -26,8 +24,6 @@ define @vzext_nxv2i1_nxv2i16_unmasked( %a, i ret %v } -declare @llvm.vp.zext.nxv2i32.nxv2i1(, , i32) - define @vzext_nxv2i1_nxv2i32( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vzext_nxv2i1_nxv2i32: ; CHECK: # %bb.0: @@ -50,8 +46,6 @@ define @vzext_nxv2i1_nxv2i32_unmasked( %a, i ret %v } -declare @llvm.vp.zext.nxv2i64.nxv2i1(, , i32) - define @vzext_nxv2i1_nxv2i64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vzext_nxv2i1_nxv2i64: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll index 34337b1af1df5..9713b617b8384 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+v < %s | FileCheck %s -declare @llvm.vp.zext.nxv2i16.nxv2i8(, , i32) - define @vzext_nxv2i8_nxv2i16( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vzext_nxv2i8_nxv2i16: ; CHECK: # %bb.0: @@ -26,8 +24,6 @@ define @vzext_nxv2i8_nxv2i16_unmasked( %a, i ret %v } -declare @llvm.vp.zext.nxv2i32.nxv2i8(, , i32) - define @vzext_nxv2i8_nxv2i32( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vzext_nxv2i8_nxv2i32: ; CHECK: # %bb.0: @@ -50,8 +46,6 @@ define @vzext_nxv2i8_nxv2i32_unmasked( %a, i ret %v } -declare 
@llvm.vp.zext.nxv2i64.nxv2i8(, , i32) - define @vzext_nxv2i8_nxv2i64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vzext_nxv2i8_nxv2i64: ; CHECK: # %bb.0: @@ -74,8 +68,6 @@ define @vzext_nxv2i8_nxv2i64_unmasked( %a, i ret %v } -declare @llvm.vp.zext.nxv2i32.nxv2i16(, , i32) - define @vzext_nxv2i16_nxv2i32( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vzext_nxv2i16_nxv2i32: ; CHECK: # %bb.0: @@ -98,8 +90,6 @@ define @vzext_nxv2i16_nxv2i32_unmasked( %a, ret %v } -declare @llvm.vp.zext.nxv2i64.nxv2i16(, , i32) - define @vzext_nxv2i16_nxv2i64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vzext_nxv2i16_nxv2i64: ; CHECK: # %bb.0: @@ -122,8 +112,6 @@ define @vzext_nxv2i16_nxv2i64_unmasked( %a, ret %v } -declare @llvm.vp.zext.nxv2i64.nxv2i32(, , i32) - define @vzext_nxv2i32_nxv2i64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vzext_nxv2i32_nxv2i64: ; CHECK: # %bb.0: @@ -146,8 +134,6 @@ define @vzext_nxv2i32_nxv2i64_unmasked( %a, ret %v } -declare @llvm.vp.zext.nxv32i32.nxv32i8(, , i32) - define @vzext_nxv32i8_nxv32i32( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vzext_nxv32i8_nxv32i32: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vzext.ll b/llvm/test/CodeGen/RISCV/rvv/vzext.ll index 122a9daf1d1ea..34ac26f99fec3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vzext.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vzext.ll @@ -4,11 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK -declare @llvm.riscv.vzext.nxv1i64.nxv1i8( - , - , - iXLen); - define @intrinsic_vzext_vf8_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -25,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv1i64.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf8_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -48,11 +36,6 @@ entry: ret %a } -declare 
@llvm.riscv.vzext.nxv2i64.nxv2i8( - , - , - iXLen); - define @intrinsic_vzext_vf8_nxv2i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf8_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -69,13 +52,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv2i64.nxv2i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf8_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -92,11 +68,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv4i64.nxv4i8( - , - , - iXLen); - define @intrinsic_vzext_vf8_nxv4i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf8_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -113,13 +84,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv4i64.nxv4i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf8_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -136,11 +100,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv8i64.nxv8i8( - , - , - iXLen); - define @intrinsic_vzext_vf8_nxv8i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf8_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -157,13 +116,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv8i64.nxv8i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf8_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -180,11 +132,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv1i64.nxv1i16( - , - , - iXLen); - define @intrinsic_vzext_vf4_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf4_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -201,13 +148,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv1i64.nxv1i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf4_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -224,11 +164,6 @@ entry: ret %a } 
-declare @llvm.riscv.vzext.nxv2i64.nxv2i16( - , - , - iXLen); - define @intrinsic_vzext_vf4_nxv2i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf4_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -245,13 +180,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv2i64.nxv2i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf4_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -268,11 +196,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv4i64.nxv4i16( - , - , - iXLen); - define @intrinsic_vzext_vf4_nxv4i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf4_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -289,13 +212,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv4i64.nxv4i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf4_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -312,11 +228,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv8i64.nxv8i16( - , - , - iXLen); - define @intrinsic_vzext_vf4_nxv8i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf4_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -333,13 +244,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv8i64.nxv8i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf4_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -356,11 +260,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv1i32.nxv1i8( - , - , - iXLen); - define @intrinsic_vzext_vf4_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf4_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -377,13 +276,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv1i32.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf4_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -400,11 +292,6 @@ 
entry: ret %a } -declare @llvm.riscv.vzext.nxv2i32.nxv2i8( - , - , - iXLen); - define @intrinsic_vzext_vf4_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf4_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -421,13 +308,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv2i32.nxv2i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf4_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -444,11 +324,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv4i32.nxv4i8( - , - , - iXLen); - define @intrinsic_vzext_vf4_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf4_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -465,13 +340,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv4i32.nxv4i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf4_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -488,11 +356,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv8i32.nxv8i8( - , - , - iXLen); - define @intrinsic_vzext_vf4_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf4_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -509,13 +372,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv8i32.nxv8i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf4_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -532,11 +388,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv16i32.nxv16i8( - , - , - iXLen); - define @intrinsic_vzext_vf4_nxv16i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf4_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -553,13 +404,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv16i32.nxv16i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf4_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv16i32: ; CHECK: # %bb.0: # %entry @@ 
-576,11 +420,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv1i64.nxv1i32( - , - , - iXLen); - define @intrinsic_vzext_vf2_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf2_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -597,13 +436,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv1i64.nxv1i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf2_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv1i64: ; CHECK: # %bb.0: # %entry @@ -620,11 +452,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv2i64.nxv2i32( - , - , - iXLen); - define @intrinsic_vzext_vf2_nxv2i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf2_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -641,13 +468,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv2i64.nxv2i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf2_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv2i64: ; CHECK: # %bb.0: # %entry @@ -664,11 +484,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv4i64.nxv4i32( - , - , - iXLen); - define @intrinsic_vzext_vf2_nxv4i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf2_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -685,13 +500,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv4i64.nxv4i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf2_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv4i64: ; CHECK: # %bb.0: # %entry @@ -708,11 +516,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv8i64.nxv8i32( - , - , - iXLen); - define @intrinsic_vzext_vf2_nxv8i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf2_nxv8i64: ; CHECK: # %bb.0: # %entry @@ -729,13 +532,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv8i64.nxv8i32( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf2_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv8i64: ; CHECK: # 
%bb.0: # %entry @@ -752,11 +548,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv1i32.nxv1i16( - , - , - iXLen); - define @intrinsic_vzext_vf2_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf2_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -773,13 +564,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv1i32.nxv1i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf2_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv1i32: ; CHECK: # %bb.0: # %entry @@ -796,11 +580,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv2i32.nxv2i16( - , - , - iXLen); - define @intrinsic_vzext_vf2_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf2_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -817,13 +596,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv2i32.nxv2i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf2_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv2i32: ; CHECK: # %bb.0: # %entry @@ -840,11 +612,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv4i32.nxv4i16( - , - , - iXLen); - define @intrinsic_vzext_vf2_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf2_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -861,13 +628,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv4i32.nxv4i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf2_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv4i32: ; CHECK: # %bb.0: # %entry @@ -884,11 +644,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv8i32.nxv8i16( - , - , - iXLen); - define @intrinsic_vzext_vf2_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf2_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -905,13 +660,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv8i32.nxv8i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf2_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vzext_mask_vf2_nxv8i32: ; CHECK: # %bb.0: # %entry @@ -928,11 +676,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv16i32.nxv16i16( - , - , - iXLen); - define @intrinsic_vzext_vf2_nxv16i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf2_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -949,13 +692,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv16i32.nxv16i16( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf2_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv16i32: ; CHECK: # %bb.0: # %entry @@ -972,11 +708,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv1i16.nxv1i8( - , - , - iXLen); - define @intrinsic_vzext_vf2_nxv1i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf2_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -993,13 +724,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv1i16.nxv1i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf2_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv1i16: ; CHECK: # %bb.0: # %entry @@ -1016,11 +740,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv2i16.nxv2i8( - , - , - iXLen); - define @intrinsic_vzext_vf2_nxv2i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf2_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1037,13 +756,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv2i16.nxv2i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf2_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv2i16: ; CHECK: # %bb.0: # %entry @@ -1060,11 +772,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv4i16.nxv4i8( - , - , - iXLen); - define @intrinsic_vzext_vf2_nxv4i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf2_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1081,13 +788,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv4i16.nxv4i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf2_nxv4i16( %0, %1, %2, iXLen %3) 
nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv4i16: ; CHECK: # %bb.0: # %entry @@ -1104,11 +804,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv8i16.nxv8i8( - , - , - iXLen); - define @intrinsic_vzext_vf2_nxv8i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf2_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1125,13 +820,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv8i16.nxv8i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf2_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv8i16: ; CHECK: # %bb.0: # %entry @@ -1148,11 +836,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv16i16.nxv16i8( - , - , - iXLen); - define @intrinsic_vzext_vf2_nxv16i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf2_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1169,13 +852,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv16i16.nxv16i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf2_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -1192,11 +868,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.nxv32i16.nxv32i8( - , - , - iXLen); - define @intrinsic_vzext_vf2_nxv32i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf2_nxv32i16: ; CHECK: # %bb.0: # %entry @@ -1213,13 +884,6 @@ entry: ret %a } -declare @llvm.riscv.vzext.mask.nxv32i16.nxv32i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_vzext_mask_vf2_nxv32i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv32i16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/wrong-chain-fixed-load.ll b/llvm/test/CodeGen/RISCV/rvv/wrong-chain-fixed-load.ll index 5872a0995feba..bc10da7e7b6e2 100644 --- a/llvm/test/CodeGen/RISCV/rvv/wrong-chain-fixed-load.ll +++ b/llvm/test/CodeGen/RISCV/rvv/wrong-chain-fixed-load.ll @@ -25,7 +25,3 @@ entry: ret void } -; Function Attrs: argmemonly mustprogress nofree nounwind 
willreturn -declare void @llvm.memmove.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1 immarg) #1 - -attributes #1 = { argmemonly mustprogress nofree nounwind willreturn } diff --git a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-x.ll b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-x.ll index 37899e4a80e92..9aaa945fc471c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-x.ll +++ b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-x.ll @@ -15,8 +15,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e8mf8.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e8mf4(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -28,8 +26,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e8mf4.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e8mf2(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -41,8 +37,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e8mf2.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e8m1(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -54,8 +48,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e8m1.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e8m2(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -67,8 +59,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e8m2.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e8m4(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -80,8 +70,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e8m4.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e8m8(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_x_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -93,8 +81,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e8m8.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e16mf4(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -106,8 +92,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e16mf4.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e16mf2(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -119,8 +103,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e16mf2.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e16m1(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -132,8 +114,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e16m1.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e16m2(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -145,8 +125,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e16m2.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e16m4(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -158,8 +136,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e16m4.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e16m8(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -171,8 +147,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e16m8.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e32mf2(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -184,8 
+158,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e32mf2.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e32m1(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -197,8 +169,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e32m1.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e32m2(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -210,8 +180,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e32m2.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e32m4(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -223,8 +191,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e32m4.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen, iXLen, iXLen) - define void @test_sf_vc_x_se_e32m8(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_x_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -236,8 +202,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.x.se.e32m8.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen, iXLen, iXLen) - define @test_sf_vc_v_x_se_e8mf8(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -249,8 +213,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv1i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define @test_sf_vc_v_x_se_e8mf4(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -262,8 +224,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv2i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define @test_sf_vc_v_x_se_e8mf2(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -275,8 +235,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv4i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define 
@test_sf_vc_v_x_se_e8m1(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -288,8 +246,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv8i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define @test_sf_vc_v_x_se_e8m2(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -301,8 +257,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv16i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define @test_sf_vc_v_x_se_e8m4(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -314,8 +268,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv32i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define @test_sf_vc_v_x_se_e8m8(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -327,8 +279,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv64i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define @test_sf_vc_v_x_se_e16mf4(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -340,8 +290,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv1i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) - define @test_sf_vc_v_x_se_e16mf2(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -353,8 +301,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv2i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) - define @test_sf_vc_v_x_se_e16m1(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -366,8 +312,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv4i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) - define @test_sf_vc_v_x_se_e16m2(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -379,8 +323,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv8i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) - define 
@test_sf_vc_v_x_se_e16m4(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -392,8 +334,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv16i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) - define @test_sf_vc_v_x_se_e16m8(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -405,8 +345,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv32i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) - define @test_sf_vc_v_x_se_e32mf2(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -418,8 +356,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv1i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) - define @test_sf_vc_v_x_se_e32m1(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -431,8 +367,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv2i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) - define @test_sf_vc_v_x_se_e32m2(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -444,8 +378,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv4i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) - define @test_sf_vc_v_x_se_e32m4(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -457,8 +389,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv8i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) - define @test_sf_vc_v_x_se_e32m8(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -470,8 +400,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv16i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) - define @test_sf_vc_v_x_e8mf8(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e8mf8: ; CHECK: # %bb.0: # %entry @@ -483,8 +411,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.nxv1i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) 
- define @test_sf_vc_v_x_e8mf4(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e8mf4: ; CHECK: # %bb.0: # %entry @@ -496,8 +422,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.nxv2i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define @test_sf_vc_v_x_e8mf2(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e8mf2: ; CHECK: # %bb.0: # %entry @@ -509,8 +433,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.nxv4i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define @test_sf_vc_v_x_e8m1(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e8m1: ; CHECK: # %bb.0: # %entry @@ -522,8 +444,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.nxv8i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define @test_sf_vc_v_x_e8m2(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e8m2: ; CHECK: # %bb.0: # %entry @@ -535,8 +455,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.nxv16i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define @test_sf_vc_v_x_e8m4(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e8m4: ; CHECK: # %bb.0: # %entry @@ -548,8 +466,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.nxv32i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define @test_sf_vc_v_x_e8m8(i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e8m8: ; CHECK: # %bb.0: # %entry @@ -561,8 +477,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.nxv64i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen) - define @test_sf_vc_v_x_e16mf4(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e16mf4: ; CHECK: # %bb.0: # %entry @@ -574,8 +488,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.nxv1i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) - define @test_sf_vc_v_x_e16mf2(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e16mf2: ; CHECK: # %bb.0: # %entry @@ -587,8 +499,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.nxv2i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) - define @test_sf_vc_v_x_e16m1(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_v_x_e16m1: ; CHECK: # %bb.0: # %entry @@ -600,8 +510,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.nxv4i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) - define @test_sf_vc_v_x_e16m2(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e16m2: ; CHECK: # %bb.0: # %entry @@ -613,8 +521,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.nxv8i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) - define @test_sf_vc_v_x_e16m4(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e16m4: ; CHECK: # %bb.0: # %entry @@ -626,8 +532,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.nxv16i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) - define @test_sf_vc_v_x_e16m8(i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e16m8: ; CHECK: # %bb.0: # %entry @@ -639,8 +543,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.nxv32i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen) - define @test_sf_vc_v_x_e32mf2(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e32mf2: ; CHECK: # %bb.0: # %entry @@ -652,8 +554,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.nxv1i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) - define @test_sf_vc_v_x_e32m1(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e32m1: ; CHECK: # %bb.0: # %entry @@ -665,8 +565,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.nxv2i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) - define @test_sf_vc_v_x_e32m2(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e32m2: ; CHECK: # %bb.0: # %entry @@ -678,8 +576,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.nxv4i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) - define @test_sf_vc_v_x_e32m4(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e32m4: ; CHECK: # %bb.0: # %entry @@ -691,8 +587,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.nxv8i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) - define @test_sf_vc_v_x_e32m8(i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_x_e32m8: ; CHECK: # %bb.0: # %entry @@ 
-704,8 +598,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.nxv16i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen) - define void @test_sf_vc_i_se_e8mf8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -717,8 +609,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e8mf8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e8mf4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -730,8 +620,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e8mf4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e8mf2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -743,8 +631,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e8mf2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e8m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -756,8 +642,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e8m1.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e8m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -769,8 +653,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e8m2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e8m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -782,8 +664,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e8m4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e8m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -795,8 +675,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e8m8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e16mf4(iXLen %vl) { ; 
CHECK-LABEL: test_sf_vc_i_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -808,8 +686,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e16mf4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e16mf2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -821,8 +697,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e16mf2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e16m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -834,8 +708,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e16m1.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e16m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -847,8 +719,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e16m2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e16m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -860,8 +730,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e16m4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e16m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -873,8 +741,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e16m8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e32mf2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -886,8 +752,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e32mf2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e32m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -899,8 +763,6 @@ entry: ret void } -declare void 
@llvm.riscv.sf.vc.i.se.e32m1.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e32m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -912,8 +774,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e32m2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e32m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -925,8 +785,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e32m4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e32m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -938,8 +796,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e32m8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e64m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -951,8 +807,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e64m1.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e64m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -964,8 +818,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e64m2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e64m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -977,8 +829,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e64m4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define void @test_sf_vc_i_se_e64m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_i_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -990,8 +840,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.i.se.e64m8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_se_e8mf8(iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_v_i_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1003,8 +851,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv1i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_se_e8mf4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1016,8 +862,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv2i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_se_e8mf2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1029,8 +873,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv4i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_se_e8m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -1042,8 +884,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv8i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_se_e8m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -1055,8 +895,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv16i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_se_e8m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -1068,8 +906,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv32i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_se_e8m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -1081,8 +917,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv64i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_se_e16mf4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1094,8 +928,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv1i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_se_e16mf2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1107,8 +939,6 @@ entry: ret %0 } 
-declare @llvm.riscv.sf.vc.v.i.se.nxv2i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_se_e16m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -1120,8 +950,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv4i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_se_e16m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -1133,8 +961,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv8i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_se_e16m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -1146,8 +972,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv16i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_se_e16m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -1159,8 +983,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv32i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_se_e32mf2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1172,8 +994,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv1i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_se_e32m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1185,8 +1005,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv2i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_se_e32m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1198,8 +1016,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv4i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_se_e32m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1211,8 +1027,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv8i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, 
iXLen) - define @test_sf_vc_v_i_se_e32m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -1224,8 +1038,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv16i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_se_e64m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -1237,8 +1049,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv1i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_se_e64m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -1250,8 +1060,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv2i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_se_e64m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -1263,8 +1071,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv4i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_se_e64m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -1276,8 +1082,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv8i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_e8mf8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1289,8 +1093,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv1i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_e8mf4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1302,8 +1104,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv2i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_e8mf2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1315,8 +1115,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv4i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_e8m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e8m1: ; CHECK: # %bb.0: # 
%entry @@ -1328,8 +1126,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv8i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_e8m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e8m2: ; CHECK: # %bb.0: # %entry @@ -1341,8 +1137,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv16i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_e8m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e8m4: ; CHECK: # %bb.0: # %entry @@ -1354,8 +1148,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv32i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_e8m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e8m8: ; CHECK: # %bb.0: # %entry @@ -1367,8 +1159,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv64i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_e16mf4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1380,8 +1170,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv1i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_e16mf2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1393,8 +1181,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv2i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_e16m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e16m1: ; CHECK: # %bb.0: # %entry @@ -1406,8 +1192,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv4i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_e16m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e16m2: ; CHECK: # %bb.0: # %entry @@ -1419,8 +1203,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv8i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_e16m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e16m4: ; CHECK: # %bb.0: # %entry @@ -1432,8 +1214,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv16i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define 
@test_sf_vc_v_i_e16m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e16m8: ; CHECK: # %bb.0: # %entry @@ -1445,8 +1225,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv32i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_e32mf2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1458,8 +1236,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv1i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_e32m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e32m1: ; CHECK: # %bb.0: # %entry @@ -1471,8 +1247,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv2i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_e32m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e32m2: ; CHECK: # %bb.0: # %entry @@ -1484,8 +1258,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv4i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_e32m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e32m4: ; CHECK: # %bb.0: # %entry @@ -1497,8 +1269,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv8i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_e32m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e32m8: ; CHECK: # %bb.0: # %entry @@ -1510,8 +1280,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv16i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_e64m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e64m1: ; CHECK: # %bb.0: # %entry @@ -1523,8 +1291,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv1i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_e64m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e64m2: ; CHECK: # %bb.0: # %entry @@ -1536,8 +1302,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv2i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_e64m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e64m4: ; CHECK: # %bb.0: # %entry @@ -1549,8 +1313,6 @@ entry: ret %0 } -declare 
@llvm.riscv.sf.vc.v.i.nxv4i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_v_i_e64m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_i_e64m8: ; CHECK: # %bb.0: # %entry @@ -1562,8 +1324,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv8i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_se_e16mf4(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1575,8 +1335,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv1f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_se_e16mf2(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1588,8 +1346,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv2f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_se_e16m1(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -1601,8 +1357,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv4f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_se_e16m2(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -1614,8 +1368,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv8f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_se_e16m4(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -1627,8 +1379,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv16f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_se_e16m8(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -1640,8 +1390,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv32f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_se_e32mf2(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1653,8 +1401,6 @@ entry: ret %0 } -declare 
@llvm.riscv.sf.vc.v.i.se.nxv1f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_se_e32m1(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1666,8 +1412,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv2f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_se_e32m2(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1679,8 +1423,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv4f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_se_e32m4(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1692,8 +1434,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv8f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_se_e32m8(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -1705,8 +1445,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv16f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_se_e64m1(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -1718,8 +1456,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv1f64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_se_e64m2(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -1731,8 +1467,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv2f64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_se_e64m4(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -1744,8 +1478,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv4f64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_se_e64m8(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -1757,8 +1489,6 @@ entry: ret %0 } -declare 
@llvm.riscv.sf.vc.v.i.se.nxv8f64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_e16mf4(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1770,8 +1500,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv1f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_e16mf2(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1783,8 +1511,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv2f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_e16m1(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_e16m1: ; CHECK: # %bb.0: # %entry @@ -1796,8 +1522,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv4f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_e16m2(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_e16m2: ; CHECK: # %bb.0: # %entry @@ -1809,8 +1533,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv8f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_e16m4(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_e16m4: ; CHECK: # %bb.0: # %entry @@ -1822,8 +1544,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv16f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_e16m8(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_e16m8: ; CHECK: # %bb.0: # %entry @@ -1835,8 +1555,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv32f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_e32mf2(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1848,8 +1566,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv1f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_e32m1(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_e32m1: ; CHECK: # %bb.0: # %entry @@ -1861,8 +1577,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv2f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define 
@test_f_sf_vc_v_i_e32m2(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_e32m2: ; CHECK: # %bb.0: # %entry @@ -1874,8 +1588,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv4f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_e32m4(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_e32m4: ; CHECK: # %bb.0: # %entry @@ -1887,8 +1599,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv8f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_e32m8(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_e32m8: ; CHECK: # %bb.0: # %entry @@ -1900,8 +1610,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv16f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_e64m1(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_e64m1: ; CHECK: # %bb.0: # %entry @@ -1913,8 +1621,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv1f64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_e64m2(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_e64m2: ; CHECK: # %bb.0: # %entry @@ -1926,8 +1632,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv2f64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_e64m4(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_e64m4: ; CHECK: # %bb.0: # %entry @@ -1939,8 +1643,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv4f64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_f_sf_vc_v_i_e64m8(iXLen %vl) { ; CHECK-LABEL: test_f_sf_vc_v_i_e64m8: ; CHECK: # %bb.0: # %entry @@ -1952,8 +1654,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.nxv8f64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_fv_x_se_e16mf4(i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_x_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1965,8 +1665,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv1f16.i16.iXLen(iXLen, iXLen, i16, iXLen) - define @test_sf_vc_fv_x_se_e16mf2(i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_x_se_e16mf2: ; CHECK: # %bb.0: 
# %entry @@ -1978,8 +1676,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv2f16.i16.iXLen(iXLen, iXLen, i16, iXLen) - define @test_sf_vc_fv_x_se_e16m1(i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_x_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -1991,8 +1687,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv4f16.i16.iXLen(iXLen, iXLen, i16, iXLen) - define @test_sf_vc_fv_x_se_e16m2(i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_x_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -2004,8 +1698,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv8f16.i16.iXLen(iXLen, iXLen, i16, iXLen) - define @test_sf_vc_fv_x_se_e16m4(i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_x_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -2017,8 +1709,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv16f16.i16.iXLen(iXLen, iXLen, i16, iXLen) - define @test_sf_vc_fv_x_se_e16m8(i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_x_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -2030,8 +1720,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv32f16.i16.iXLen(iXLen, iXLen, i16, iXLen) - define @test_sf_vc_fv_x_se_e32mf2(i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_x_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2043,8 +1731,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv1f32.i32.iXLen(iXLen, iXLen, i32, iXLen) - define @test_sf_vc_fv_x_se_e32m1(i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_x_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2056,8 +1742,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv2f32.i32.iXLen(iXLen, iXLen, i32, iXLen) - define @test_sf_vc_fv_x_se_e32m2(i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_x_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2069,8 +1753,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv4f32.i32.iXLen(iXLen, iXLen, i32, iXLen) - define @test_sf_vc_fv_x_se_e32m4(i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_x_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2082,8 +1764,6 @@ entry: ret %0 } -declare 
@llvm.riscv.sf.vc.v.x.se.nxv8f32.i32.iXLen(iXLen, iXLen, i32, iXLen) - define @test_sf_vc_fv_x_se_e32m8(i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_x_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2095,8 +1775,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.x.se.nxv16f32.i32.iXLen(iXLen, iXLen, i32, iXLen) - define @test_sf_vc_fv_i_se_e16mf4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_i_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -2108,8 +1786,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv1f16.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_fv_i_se_e16mf2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_i_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -2121,8 +1797,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv2f16.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_fv_i_se_e16m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_i_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -2134,8 +1808,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv4f16.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_fv_i_se_e16m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_i_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -2147,8 +1819,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv8f16.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_fv_i_se_e16m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_i_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -2160,8 +1830,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv16f16.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_fv_i_se_e16m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_i_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -2173,8 +1841,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv32f16.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_fv_i_se_e32mf2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_i_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2186,8 +1852,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv1f32.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define 
@test_sf_vc_fv_i_se_e32m1(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_i_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2199,8 +1863,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv2f32.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_fv_i_se_e32m2(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_i_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2212,8 +1874,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv4f32.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_fv_i_se_e32m4(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_i_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2225,8 +1885,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv8f32.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) - define @test_sf_vc_fv_i_se_e32m8(iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_i_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2238,4 +1896,3 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.i.se.nxv16f32.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen) diff --git a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xv.ll b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xv.ll index 23628a98feb7c..c30e91448e519 100644 --- a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xv.ll @@ -15,8 +15,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1i8.nxv1i8.iXLen(iXLen, iXLen, , , iXLen) - define void @test_sf_vc_vv_se_e8mf4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -28,8 +26,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2i8.nxv2i8.iXLen(iXLen, iXLen, , , iXLen) - define void @test_sf_vc_vv_se_e8mf2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -41,8 +37,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4i8.nxv4i8.iXLen(iXLen, iXLen, , , iXLen) - define void @test_sf_vc_vv_se_e8m1( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -54,8 +48,6 @@ entry: ret void } -declare void 
@llvm.riscv.sf.vc.vv.se.iXLen.nxv8i8.nxv8i8.iXLen(iXLen, iXLen, , , iXLen) - define void @test_sf_vc_vv_se_e8m2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -67,8 +59,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16i8.nxv16i8.iXLen(iXLen, iXLen, , , iXLen) - define void @test_sf_vc_vv_se_e8m4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -80,8 +70,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv32i8.nxv32i8.iXLen(iXLen, iXLen, , , iXLen) - define void @test_sf_vc_vv_se_e8m8( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -93,8 +81,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv64i8.nxv64i8.iXLen(iXLen, iXLen, , , iXLen) - define void @test_sf_vc_vv_se_e16mf4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -106,8 +92,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1i16.nxv1i16.iXLen(iXLen, iXLen, , , iXLen) - define void @test_sf_vc_vv_se_e16mf2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -119,8 +103,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2i16.nxv2i16.iXLen(iXLen, iXLen, , , iXLen) - define void @test_sf_vc_vv_se_e16m1( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -132,8 +114,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4i16.nxv4i16.iXLen(iXLen, iXLen, , , iXLen) - define void @test_sf_vc_vv_se_e16m2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -145,8 +125,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8i16.nxv8i16.iXLen(iXLen, iXLen, , , iXLen) - define void @test_sf_vc_vv_se_e16m4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e16m4: ; CHECK: # %bb.0: 
# %entry @@ -158,8 +136,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16i16.nxv16i16.iXLen(iXLen, iXLen, , , iXLen) - define void @test_sf_vc_vv_se_e16m8( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -171,8 +147,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv32i16.nxv32i16.iXLen(iXLen, iXLen, , , iXLen) - define void @test_sf_vc_vv_se_e32mf2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -184,8 +158,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1i32.nxv1i32.iXLen(iXLen, iXLen, , , iXLen) - define void @test_sf_vc_vv_se_e32m1( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -197,8 +169,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2i32.nxv2i32.iXLen(iXLen, iXLen, , , iXLen) - define void @test_sf_vc_vv_se_e32m2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -210,8 +180,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4i32.nxv4i32.iXLen(iXLen, iXLen, , , iXLen) - define void @test_sf_vc_vv_se_e32m4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -223,8 +191,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8i32.nxv8i32.iXLen(iXLen, iXLen, , , iXLen) - define void @test_sf_vc_vv_se_e32m8( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -236,8 +202,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16i32.nxv16i32.iXLen(iXLen, iXLen, , , iXLen) - define void @test_sf_vc_vv_se_e64m1( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -249,8 +213,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1i64.nxv1i64.iXLen(iXLen, iXLen, , , iXLen) - define void @test_sf_vc_vv_se_e64m2( 
%vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -262,8 +224,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2i64.nxv2i64.iXLen(iXLen, iXLen, , , iXLen) - define void @test_sf_vc_vv_se_e64m4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -275,8 +235,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4i64.nxv4i64.iXLen(iXLen, iXLen, , , iXLen) - define void @test_sf_vc_vv_se_e64m8( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -288,8 +246,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8i64.nxv8i64.iXLen(iXLen, iXLen, , , iXLen) - define @test_sf_vc_v_vv_se_e8mf8( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -301,8 +257,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv1i8.iXLen.nxv1i8.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_se_e8mf4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -314,8 +268,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv2i8.iXLen.nxv2i8.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_se_e8mf2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -327,8 +279,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv4i8.iXLen.nxv4i8.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_se_e8m1( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -340,8 +290,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv8i8.iXLen.nxv8i8.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_se_e8m2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -353,8 +301,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv16i8.iXLen.nxv16i8.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_se_e8m4( %vs2, 
%vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -366,8 +312,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv32i8.iXLen.nxv32i8.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_se_e8m8( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -379,8 +323,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv64i8.iXLen.nxv64i8.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_se_e16mf4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -392,8 +334,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv1i16.iXLen.nxv1i16.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_se_e16mf2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -405,8 +345,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv2i16.iXLen.nxv2i16.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_se_e16m1( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -418,8 +356,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv4i16.iXLen.nxv4i16.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_se_e16m2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -431,8 +367,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv8i16.iXLen.nxv8i16.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_se_e16m4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -444,8 +378,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv16i16.iXLen.nxv16i16.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_se_e16m8( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -457,8 +389,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv32i16.iXLen.nxv32i16.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_se_e32mf2( %vs2, %vs1, iXLen %vl) { ; 
CHECK-LABEL: test_sf_vc_v_vv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -470,8 +400,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv1i32.iXLen.nxv1i32.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_se_e32m1( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -483,8 +411,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv2i32.iXLen.nxv2i32.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_se_e32m2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -496,8 +422,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv4i32.iXLen.nxv4i32.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_se_e32m4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -509,8 +433,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv8i32.iXLen.nxv8i32.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_se_e32m8( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -522,8 +444,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv16i32.iXLen.nxv16i32.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_se_e64m1( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -535,8 +455,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv1i64.iXLen.nxv1i64.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_se_e64m2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -548,8 +466,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv2i64.iXLen.nxv2i64.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_se_e64m4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -561,8 +477,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv4i64.iXLen.nxv4i64.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_se_e64m8( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_v_vv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -574,8 +488,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv8i64.iXLen.nxv8i64.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_e8mf8( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e8mf8: ; CHECK: # %bb.0: # %entry @@ -587,8 +499,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.nxv1i8.iXLen.nxv1i8.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_e8mf4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e8mf4: ; CHECK: # %bb.0: # %entry @@ -600,8 +510,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.nxv2i8.iXLen.nxv2i8.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_e8mf2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e8mf2: ; CHECK: # %bb.0: # %entry @@ -613,8 +521,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.nxv4i8.iXLen.nxv4i8.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_e8m1( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e8m1: ; CHECK: # %bb.0: # %entry @@ -626,8 +532,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.nxv8i8.iXLen.nxv8i8.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_e8m2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e8m2: ; CHECK: # %bb.0: # %entry @@ -639,8 +543,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.nxv16i8.iXLen.nxv16i8.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_e8m4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e8m4: ; CHECK: # %bb.0: # %entry @@ -652,8 +554,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.nxv32i8.iXLen.nxv32i8.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_e8m8( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e8m8: ; CHECK: # %bb.0: # %entry @@ -665,8 +565,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.nxv64i8.iXLen.nxv64i8.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_e16mf4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e16mf4: ; CHECK: # %bb.0: # %entry @@ -678,8 +576,6 @@ entry: ret %0 } -declare 
@llvm.riscv.sf.vc.v.vv.nxv1i16.iXLen.nxv1i16.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_e16mf2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e16mf2: ; CHECK: # %bb.0: # %entry @@ -691,8 +587,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.nxv2i16.iXLen.nxv2i16.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_e16m1( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e16m1: ; CHECK: # %bb.0: # %entry @@ -704,8 +598,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.nxv4i16.iXLen.nxv4i16.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_e16m2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e16m2: ; CHECK: # %bb.0: # %entry @@ -717,8 +609,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.nxv8i16.iXLen.nxv8i16.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_e16m4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e16m4: ; CHECK: # %bb.0: # %entry @@ -730,8 +620,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.nxv16i16.iXLen.nxv16i16.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_e16m8( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e16m8: ; CHECK: # %bb.0: # %entry @@ -743,8 +631,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.nxv32i16.iXLen.nxv32i16.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_e32mf2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e32mf2: ; CHECK: # %bb.0: # %entry @@ -756,8 +642,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.nxv1i32.iXLen.nxv1i32.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_e32m1( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e32m1: ; CHECK: # %bb.0: # %entry @@ -769,8 +653,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.nxv2i32.iXLen.nxv2i32.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_e32m2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e32m2: ; CHECK: # %bb.0: # %entry @@ -782,8 +664,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.nxv4i32.iXLen.nxv4i32.iXLen(iXLen, , , iXLen) - define 
@test_sf_vc_v_vv_e32m4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e32m4: ; CHECK: # %bb.0: # %entry @@ -795,8 +675,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.nxv8i32.iXLen.nxv8i32.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_e32m8( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e32m8: ; CHECK: # %bb.0: # %entry @@ -808,8 +686,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.nxv16i32.iXLen.nxv16i32.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_e64m1( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e64m1: ; CHECK: # %bb.0: # %entry @@ -821,8 +697,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.nxv1i64.iXLen.nxv1i64.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_e64m2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e64m2: ; CHECK: # %bb.0: # %entry @@ -834,8 +708,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.nxv2i64.iXLen.nxv2i64.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_e64m4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e64m4: ; CHECK: # %bb.0: # %entry @@ -847,8 +719,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.nxv4i64.iXLen.nxv4i64.iXLen(iXLen, , , iXLen) - define @test_sf_vc_v_vv_e64m8( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vv_e64m8: ; CHECK: # %bb.0: # %entry @@ -860,8 +730,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.nxv8i64.iXLen.nxv8i64.iXLen(iXLen, , , iXLen) - define void @test_sf_vc_xv_se_e8mf8( %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -873,8 +741,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1i8.i8.iXLen(iXLen, iXLen, , i8, iXLen) - define void @test_sf_vc_xv_se_e8mf4( %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -886,8 +752,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2i8.i8.iXLen(iXLen, iXLen, , i8, iXLen) - define void @test_sf_vc_xv_se_e8mf2( %vs2, i8 
zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -899,8 +763,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4i8.i8.iXLen(iXLen, iXLen, , i8, iXLen) - define void @test_sf_vc_xv_se_e8m1( %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -912,8 +774,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8i8.i8.iXLen(iXLen, iXLen, , i8, iXLen) - define void @test_sf_vc_xv_se_e8m2( %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -925,8 +785,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16i8.i8.iXLen(iXLen, iXLen, , i8, iXLen) - define void @test_sf_vc_xv_se_e8m4( %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -938,8 +796,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv32i8.i8.iXLen(iXLen, iXLen, , i8, iXLen) - define void @test_sf_vc_xv_se_e8m8( %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -951,8 +807,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv64i8.i8.iXLen(iXLen, iXLen, , i8, iXLen) - define void @test_sf_vc_xv_se_e16mf4( %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -964,8 +818,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1i16.i16.iXLen(iXLen, iXLen, , i16, iXLen) - define void @test_sf_vc_xv_se_e16mf2( %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -977,8 +829,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2i16.i16.iXLen(iXLen, iXLen, , i16, iXLen) - define void @test_sf_vc_xv_se_e16m1( %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -990,8 +840,6 @@ entry: ret void 
} -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4i16.i16.iXLen(iXLen, iXLen, , i16, iXLen) - define void @test_sf_vc_xv_se_e16m2( %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -1003,8 +851,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8i16.i16.iXLen(iXLen, iXLen, , i16, iXLen) - define void @test_sf_vc_xv_se_e16m4( %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -1016,8 +862,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16i16.i16.iXLen(iXLen, iXLen, , i16, iXLen) - define void @test_sf_vc_xv_se_e16m8( %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -1029,8 +873,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv32i16.i16.iXLen(iXLen, iXLen, , i16, iXLen) - define void @test_sf_vc_xv_se_e32mf2( %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1042,8 +884,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1i32.i32.iXLen(iXLen, iXLen, , i32, iXLen) - define void @test_sf_vc_xv_se_e32m1( %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1055,8 +895,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2i32.i32.iXLen(iXLen, iXLen, , i32, iXLen) - define void @test_sf_vc_xv_se_e32m2( %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1068,8 +906,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4i32.i32.iXLen(iXLen, iXLen, , i32, iXLen) - define void @test_sf_vc_xv_se_e32m4( %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1081,8 +917,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8i32.i32.iXLen(iXLen, iXLen, , i32, iXLen) - 
define void @test_sf_vc_xv_se_e32m8( %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -1094,8 +928,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16i32.i32.iXLen(iXLen, iXLen, , i32, iXLen) - define @test_sf_vc_v_xv_se_e8mf8( %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1107,8 +939,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv1i8.iXLen.i8.iXLen(iXLen, , i8, iXLen) - define @test_sf_vc_v_xv_se_e8mf4( %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1120,8 +950,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv2i8.iXLen.i8.iXLen(iXLen, , i8, iXLen) - define @test_sf_vc_v_xv_se_e8mf2( %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1133,8 +961,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv4i8.iXLen.i8.iXLen(iXLen, , i8, iXLen) - define @test_sf_vc_v_xv_se_e8m1( %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -1146,8 +972,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv8i8.iXLen.i8.iXLen(iXLen, , i8, iXLen) - define @test_sf_vc_v_xv_se_e8m2( %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -1159,8 +983,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv16i8.iXLen.i8.iXLen(iXLen, , i8, iXLen) - define @test_sf_vc_v_xv_se_e8m4( %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -1172,8 +994,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv32i8.iXLen.i8.iXLen(iXLen, , i8, iXLen) - define @test_sf_vc_v_xv_se_e8m8( %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -1185,8 +1005,6 @@ entry: ret %0 } -declare 
@llvm.riscv.sf.vc.v.xv.se.nxv64i8.iXLen.i8.iXLen(iXLen, , i8, iXLen) - define @test_sf_vc_v_xv_se_e16mf4( %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1198,8 +1016,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv1i16.iXLen.i16.iXLen(iXLen, , i16, iXLen) - define @test_sf_vc_v_xv_se_e16mf2( %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1211,8 +1027,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv2i16.iXLen.i16.iXLen(iXLen, , i16, iXLen) - define @test_sf_vc_v_xv_se_e16m1( %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -1224,8 +1038,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv4i16.iXLen.i16.iXLen(iXLen, , i16, iXLen) - define @test_sf_vc_v_xv_se_e16m2( %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -1237,8 +1049,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv8i16.iXLen.i16.iXLen(iXLen, , i16, iXLen) - define @test_sf_vc_v_xv_se_e16m4( %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -1250,8 +1060,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv16i16.iXLen.i16.iXLen(iXLen, , i16, iXLen) - define @test_sf_vc_v_xv_se_e16m8( %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -1263,8 +1071,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv32i16.iXLen.i16.iXLen(iXLen, , i16, iXLen) - define @test_sf_vc_v_xv_se_e32mf2( %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1276,8 +1082,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv1i32.i32.i32.iXLen(iXLen, , i32, iXLen) - define @test_sf_vc_v_xv_se_e32m1( %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_v_xv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1289,8 +1093,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv2i32.i32.i32.iXLen(iXLen, , i32, iXLen) - define @test_sf_vc_v_xv_se_e32m2( %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1302,8 +1104,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv4i32.i32.i32.iXLen(iXLen, , i32, iXLen) - define @test_sf_vc_v_xv_se_e32m4( %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1315,8 +1115,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv8i32.i32.i32.iXLen(iXLen, , i32, iXLen) - define @test_sf_vc_v_xv_se_e32m8( %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -1328,8 +1126,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv16i32.i32.i32.iXLen(iXLen, , i32, iXLen) - define @test_sf_vc_v_xv_e8mf8( %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1341,8 +1137,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.nxv1i8.iXLen.i8.iXLen(iXLen, , i8, iXLen) - define @test_sf_vc_v_xv_e8mf4( %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1354,8 +1148,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.nxv2i8.iXLen.i8.iXLen(iXLen, , i8, iXLen) - define @test_sf_vc_v_xv_e8mf2( %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1367,8 +1159,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.nxv4i8.iXLen.i8.iXLen(iXLen, , i8, iXLen) - define @test_sf_vc_v_xv_e8m1( %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e8m1: ; CHECK: # %bb.0: # %entry @@ -1380,8 +1170,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.nxv8i8.iXLen.i8.iXLen(iXLen, , i8, iXLen) - define @test_sf_vc_v_xv_e8m2( %vs2, i8 zeroext %rs1, iXLen %vl) { ; 
CHECK-LABEL: test_sf_vc_v_xv_e8m2: ; CHECK: # %bb.0: # %entry @@ -1393,8 +1181,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.nxv16i8.iXLen.i8.iXLen(iXLen, , i8, iXLen) - define @test_sf_vc_v_xv_e8m4( %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e8m4: ; CHECK: # %bb.0: # %entry @@ -1406,8 +1192,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.nxv32i8.iXLen.i8.iXLen(iXLen, , i8, iXLen) - define @test_sf_vc_v_xv_e8m8( %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e8m8: ; CHECK: # %bb.0: # %entry @@ -1419,8 +1203,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.nxv64i8.iXLen.i8.iXLen(iXLen, , i8, iXLen) - define @test_sf_vc_v_xv_e16mf4( %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1432,8 +1214,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.nxv1i16.iXLen.i16.iXLen(iXLen, , i16, iXLen) - define @test_sf_vc_v_xv_e16mf2( %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1445,8 +1225,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.nxv2i16.iXLen.i16.iXLen(iXLen, , i16, iXLen) - define @test_sf_vc_v_xv_e16m1( %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e16m1: ; CHECK: # %bb.0: # %entry @@ -1458,8 +1236,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.nxv4i16.iXLen.i16.iXLen(iXLen, , i16, iXLen) - define @test_sf_vc_v_xv_e16m2( %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e16m2: ; CHECK: # %bb.0: # %entry @@ -1471,8 +1247,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.nxv8i16.iXLen.i16.iXLen(iXLen, , i16, iXLen) - define @test_sf_vc_v_xv_e16m4( %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e16m4: ; CHECK: # %bb.0: # %entry @@ -1484,8 +1258,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.nxv16i16.iXLen.i16.iXLen(iXLen, , i16, iXLen) - define @test_sf_vc_v_xv_e16m8( %vs2, i16 zeroext %rs1, iXLen %vl) { ; 
CHECK-LABEL: test_sf_vc_v_xv_e16m8: ; CHECK: # %bb.0: # %entry @@ -1497,8 +1269,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.nxv32i16.iXLen.i16.iXLen(iXLen, , i16, iXLen) - define @test_sf_vc_v_xv_e32mf2( %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1510,8 +1280,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.nxv1i32.i32.i32.iXLen(iXLen, , i32, iXLen) - define @test_sf_vc_v_xv_e32m1( %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e32m1: ; CHECK: # %bb.0: # %entry @@ -1523,8 +1291,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.nxv2i32.i32.i32.iXLen(iXLen, , i32, iXLen) - define @test_sf_vc_v_xv_e32m2( %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e32m2: ; CHECK: # %bb.0: # %entry @@ -1536,8 +1302,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.nxv4i32.i32.i32.iXLen(iXLen, , i32, iXLen) - define @test_sf_vc_v_xv_e32m4( %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e32m4: ; CHECK: # %bb.0: # %entry @@ -1549,8 +1313,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.nxv8i32.i32.i32.iXLen(iXLen, , i32, iXLen) - define @test_sf_vc_v_xv_e32m8( %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xv_e32m8: ; CHECK: # %bb.0: # %entry @@ -1562,8 +1324,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.nxv16i32.i32.i32.iXLen(iXLen, , i32, iXLen) - define void @test_sf_vc_iv_se_e8mf8( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1575,8 +1335,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1i8.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define void @test_sf_vc_iv_se_e8mf4( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1588,8 +1346,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2i8.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define void @test_sf_vc_iv_se_e8mf2( %vs2, iXLen %vl) { ; 
CHECK-LABEL: test_sf_vc_iv_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1601,8 +1357,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4i8.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define void @test_sf_vc_iv_se_e8m1( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -1614,8 +1368,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8i8.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define void @test_sf_vc_iv_se_e8m2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -1627,8 +1379,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16i8.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define void @test_sf_vc_iv_se_e8m4( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -1640,8 +1390,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv32i8.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define void @test_sf_vc_iv_se_e8m8( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -1653,8 +1401,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv64i8.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define void @test_sf_vc_iv_se_e16mf4( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1666,8 +1412,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1i16.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define void @test_sf_vc_iv_se_e16mf2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1679,8 +1423,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2i16.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define void @test_sf_vc_iv_se_e16m1( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -1692,8 +1434,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4i16.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - 
define void @test_sf_vc_iv_se_e16m2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -1705,8 +1445,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8i16.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define void @test_sf_vc_iv_se_e16m4( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -1718,8 +1456,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16i16.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define void @test_sf_vc_iv_se_e16m8( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -1731,8 +1467,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv32i16.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define void @test_sf_vc_iv_se_e32mf2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1744,8 +1478,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1i32.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define void @test_sf_vc_iv_se_e32m1( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1757,8 +1489,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2i32.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define void @test_sf_vc_iv_se_e32m2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1770,8 +1500,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4i32.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define void @test_sf_vc_iv_se_e32m4( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1783,8 +1511,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8i32.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define void @test_sf_vc_iv_se_e32m8( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -1796,8 +1522,6 @@ entry: ret void } -declare void 
@llvm.riscv.sf.vc.iv.se.iXLen.nxv16i32.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define void @test_sf_vc_iv_se_e64m1( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -1809,8 +1533,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1i64.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define void @test_sf_vc_iv_se_e64m2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -1822,8 +1544,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2i64.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define void @test_sf_vc_iv_se_e64m4( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -1835,8 +1555,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4i64.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define void @test_sf_vc_iv_se_e64m8( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_iv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -1848,8 +1566,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8i64.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_se_e8mf8( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1861,8 +1577,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv1i8.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_se_e8mf4( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1874,8 +1588,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv2i8.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_se_e8mf2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1887,8 +1599,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv4i8.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_se_e8m1( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -1900,8 +1610,6 @@ entry: ret 
%0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv8i8.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_se_e8m2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -1913,8 +1621,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv16i8.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_se_e8m4( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -1926,8 +1632,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv32i8.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_se_e8m8( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -1939,8 +1643,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv64i8.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_se_e16mf4( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1952,8 +1654,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv1i16.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_se_e16mf2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1965,8 +1665,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv2i16.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_se_e16m1( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -1978,8 +1676,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv4i16.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_se_e16m2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -1991,8 +1687,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv8i16.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_se_e16m4( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -2004,8 +1698,6 @@ entry: ret %0 } -declare 
@llvm.riscv.sf.vc.v.iv.se.nxv16i16.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_se_e16m8( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -2017,8 +1709,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv32i16.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_se_e32mf2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2030,8 +1720,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv1i32.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_se_e32m1( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2043,8 +1731,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv2i32.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_se_e32m2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2056,8 +1742,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv4i32.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_se_e32m4( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2069,8 +1753,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv8i32.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_se_e32m8( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2082,8 +1764,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv16i32.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_se_e64m1( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -2095,8 +1775,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv1i64.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_se_e64m2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -2108,8 +1786,6 @@ entry: ret %0 } -declare 
@llvm.riscv.sf.vc.v.iv.se.nxv2i64.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_se_e64m4( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -2121,8 +1797,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv4i64.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_se_e64m8( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -2134,8 +1808,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv8i64.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_e8mf8( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e8mf8: ; CHECK: # %bb.0: # %entry @@ -2147,8 +1819,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv1i8.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_e8mf4( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e8mf4: ; CHECK: # %bb.0: # %entry @@ -2160,8 +1830,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv2i8.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_e8mf2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e8mf2: ; CHECK: # %bb.0: # %entry @@ -2173,8 +1841,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv4i8.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_e8m1( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e8m1: ; CHECK: # %bb.0: # %entry @@ -2186,8 +1852,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv8i8.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_e8m2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e8m2: ; CHECK: # %bb.0: # %entry @@ -2199,8 +1863,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv16i8.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_e8m4( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e8m4: ; CHECK: # %bb.0: # %entry @@ -2212,8 +1874,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv32i8.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define 
@test_sf_vc_v_iv_e8m8( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e8m8: ; CHECK: # %bb.0: # %entry @@ -2225,8 +1885,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv64i8.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_e16mf4( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e16mf4: ; CHECK: # %bb.0: # %entry @@ -2238,8 +1896,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv1i16.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_e16mf2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e16mf2: ; CHECK: # %bb.0: # %entry @@ -2251,8 +1907,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv2i16.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_e16m1( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e16m1: ; CHECK: # %bb.0: # %entry @@ -2264,8 +1918,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv4i16.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_e16m2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e16m2: ; CHECK: # %bb.0: # %entry @@ -2277,8 +1929,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv8i16.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_e16m4( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e16m4: ; CHECK: # %bb.0: # %entry @@ -2290,8 +1940,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv16i16.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_e16m8( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e16m8: ; CHECK: # %bb.0: # %entry @@ -2303,8 +1951,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv32i16.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_e32mf2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2316,8 +1962,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv1i32.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_e32m1( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e32m1: ; CHECK: # %bb.0: # 
%entry @@ -2329,8 +1973,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv2i32.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_e32m2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e32m2: ; CHECK: # %bb.0: # %entry @@ -2342,8 +1984,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv4i32.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_e32m4( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e32m4: ; CHECK: # %bb.0: # %entry @@ -2355,8 +1995,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv8i32.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_e32m8( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e32m8: ; CHECK: # %bb.0: # %entry @@ -2368,8 +2006,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv16i32.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_e64m1( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e64m1: ; CHECK: # %bb.0: # %entry @@ -2381,8 +2017,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv1i64.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_e64m2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e64m2: ; CHECK: # %bb.0: # %entry @@ -2394,8 +2028,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv2i64.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_e64m4( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e64m4: ; CHECK: # %bb.0: # %entry @@ -2407,8 +2039,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv4i64.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define @test_sf_vc_v_iv_e64m8( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_iv_e64m8: ; CHECK: # %bb.0: # %entry @@ -2420,8 +2050,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv8i64.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) - define void @test_sf_vc_fvv_se_e16mf4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -2433,8 +2061,6 @@ entry: ret void } -declare void 
@llvm.riscv.sf.vc.vv.se.iXLen.nxv1f16.nxv1i16.iXLen(iXLen, iXLen, , , iXLen) - define @test_sf_vc_v_fvv_se_e16mf4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -2446,8 +2072,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv1f16.iXLen.nxv1f16.iXLen(iXLen, , , iXLen) - define void @test_sf_vc_fvv_se_e16mf2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -2459,8 +2083,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2f16.nxv2i16.iXLen(iXLen, iXLen, , , iXLen) - define @test_sf_vc_v_fvv_se_e16mf2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -2472,8 +2094,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv2f16.iXLen.nxv2f16.iXLen(iXLen, , , iXLen) - define void @test_sf_vc_fvv_se_e16m1( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -2485,8 +2105,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4f16.nxv4i16.iXLen(iXLen, iXLen, , , iXLen) - define @test_sf_vc_v_fvv_se_e16m1( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -2498,8 +2116,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv4f16.iXLen.nxv4f16.iXLen(iXLen, , , iXLen) - define void @test_sf_vc_fvv_se_e16m2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -2511,8 +2127,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8f16.nxv8i16.iXLen(iXLen, iXLen, , , iXLen) - define @test_sf_vc_v_fvv_se_e16m2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -2524,8 +2138,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv8f16.iXLen.nxv8f16.iXLen(iXLen, , , iXLen) - define void @test_sf_vc_fvv_se_e16m4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e16m4: ; CHECK: # %bb.0: # 
%entry @@ -2537,8 +2149,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16f16.nxv16i16.iXLen(iXLen, iXLen, , , iXLen) - define @test_sf_vc_v_fvv_se_e16m4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -2550,8 +2160,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv16f16.iXLen.nxv16f16.iXLen(iXLen, , , iXLen) - define void @test_sf_vc_fvv_se_e16m8( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -2563,8 +2171,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv32f16.nxv32i16.iXLen(iXLen, iXLen, , , iXLen) - define @test_sf_vc_v_fvv_se_e16m8( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -2576,8 +2182,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv32f16.iXLen.nxv32f16.iXLen(iXLen, , , iXLen) - define void @test_sf_vc_fvv_se_e32mf2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2589,8 +2193,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1f32.nxv1i32.iXLen(iXLen, iXLen, , , iXLen) - define @test_sf_vc_v_fvv_se_e32mf2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2602,8 +2204,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv1f32.iXLen.nxv1f32.iXLen(iXLen, , , iXLen) - define void @test_sf_vc_fvv_se_e32m1( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2615,8 +2215,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2f32.nxv2i32.iXLen(iXLen, iXLen, , , iXLen) - define @test_sf_vc_v_fvv_se_e32m1( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2628,8 +2226,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv2f32.iXLen.nxv2f32.iXLen(iXLen, , , iXLen) - define void @test_sf_vc_fvv_se_e32m2( %vs2, %vs1, iXLen 
%vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2641,8 +2237,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4f32.nxv4i32.iXLen(iXLen, iXLen, , , iXLen) - define @test_sf_vc_v_fvv_se_e32m2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2654,8 +2248,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv4f32.iXLen.nxv4f32.iXLen(iXLen, , , iXLen) - define void @test_sf_vc_fvv_se_e32m4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2667,8 +2259,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8f32.nxv8i32.iXLen(iXLen, iXLen, , , iXLen) - define @test_sf_vc_v_fvv_se_e32m4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2680,8 +2270,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv8f32.iXLen.nxv8f32.iXLen(iXLen, , , iXLen) - define void @test_sf_vc_fvv_se_e32m8( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2693,8 +2281,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16f32.nxv16i32.iXLen(iXLen, iXLen, , , iXLen) - define @test_sf_vc_v_fvv_se_e32m8( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2706,8 +2292,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv16f32.iXLen.nxv16f32.iXLen(iXLen, , , iXLen) - define void @test_sf_vc_fvv_se_e64m1( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -2719,8 +2303,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1f64.nxv1i64.iXLen(iXLen, iXLen, , , iXLen) - define @test_sf_vc_v_fvv_se_e64m1( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -2732,8 +2314,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv1f64.iXLen.nxv1f64.iXLen(iXLen, , , iXLen) - 
define void @test_sf_vc_fvv_se_e64m2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -2745,8 +2325,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2f64.nxv2i64.iXLen(iXLen, iXLen, , , iXLen) - define @test_sf_vc_v_fvv_se_e64m2( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -2758,8 +2336,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv2f64.iXLen.nxv2f64.iXLen(iXLen, , , iXLen) - define void @test_sf_vc_fvv_se_e64m4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -2771,8 +2347,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4f64.nxv4i64.iXLen(iXLen, iXLen, , , iXLen) - define @test_sf_vc_v_fvv_se_e64m4( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -2784,8 +2358,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv4f64.iXLen.nxv4f64.iXLen(iXLen, , , iXLen) - define void @test_sf_vc_fvv_se_e64m8( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -2797,8 +2369,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8f64.nxv8i64.iXLen(iXLen, iXLen, , , iXLen) - define @test_sf_vc_v_fvv_se_e64m8( %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -2810,8 +2380,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vv.se.nxv8f64.iXLen.nxv8f64.iXLen(iXLen, , , iXLen) - define void @test_sf_vc_fvx_se_e16mf4( %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvx_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -2823,8 +2391,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1f16.nxv1f16.i16.iXLen(iXLen, iXLen, , i16, iXLen) - define @test_sf_vc_v_fvx_se_e16mf4( %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvx_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -2836,8 +2402,6 @@ entry: ret %0 } -declare 
@llvm.riscv.sf.vc.v.xv.se.nxv1f16.nxv1f16.i16.iXLen(iXLen, , i16, iXLen) - define void @test_sf_vc_fvx_se_e16mf2( %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvx_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -2849,8 +2413,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2f16.nxv2f16.i16.iXLen(iXLen, iXLen, , i16, iXLen) - define @test_sf_vc_v_fvx_se_e16mf2( %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvx_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -2862,8 +2424,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv2f16.nxv2f16.i16.iXLen(iXLen, , i16, iXLen) - define void @test_sf_vc_fvx_se_e16m1( %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvx_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -2875,8 +2435,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4f16.nxv4f16.i16.iXLen(iXLen, iXLen, , i16, iXLen) - define @test_sf_vc_v_fvx_se_e16m1( %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvx_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -2888,8 +2446,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv4f16.nxv4f16.i16.iXLen(iXLen, , i16, iXLen) - define void @test_sf_vc_fvx_se_e16m2( %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvx_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -2901,8 +2457,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8f16.nxv8f16.i16.iXLen(iXLen, iXLen, , i16, iXLen) - define @test_sf_vc_v_fvx_se_e16m2( %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvx_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -2914,8 +2468,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv8f16.nxv8f16.i16.iXLen(iXLen, , i16, iXLen) - define void @test_sf_vc_fvx_se_e16m4( %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvx_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -2927,8 +2479,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16f16.nxv16f16.i16.iXLen(iXLen, iXLen, , i16, iXLen) - define @test_sf_vc_v_fvx_se_e16m4( %vs2, i16 %rs1, iXLen 
%vl) { ; CHECK-LABEL: test_sf_vc_v_fvx_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -2940,8 +2490,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv16f16.nxv16f16.i16.iXLen(iXLen, , i16, iXLen) - define void @test_sf_vc_fvx_se_e16m8( %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvx_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -2953,8 +2501,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv32f16.nxv32f16.i16.iXLen(iXLen, iXLen, , i16, iXLen) - define @test_sf_vc_v_fvx_se_e16m8( %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvx_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -2966,8 +2512,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv32f16.nxv32f16.i16.iXLen(iXLen, , i16, iXLen) - define void @test_sf_vc_fvx_se_e32mf2( %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvx_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2979,8 +2523,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1f32.nxv1f32.i32.iXLen(iXLen, iXLen, , i32, iXLen) - define @test_sf_vc_v_fvx_se_e32mf2( %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvx_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2992,8 +2534,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv1f32.nxv1f32.i32.iXLen(iXLen, , i32, iXLen) - define void @test_sf_vc_fvx_se_e32m1( %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvx_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -3005,8 +2545,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2f32.nxv2f32.i32.iXLen(iXLen, iXLen, , i32, iXLen) - define @test_sf_vc_v_fvx_se_e32m1( %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvx_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -3018,8 +2556,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv2f32.nxv2f32.i32.iXLen(iXLen, , i32, iXLen) - define void @test_sf_vc_fvx_se_e32m2( %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvx_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -3031,8 +2567,6 @@ entry: ret void } -declare void 
@llvm.riscv.sf.vc.xv.se.iXLen.nxv4f32.nxv4f32.i32.iXLen(iXLen, iXLen, , i32, iXLen) - define @test_sf_vc_v_fvx_se_e32m2( %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvx_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -3044,8 +2578,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv4f32.nxv4f32.i32.iXLen(iXLen, , i32, iXLen) - define void @test_sf_vc_fvx_se_e32m4( %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvx_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -3057,8 +2589,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8f32.nxv8f32.i32.iXLen(iXLen, iXLen, , i32, iXLen) - define @test_sf_vc_v_fvx_se_e32m4( %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvx_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -3070,8 +2600,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv8f32.nxv8f32.i32.iXLen(iXLen, , i32, iXLen) - define void @test_sf_vc_fvx_se_e32m8( %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvx_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -3083,8 +2611,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16f32.nxv16f32.i32.iXLen(iXLen, iXLen, , i32, iXLen) - define @test_sf_vc_v_fvx_se_e32m8( %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvx_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -3096,8 +2622,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xv.se.nxv16f32.nxv16f32.i32.iXLen(iXLen, , i32, iXLen) - define void @test_sf_vc_fvi_se_e16mf4( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvi_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -3109,8 +2633,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1f16.nxv1f16.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define @test_sf_vc_v_fvi_se_e16mf4( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvi_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -3122,8 +2644,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv1f16.nxv1f16.iXLen.iXLen(iXLen, , iXLen, iXLen) - define void @test_sf_vc_fvi_se_e16mf2( %vs2, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_fvi_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -3135,8 +2655,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2f16.nxv2f16.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define @test_sf_vc_v_fvi_se_e16mf2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvi_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -3148,8 +2666,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv2f16.nxv2f16.iXLen.iXLen(iXLen, , iXLen, iXLen) - define void @test_sf_vc_fvi_se_e16m1( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvi_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -3161,8 +2677,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4f16.nxv4f16.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define @test_sf_vc_v_fvi_se_e16m1( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvi_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -3174,8 +2688,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv4f16.nxv4f16.iXLen.iXLen(iXLen, , iXLen, iXLen) - define void @test_sf_vc_fvi_se_e16m2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvi_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -3187,8 +2699,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8f16.nxv8f16.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define @test_sf_vc_v_fvi_se_e16m2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvi_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -3200,8 +2710,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv8f16.nxv8f16.iXLen.iXLen(iXLen, , iXLen, iXLen) - define void @test_sf_vc_fvi_se_e16m4( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvi_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -3213,8 +2721,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16f16.nxv16f16.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define @test_sf_vc_v_fvi_se_e16m4( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvi_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -3226,8 +2732,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv16f16.nxv16f16.iXLen.iXLen(iXLen, , iXLen, 
iXLen) - define void @test_sf_vc_fvi_se_e16m8( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvi_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -3239,8 +2743,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv32f16.nxv32f16.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define @test_sf_vc_v_fvi_se_e16m8( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvi_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -3252,8 +2754,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv32f16.nxv32f16.iXLen.iXLen(iXLen, , iXLen, iXLen) - define void @test_sf_vc_fvi_se_e32mf2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvi_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -3265,8 +2765,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1f32.nxv1f32.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define @test_sf_vc_v_fvi_se_e32mf2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvi_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -3278,8 +2776,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv1f32.nxv1f32.iXLen.iXLen(iXLen, , iXLen, iXLen) - define void @test_sf_vc_fvi_se_e32m1( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvi_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -3291,8 +2787,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2f32.nxv2f32.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define @test_sf_vc_v_fvi_se_e32m1( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvi_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -3304,8 +2798,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv2f32.nxv2f32.iXLen.iXLen(iXLen, , iXLen, iXLen) - define void @test_sf_vc_fvi_se_e32m2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvi_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -3317,8 +2809,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4f32.nxv4f32.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define @test_sf_vc_v_fvi_se_e32m2( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvi_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -3330,8 +2820,6 @@ entry: ret %0 } 
-declare @llvm.riscv.sf.vc.v.iv.se.nxv4f32.nxv4f32.iXLen.iXLen(iXLen, , iXLen, iXLen) - define void @test_sf_vc_fvi_se_e32m4( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvi_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -3343,8 +2831,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8f32.nxv8f32.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define @test_sf_vc_v_fvi_se_e32m4( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvi_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -3356,8 +2842,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv8f32.nxv8f32.iXLen.iXLen(iXLen, , iXLen, iXLen) - define void @test_sf_vc_fvi_se_e32m8( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvi_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -3369,8 +2853,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16f32.nxv16f32.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - define @test_sf_vc_v_fvi_se_e32m8( %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvi_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -3382,8 +2864,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv16f32.nxv16f32.iXLen.iXLen(iXLen, , iXLen, iXLen) - define void @test_sf_vc_fvf_se_e16mf4( %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvf_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -3395,8 +2875,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1f16.nxv1f16.f16.iXLen(iXLen, iXLen, , half, iXLen) - define @test_sf_vc_v_fvf_se_e16mf4( %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvf_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -3408,8 +2886,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fv.se.nxv1f16.nxv1f16.iXLen.f16(iXLen, , half, iXLen) - define void @test_sf_vc_fvf_se_e16mf2( %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvf_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -3421,8 +2897,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2f16.nxv2f16.f16.iXLen(iXLen, iXLen, , half, iXLen) - define @test_sf_vc_v_fvf_se_e16mf2( %vs2, half %rs1, iXLen 
%vl) { ; CHECK-LABEL: test_sf_vc_v_fvf_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -3434,8 +2908,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fv.se.nxv2f16.nxv2f16.iXLen.f16(iXLen, , half, iXLen) - define void @test_sf_vc_fvf_se_e16m1( %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvf_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -3447,8 +2919,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4f16.nxv4f16.f16.iXLen(iXLen, iXLen, , half, iXLen) - define @test_sf_vc_v_fvf_se_e16m1( %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvf_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -3460,8 +2930,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fv.se.nxv4f16.nxv4f16.iXLen.f16(iXLen, , half, iXLen) - define void @test_sf_vc_fvf_se_e16m2( %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvf_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -3473,8 +2941,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8f16.nxv8f16.f16.iXLen(iXLen, iXLen, , half, iXLen) - define @test_sf_vc_v_fvf_se_e16m2( %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvf_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -3486,8 +2952,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fv.se.nxv8f16.nxv8f16.iXLen.f16(iXLen, , half, iXLen) - define void @test_sf_vc_fvf_se_e16m4( %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvf_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -3499,8 +2963,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16f16.nxv16f16.f16.iXLen(iXLen, iXLen, , half, iXLen) - define @test_sf_vc_v_fvf_se_e16m4( %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvf_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -3512,8 +2974,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fv.se.nxv16f16.nxv16f16.iXLen.f16(iXLen, , half, iXLen) - define void @test_sf_vc_fvf_se_e16m8( %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvf_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -3525,8 +2985,6 @@ entry: ret void } -declare void 
@llvm.riscv.sf.vc.fv.se.iXLen.nxv32f16.nxv32f16.f16.iXLen(iXLen, iXLen, , half, iXLen) - define @test_sf_vc_v_fvf_se_e16m8( %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvf_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -3538,8 +2996,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fv.se.nxv32f16.nxv32f16.iXLen.f16(iXLen, , half, iXLen) - define void @test_sf_vc_fvf_se_e32mf2( %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvf_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -3551,8 +3007,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1f32.nxv1f32.f32.iXLen(iXLen, iXLen, , float, iXLen) - define @test_sf_vc_v_fvf_se_e32mf2( %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvf_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -3564,8 +3018,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fv.se.nxv1f32.nxv1f32.iXLen.f32(iXLen, , float, iXLen) - define void @test_sf_vc_fvf_se_e32m1( %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvf_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -3577,8 +3029,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2f32.nxv2f32.f32.iXLen(iXLen, iXLen, , float, iXLen) - define @test_sf_vc_v_fvf_se_e32m1( %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvf_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -3590,8 +3040,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fv.se.nxv2f32.nxv2f32.iXLen.f32(iXLen, , float, iXLen) - define void @test_sf_vc_fvf_se_e32m2( %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvf_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -3603,8 +3051,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4f32.nxv4f32.f32.iXLen(iXLen, iXLen, , float, iXLen) - define @test_sf_vc_v_fvf_se_e32m2( %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvf_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -3616,8 +3062,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fv.se.nxv4f32.nxv4f32.iXLen.f32(iXLen, , float, iXLen) - define void 
@test_sf_vc_fvf_se_e32m4( %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvf_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -3629,8 +3073,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8f32.nxv8f32.f32.iXLen(iXLen, iXLen, , float, iXLen) - define @test_sf_vc_v_fvf_se_e32m4( %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvf_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -3642,8 +3084,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fv.se.nxv8f32.nxv8f32.iXLen.f32(iXLen, , float, iXLen) - define void @test_sf_vc_fvf_se_e32m8( %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvf_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -3655,8 +3095,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16f32.nxv16f32.f32.iXLen(iXLen, iXLen, , float, iXLen) - define @test_sf_vc_v_fvf_se_e32m8( %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvf_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -3668,4 +3106,3 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fv.se.nxv16f32.nxv16f32.iXLen.f32(iXLen, , float, iXLen) diff --git a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvv.ll b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvv.ll index b09e9f0e3365c..775b4e6f8affb 100644 --- a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvv.ll @@ -15,8 +15,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i8.nxv1i8.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvv_se_e8mf4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -28,8 +26,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i8.nxv2i8.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvv_se_e8mf2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -41,8 +37,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i8.nxv4i8.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvv_se_e8m1( %vd, %vs2, %vs1, iXLen 
%vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -54,8 +48,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i8.nxv8i8.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvv_se_e8m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -67,8 +59,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16i8.nxv16i8.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvv_se_e8m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -80,8 +70,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32i8.nxv32i8.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvv_se_e8m8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -94,8 +82,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv64i8.nxv64i8.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvv_se_e16mf4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -107,8 +93,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i16.nxv1i16.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvv_se_e16mf2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -120,8 +104,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i16.nxv2i16.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvv_se_e16m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -133,8 +115,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i16.nxv4i16.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvv_se_e16m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -146,8 +126,6 @@ entry: ret void } -declare void 
@llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i16.nxv8i16.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvv_se_e16m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -159,8 +137,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16i16.nxv16i16.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvv_se_e16m8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -173,8 +149,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32i16.nxv32i16.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvv_se_e32mf2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -186,8 +160,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i32.nxv1i32.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvv_se_e32m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -199,8 +171,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i32.nxv2i32.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvv_se_e32m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -212,8 +182,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i32.nxv4i32.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvv_se_e32m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -225,8 +193,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i32.nxv8i32.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvv_se_e32m8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -239,8 +205,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16i32.nxv16i32.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvv_se_e64m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_vvv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -252,8 +216,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i64.nxv1i64.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvv_se_e64m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -265,8 +227,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i64.nxv2i64.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvv_se_e64m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -278,8 +238,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i64.nxv4i64.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvv_se_e64m8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -292,8 +250,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i64.nxv8i64.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_se_e8mf8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -305,8 +261,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv1i8.iXLen.nxv1i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_se_e8mf4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -318,8 +272,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv2i8.iXLen.nxv2i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_se_e8mf2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -331,8 +283,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv4i8.iXLen.nxv4i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_se_e8m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -344,8 +294,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv8i8.iXLen.nxv8i8.iXLen(iXLen, , , , iXLen) - define 
@test_sf_vc_v_vvv_se_e8m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -357,8 +305,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv16i8.iXLen.nxv16i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_se_e8m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -370,8 +316,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv32i8.iXLen.nxv32i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_se_e8m8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -384,8 +328,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv64i8.iXLen.nxv64i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_se_e16mf4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -397,8 +339,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv1i16.iXLen.nxv1i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_se_e16mf2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -410,8 +350,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv2i16.iXLen.nxv2i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_se_e16m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -423,8 +361,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv4i16.iXLen.nxv4i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_se_e16m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -436,8 +372,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv8i16.iXLen.nxv8i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_se_e16m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -449,8 +383,6 @@ entry: ret %0 } -declare 
@llvm.riscv.sf.vc.v.vvv.se.nxv16i16.iXLen.nxv16i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_se_e16m8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -463,8 +395,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv32i16.iXLen.nxv32i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_se_e32mf2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -476,8 +406,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv1i32.iXLen.nxv1i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_se_e32m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -489,8 +417,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv2i32.iXLen.nxv2i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_se_e32m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -502,8 +428,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv4i32.iXLen.nxv4i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_se_e32m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -515,8 +439,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv8i32.iXLen.nxv8i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_se_e32m8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -529,8 +451,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv16i32.iXLen.nxv16i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_se_e64m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -542,8 +462,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv1i64.iXLen.nxv1i64.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_se_e64m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e64m2: ; CHECK: # 
%bb.0: # %entry @@ -555,8 +473,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv2i64.iXLen.nxv2i64.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_se_e64m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -568,8 +484,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv4i64.iXLen.nxv4i64.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_se_e64m8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -582,8 +496,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv8i64.iXLen.nxv8i64.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_e8mf8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e8mf8: ; CHECK: # %bb.0: # %entry @@ -595,8 +507,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.nxv1i8.iXLen.nxv1i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_e8mf4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e8mf4: ; CHECK: # %bb.0: # %entry @@ -608,8 +518,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.nxv2i8.iXLen.nxv2i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_e8mf2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e8mf2: ; CHECK: # %bb.0: # %entry @@ -621,8 +529,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.nxv4i8.iXLen.nxv4i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_e8m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e8m1: ; CHECK: # %bb.0: # %entry @@ -634,8 +540,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.nxv8i8.iXLen.nxv8i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_e8m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e8m2: ; CHECK: # %bb.0: # %entry @@ -647,8 +551,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.nxv16i8.iXLen.nxv16i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_e8m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e8m4: ; CHECK: # %bb.0: # %entry 
@@ -660,8 +562,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.nxv32i8.iXLen.nxv32i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_e8m8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e8m8: ; CHECK: # %bb.0: # %entry @@ -674,8 +574,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.nxv64i8.iXLen.nxv64i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_e16mf4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e16mf4: ; CHECK: # %bb.0: # %entry @@ -687,8 +585,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.nxv1i16.iXLen.nxv1i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_e16mf2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e16mf2: ; CHECK: # %bb.0: # %entry @@ -700,8 +596,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.nxv2i16.iXLen.nxv2i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_e16m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e16m1: ; CHECK: # %bb.0: # %entry @@ -713,8 +607,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.nxv4i16.iXLen.nxv4i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_e16m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e16m2: ; CHECK: # %bb.0: # %entry @@ -726,8 +618,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.nxv8i16.iXLen.nxv8i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_e16m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e16m4: ; CHECK: # %bb.0: # %entry @@ -739,8 +629,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.nxv16i16.iXLen.nxv16i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_e16m8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e16m8: ; CHECK: # %bb.0: # %entry @@ -753,8 +641,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.nxv32i16.iXLen.nxv32i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_e32mf2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e32mf2: ; CHECK: # %bb.0: # %entry @@ -766,8 
+652,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.nxv1i32.iXLen.nxv1i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_e32m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e32m1: ; CHECK: # %bb.0: # %entry @@ -779,8 +663,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.nxv2i32.iXLen.nxv2i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_e32m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e32m2: ; CHECK: # %bb.0: # %entry @@ -792,8 +674,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.nxv4i32.iXLen.nxv4i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_e32m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e32m4: ; CHECK: # %bb.0: # %entry @@ -805,8 +685,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.nxv8i32.iXLen.nxv8i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_e32m8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e32m8: ; CHECK: # %bb.0: # %entry @@ -819,8 +697,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.nxv16i32.iXLen.nxv16i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_e64m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e64m1: ; CHECK: # %bb.0: # %entry @@ -832,8 +708,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.nxv1i64.iXLen.nxv1i64.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_e64m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e64m2: ; CHECK: # %bb.0: # %entry @@ -845,8 +719,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.nxv2i64.iXLen.nxv2i64.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_e64m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e64m4: ; CHECK: # %bb.0: # %entry @@ -858,8 +730,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.nxv4i64.iXLen.nxv4i64.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvv_e64m8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvv_e64m8: ; CHECK: # %bb.0: # %entry @@ -872,8 +742,6 @@ entry: ret 
%0 } -declare @llvm.riscv.sf.vc.v.vvv.nxv8i64.iXLen.nxv8i64.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_xvv_se_e8mf8( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -885,8 +753,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1i8.i8.iXLen(iXLen, , , i8, iXLen) - define void @test_sf_vc_xvv_se_e8mf4( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -898,8 +764,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2i8.i8.iXLen(iXLen, , , i8, iXLen) - define void @test_sf_vc_xvv_se_e8mf2( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -911,8 +775,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4i8.i8.iXLen(iXLen, , , i8, iXLen) - define void @test_sf_vc_xvv_se_e8m1( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -924,8 +786,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8i8.i8.iXLen(iXLen, , , i8, iXLen) - define void @test_sf_vc_xvv_se_e8m2( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -937,8 +797,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16i8.i8.iXLen(iXLen, , , i8, iXLen) - define void @test_sf_vc_xvv_se_e8m4( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -950,8 +808,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32i8.i8.iXLen(iXLen, , , i8, iXLen) - define void @test_sf_vc_xvv_se_e8m8( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -963,8 +819,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv64i8.i8.iXLen(iXLen, , , i8, iXLen) - define void 
@test_sf_vc_xvv_se_e16mf4( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -976,8 +830,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1i16.i16.iXLen(iXLen, , , i16, iXLen) - define void @test_sf_vc_xvv_se_e16mf2( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -989,8 +841,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2i16.i16.iXLen(iXLen, , , i16, iXLen) - define void @test_sf_vc_xvv_se_e16m1( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -1002,8 +852,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4i16.i16.iXLen(iXLen, , , i16, iXLen) - define void @test_sf_vc_xvv_se_e16m2( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -1015,8 +863,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8i16.i16.iXLen(iXLen, , , i16, iXLen) - define void @test_sf_vc_xvv_se_e16m4( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -1028,8 +874,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16i16.i16.iXLen(iXLen, , , i16, iXLen) - define void @test_sf_vc_xvv_se_e16m8( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -1041,8 +885,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32i16.i16.iXLen(iXLen, , , i16, iXLen) - define void @test_sf_vc_xvv_se_e32mf2( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1054,8 +896,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv1i32.iXLen.iXLen(iXLen, , , i32, iXLen) - define void @test_sf_vc_xvv_se_e32m1( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; 
CHECK-LABEL: test_sf_vc_xvv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1067,8 +907,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv2i32.iXLen.iXLen(iXLen, , , i32, iXLen) - define void @test_sf_vc_xvv_se_e32m2( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1080,8 +918,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv4i32.iXLen.iXLen(iXLen, , , i32, iXLen) - define void @test_sf_vc_xvv_se_e32m4( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1093,8 +929,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv8i32.iXLen.iXLen(iXLen, , , i32, iXLen) - define void @test_sf_vc_xvv_se_e32m8( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -1106,8 +940,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv16i32.iXLen.iXLen(iXLen, , , i32, iXLen) - define @test_sf_vc_v_xvv_se_e8mf8( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1119,8 +951,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv1i8.iXLen.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvv_se_e8mf4( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1132,8 +962,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv2i8.iXLen.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvv_se_e8mf2( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1145,8 +973,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv4i8.iXLen.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvv_se_e8m1( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -1158,8 +984,6 @@ entry: ret %0 } -declare 
@llvm.riscv.sf.vc.v.xvv.se.nxv8i8.iXLen.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvv_se_e8m2( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -1171,8 +995,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv16i8.iXLen.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvv_se_e8m4( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -1184,8 +1006,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv32i8.iXLen.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvv_se_e8m8( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -1197,8 +1017,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv64i8.iXLen.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvv_se_e16mf4( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1210,8 +1028,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv1i16.iXLen.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_xvv_se_e16mf2( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1223,8 +1039,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv2i16.iXLen.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_xvv_se_e16m1( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -1236,8 +1050,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv4i16.iXLen.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_xvv_se_e16m2( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -1249,8 +1061,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv8i16.iXLen.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_xvv_se_e16m4( %vd, %vs2, i16 
zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -1262,8 +1072,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv16i16.iXLen.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_xvv_se_e16m8( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -1275,8 +1083,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv32i16.iXLen.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_xvv_se_e32mf2( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1288,8 +1094,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv1i32.iXLen.i32.iXLen(iXLen, , , i32, iXLen) - define @test_sf_vc_v_xvv_se_e32m1( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1301,8 +1105,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv2i32.iXLen.i32.iXLen(iXLen, , , i32, iXLen) - define @test_sf_vc_v_xvv_se_e32m2( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1314,8 +1116,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv4i32.iXLen.i32.iXLen(iXLen, , , i32, iXLen) - define @test_sf_vc_v_xvv_se_e32m4( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1327,8 +1127,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv8i32.iXLen.i32.iXLen(iXLen, , , i32, iXLen) - define @test_sf_vc_v_xvv_se_e32m8( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -1340,8 +1138,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv16i32.iXLen.i32.iXLen(iXLen, , , i32, iXLen) - define @test_sf_vc_v_xvv_e8mf8( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1353,8 +1149,6 @@ 
entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.nxv1i8.iXLen.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvv_e8mf4( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1366,8 +1160,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.nxv2i8.iXLen.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvv_e8mf2( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1379,8 +1171,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.nxv4i8.iXLen.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvv_e8m1( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e8m1: ; CHECK: # %bb.0: # %entry @@ -1392,8 +1182,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.nxv8i8.iXLen.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvv_e8m2( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e8m2: ; CHECK: # %bb.0: # %entry @@ -1405,8 +1193,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.nxv16i8.iXLen.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvv_e8m4( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e8m4: ; CHECK: # %bb.0: # %entry @@ -1418,8 +1204,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.nxv32i8.iXLen.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvv_e8m8( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e8m8: ; CHECK: # %bb.0: # %entry @@ -1431,8 +1215,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.nxv64i8.iXLen.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvv_e16mf4( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1444,8 +1226,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.nxv1i16.iXLen.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_xvv_e16mf2( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_v_xvv_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1457,8 +1237,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.nxv2i16.iXLen.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_xvv_e16m1( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e16m1: ; CHECK: # %bb.0: # %entry @@ -1470,8 +1248,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.nxv4i16.iXLen.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_xvv_e16m2( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e16m2: ; CHECK: # %bb.0: # %entry @@ -1483,8 +1259,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.nxv8i16.iXLen.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_xvv_e16m4( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e16m4: ; CHECK: # %bb.0: # %entry @@ -1496,8 +1270,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.nxv16i16.iXLen.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_xvv_e16m8( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e16m8: ; CHECK: # %bb.0: # %entry @@ -1509,8 +1281,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.nxv32i16.iXLen.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_xvv_e32mf2( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1522,8 +1292,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.nxv1i32.iXLen.i32.iXLen(iXLen, , , i32, iXLen) - define @test_sf_vc_v_xvv_e32m1( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e32m1: ; CHECK: # %bb.0: # %entry @@ -1535,8 +1303,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.nxv2i32.iXLen.i32.iXLen(iXLen, , , i32, iXLen) - define @test_sf_vc_v_xvv_e32m2( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e32m2: ; CHECK: # %bb.0: # %entry @@ -1548,8 +1314,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.nxv4i32.iXLen.i32.iXLen(iXLen, , , i32, iXLen) - 
define @test_sf_vc_v_xvv_e32m4( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e32m4: ; CHECK: # %bb.0: # %entry @@ -1561,8 +1325,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.nxv8i32.iXLen.i32.iXLen(iXLen, , , i32, iXLen) - define @test_sf_vc_v_xvv_e32m8( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvv_e32m8: ; CHECK: # %bb.0: # %entry @@ -1574,8 +1336,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.nxv16i32.iXLen.i32.iXLen(iXLen, , , i32, iXLen) - define void @test_sf_vc_ivv_se_e8mf8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1587,8 +1347,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivv_se_e8mf4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1600,8 +1358,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivv_se_e8mf2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1613,8 +1369,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivv_se_e8m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -1626,8 +1380,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivv_se_e8m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -1639,8 +1391,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivv_se_e8m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -1652,8 +1402,6 @@ entry: ret void } -declare void 
@llvm.riscv.sf.vc.ivv.se.iXLen.nxv32i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivv_se_e8m8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -1665,8 +1413,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv64i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivv_se_e16mf4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1678,8 +1424,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivv_se_e16mf2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1691,8 +1435,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivv_se_e16m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -1704,8 +1446,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivv_se_e16m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -1717,8 +1457,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivv_se_e16m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -1730,8 +1468,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivv_se_e16m8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -1743,8 +1479,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivv_se_e32mf2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_ivv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1756,8 +1490,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivv_se_e32m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1769,8 +1501,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivv_se_e32m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1782,8 +1512,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivv_se_e32m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1795,8 +1523,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivv_se_e32m8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -1808,8 +1534,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivv_se_e64m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -1821,8 +1545,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i64.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivv_se_e64m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -1834,8 +1556,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i64.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivv_se_e64m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -1847,8 +1567,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i64.iXLen.iXLen(iXLen, , , 
iXLen, iXLen) - define void @test_sf_vc_ivv_se_e64m8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -1860,8 +1578,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i64.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_se_e8mf8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1873,8 +1589,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv1i8.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_se_e8mf4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1886,8 +1600,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv2i8.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_se_e8mf2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1899,8 +1611,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv4i8.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_se_e8m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -1912,8 +1622,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv8i8.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_se_e8m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -1925,8 +1633,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv16i8.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_se_e8m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -1938,8 +1644,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv32i8.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_se_e8m8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e8m8: ; CHECK: # %bb.0: # %entry @@ -1951,8 +1655,6 @@ entry: ret %0 } -declare 
@llvm.riscv.sf.vc.v.ivv.se.nxv64i8.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_se_e16mf4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1964,8 +1666,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv1i16.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_se_e16mf2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1977,8 +1677,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv2i16.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_se_e16m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -1990,8 +1688,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv4i16.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_se_e16m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -2003,8 +1699,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv8i16.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_se_e16m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -2016,8 +1710,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv16i16.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_se_e16m8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -2029,8 +1721,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv32i16.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_se_e32mf2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2042,8 +1732,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv1i32.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_se_e32m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e32m1: ; CHECK: # %bb.0: # 
%entry @@ -2055,8 +1743,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv2i32.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_se_e32m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2068,8 +1754,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv4i32.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_se_e32m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2081,8 +1765,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv8i32.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_se_e32m8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2094,8 +1776,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv16i32.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_se_e64m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -2107,8 +1787,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv1i64.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_se_e64m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -2120,8 +1798,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv2i64.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_se_e64m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -2133,8 +1809,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv4i64.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_se_e64m8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -2146,8 +1820,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv8i64.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_e8mf8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_v_ivv_e8mf8: ; CHECK: # %bb.0: # %entry @@ -2159,8 +1831,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv1i8.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_e8mf4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e8mf4: ; CHECK: # %bb.0: # %entry @@ -2172,8 +1842,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv2i8.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_e8mf2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e8mf2: ; CHECK: # %bb.0: # %entry @@ -2185,8 +1853,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv4i8.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_e8m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e8m1: ; CHECK: # %bb.0: # %entry @@ -2198,8 +1864,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv8i8.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_e8m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e8m2: ; CHECK: # %bb.0: # %entry @@ -2211,8 +1875,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv16i8.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_e8m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e8m4: ; CHECK: # %bb.0: # %entry @@ -2224,8 +1886,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv32i8.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_e8m8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e8m8: ; CHECK: # %bb.0: # %entry @@ -2237,8 +1897,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv64i8.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_e16mf4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e16mf4: ; CHECK: # %bb.0: # %entry @@ -2250,8 +1908,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv1i16.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_e16mf2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e16mf2: ; CHECK: # 
%bb.0: # %entry @@ -2263,8 +1919,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv2i16.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_e16m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e16m1: ; CHECK: # %bb.0: # %entry @@ -2276,8 +1930,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv4i16.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_e16m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e16m2: ; CHECK: # %bb.0: # %entry @@ -2289,8 +1941,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv8i16.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_e16m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e16m4: ; CHECK: # %bb.0: # %entry @@ -2302,8 +1952,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv16i16.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_e16m8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e16m8: ; CHECK: # %bb.0: # %entry @@ -2315,8 +1963,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv32i16.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_e32mf2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2328,8 +1974,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv1i32.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_e32m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e32m1: ; CHECK: # %bb.0: # %entry @@ -2341,8 +1985,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv2i32.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_e32m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e32m2: ; CHECK: # %bb.0: # %entry @@ -2354,8 +1996,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv4i32.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_e32m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e32m4: ; CHECK: # %bb.0: # %entry @@ -2367,8 
+2007,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv8i32.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_e32m8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e32m8: ; CHECK: # %bb.0: # %entry @@ -2380,8 +2018,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv16i32.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_e64m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e64m1: ; CHECK: # %bb.0: # %entry @@ -2393,8 +2029,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv1i64.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_e64m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e64m2: ; CHECK: # %bb.0: # %entry @@ -2406,8 +2040,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv2i64.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_e64m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e64m4: ; CHECK: # %bb.0: # %entry @@ -2419,8 +2051,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv4i64.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivv_e64m8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivv_e64m8: ; CHECK: # %bb.0: # %entry @@ -2432,8 +2062,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv8i64.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_fvvv_se_e16mf4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -2445,8 +2073,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f16.nxv1i16.nxv1i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_fv_fvv_se_e16mf4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -2458,8 +2084,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv1f16.nxv1i16.nxv1i16.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_fvvv_se_e16mf2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e16mf2: ; 
CHECK: # %bb.0: # %entry @@ -2471,8 +2095,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f16.nxv2i16.nxv2i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_fv_fvv_se_e16mf2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -2484,8 +2106,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv2f16.nxv2i16.nxv2i16.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_fvvv_se_e16m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -2497,8 +2117,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f16.nxv4i16.nxv4i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_fv_fvv_se_e16m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -2510,8 +2128,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv4f16.nxv4i16.nxv4i16.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_fvvv_se_e16m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -2523,8 +2139,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f16.nxv8i16.nxv8i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_fv_fvv_se_e16m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -2536,8 +2150,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv8f16.nxv8i16.nxv8i16.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_fvvv_se_e16m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -2549,8 +2161,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16f16.nxv16i16.nxv16i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_fv_fvv_se_e16m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -2562,8 +2172,6 @@ entry: ret %0 } -declare 
@llvm.riscv.sf.vc.v.vvv.se.nxv16f16.nxv16i16.nxv16i16.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_fvvv_se_e16m8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -2576,8 +2184,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32f16.nxv32i16.nxv32i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_fv_fvv_se_e16m8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -2590,8 +2196,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv32f16.nxv32i16.nxv32i16.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_fvvv_se_e32mf2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2603,8 +2207,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f32.nxv1i32.nxv1i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_fv_fvv_se_e32mf2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2616,8 +2218,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv1f32.nxv1i32.nxv1i32.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_fvvv_se_e32m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2629,8 +2229,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f32.nxv2i32.nxv2i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_fv_fvv_se_e32m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2642,8 +2240,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv2f32.nxv2i32.nxv2i32.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_fvvv_se_e32m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2655,8 +2251,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f32.nxv4i32.nxv4i32.iXLen(iXLen, , , , iXLen) - define 
@test_sf_vc_fv_fvv_se_e32m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2668,8 +2262,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv4f32.nxv4i32.nxv4i32.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_fvvv_se_e32m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2681,8 +2273,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f32.nxv8i32.nxv8i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_fv_fvv_se_e32m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2694,8 +2284,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv8f32.nxv8i32.nxv8i32.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_fvvv_se_e32m8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2708,8 +2296,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16f32.nxv16i32.nxv16i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_fv_fvv_se_e32m8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2722,8 +2308,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv16f32.nxv16i32.nxv16i32.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_fvvv_se_e64m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -2735,8 +2319,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f64.nxv1i64.nxv1i64.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_fv_fvv_se_e64m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -2748,8 +2330,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv1f64.nxv1i64.nxv1i64.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_fvvv_se_e64m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e64m2: ; CHECK: # %bb.0: # 
%entry @@ -2761,8 +2341,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f64.nxv2i64.nxv2i64.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_fv_fvv_se_e64m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -2774,8 +2352,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv2f64.nxv2i64.nxv2i64.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_fvvv_se_e64m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -2787,8 +2363,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f64.nxv4i64.nxv4i64.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_fv_fvv_se_e64m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -2800,8 +2374,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv4f64.nxv4i64.nxv4i64.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_fvvv_se_e64m8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -2814,8 +2386,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f64.nxv8i64.nxv8i64.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_fv_fvv_se_e64m8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -2828,8 +2398,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvv.se.nxv8f64.nxv8i64.nxv8i64.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_fvvx_se_e16mf4( %vd, %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvx_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -2841,8 +2409,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1f16.nxv1i16.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_fvvx_se_e16mf4( %vd, %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -2854,8 +2420,6 @@ entry: ret %0 } -declare 
@llvm.riscv.sf.vc.v.xvv.se.nxv1f16.nxv1f16.nxv1i16.i16.iXLen(iXLen, , , i16, iXLen) - define void @test_sf_vc_fvvx_se_e16mf2( %vd, %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvx_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -2867,8 +2431,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2f16.nxv2i16.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_fvvx_se_e16mf2( %vd, %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -2880,8 +2442,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv2f16.nxv2f16.nxv2i16.i16.iXLen(iXLen, , , i16, iXLen) - define void @test_sf_vc_fvvx_se_e16m1( %vd, %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvx_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -2893,8 +2453,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4f16.nxv4i16.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_fvvx_se_e16m1( %vd, %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -2906,8 +2464,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv4f16.nxv4f16.nxv4i16.i16.iXLen(iXLen, , , i16, iXLen) - define void @test_sf_vc_fvvx_se_e16m2( %vd, %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvx_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -2919,8 +2475,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8f16.nxv8i16.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_fvvx_se_e16m2( %vd, %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -2932,8 +2486,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv8f16.nxv8f16.nxv8i16.i16.iXLen(iXLen, , , i16, iXLen) - define void @test_sf_vc_fvvx_se_e16m4( %vd, %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvx_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -2945,8 +2497,6 @@ entry: ret void } -declare void 
@llvm.riscv.sf.vc.xvv.se.iXLen.nxv16f16.nxv16i16.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_fvvx_se_e16m4( %vd, %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -2958,8 +2508,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv16f16.nxv16f16.nxv16i16.i16.iXLen(iXLen, , , i16, iXLen) - define void @test_sf_vc_fvvx_se_e16m8( %vd, %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvx_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -2971,8 +2519,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32f16.nxv32i16.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_fvvx_se_e16m8( %vd, %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -2984,8 +2530,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv32f16.nxv32f16.nxv32i16.i16.iXLen(iXLen, , , i16, iXLen) - define void @test_sf_vc_fvvx_se_e32mf2( %vd, %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvx_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2997,8 +2541,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1f32.nxv1i32.i32.iXLen(iXLen, , , i32, iXLen) - define @test_sf_vc_v_fvvx_se_e32mf2( %vd, %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -3010,8 +2552,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv1f32.nxv1f32.nxv1i32.i32.iXLen(iXLen, , , i32, iXLen) - define void @test_sf_vc_fvvx_se_e32m1( %vd, %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvx_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -3023,8 +2563,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2f32.nxv2i32.i32.iXLen(iXLen, , , i32, iXLen) - define @test_sf_vc_v_fvvx_se_e32m1( %vd, %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -3036,8 +2574,6 @@ entry: ret %0 } -declare 
@llvm.riscv.sf.vc.v.xvv.se.nxv2f32.nxv2f32.nxv2i32.i32.iXLen(iXLen, , , i32, iXLen) - define void @test_sf_vc_fvvx_se_e32m2( %vd, %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvx_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -3049,8 +2585,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4f32.nxv4i32.i32.iXLen(iXLen, , , i32, iXLen) - define @test_sf_vc_v_fvvx_se_e32m2( %vd, %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -3062,8 +2596,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv4f32.nxv4f32.nxv4i32.i32.iXLen(iXLen, , , i32, iXLen) - define void @test_sf_vc_fvvx_se_e32m4( %vd, %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvx_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -3075,8 +2607,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8f32.nxv8i32.i32.iXLen(iXLen, , , i32, iXLen) - define @test_sf_vc_v_fvvx_se_e32m4( %vd, %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -3088,8 +2618,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv8f32.nxv8f32.nxv8i32.i32.iXLen(iXLen, , , i32, iXLen) - define void @test_sf_vc_fvvx_se_e32m8( %vd, %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvx_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -3101,8 +2629,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16f32.nxv16i32.i32.iXLen(iXLen, , , i32, iXLen) - define @test_sf_vc_v_fvvx_se_e32m8( %vd, %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -3114,8 +2640,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvv.se.nxv16f32.nxv16f32.nxv16i32.i32.iXLen(iXLen, , , i32, iXLen) - define void @test_sf_vc_fvvi_se_e16mf4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvi_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -3127,8 +2651,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1f16.nxv1i16.iXLen.iXLen(iXLen, , , 
iXLen, iXLen) - define @test_sf_vc_fv_fvvi_se_e16mf4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -3140,8 +2662,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv1f16.nxv1f16.nxv1i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_fvvi_se_e16mf2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvi_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -3153,8 +2673,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2f16.nxv2i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_fv_fvvi_se_e16mf2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -3166,8 +2684,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv2f16.nxv2f16.nxv2i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_fvvi_se_e16m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvi_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -3179,8 +2695,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4f16.nxv4i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_fv_fvvi_se_e16m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -3192,8 +2706,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv4f16.nxv4f16.nxv4i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_fvvi_se_e16m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvi_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -3205,8 +2717,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8f16.nxv8i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_fv_fvvi_se_e16m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -3218,8 +2728,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv8f16.nxv8f16.nxv8i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_fvvi_se_e16m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_fvvi_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -3231,8 +2739,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16f16.nxv16i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_fv_fvvi_se_e16m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -3244,8 +2750,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv16f16.nxv16f16.nxv16i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_fvvi_se_e16m8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvi_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -3257,8 +2761,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32f16.nxv32i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_fv_fvvi_se_e16m8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -3270,8 +2772,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv32f16.nxv32f16.nxv32i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_fvvi_se_e32mf2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvi_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -3283,8 +2783,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1f32.nxv1i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_fv_fvvi_se_e32mf2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -3296,8 +2794,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv1f32.nxv1f32.nxv1i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_fvvi_se_e32m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvi_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -3309,8 +2805,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2f32.nxv2i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_fv_fvvi_se_e32m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -3322,8 +2816,6 @@ entry: ret %0 } 
-declare @llvm.riscv.sf.vc.v.ivv.se.nxv2f32.nxv2f32.nxv2i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_fvvi_se_e32m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvi_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -3335,8 +2827,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4f32.nxv4i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_fv_fvvi_se_e32m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -3348,8 +2838,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv4f32.nxv4f32.nxv4i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_fvvi_se_e32m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvi_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -3361,8 +2849,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8f32.nxv8i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_fv_fvvi_se_e32m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -3374,8 +2860,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv8f32.nxv8f32.nxv8i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_fvvi_se_e32m8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvi_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -3387,8 +2871,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16f32.nxv16i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_fv_fvvi_se_e32m8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -3400,8 +2882,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv16f32.nxv16f32.nxv16i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_fvvf_se_e16mf4( %vd, %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvf_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -3413,8 +2893,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1f16.nxv1i16.f16.iXLen(iXLen, , , half, 
iXLen) - define @test_sf_vc_fv_fvvf_se_e16mf4( %vd, %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -3426,8 +2904,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fvv.se.nxv1f16.nxv1f16.nxv1i16.f16.iXLen(iXLen, , , half %rs1, iXLen) - define void @test_sf_vc_fvvf_se_e16mf2( %vd, %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvf_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -3439,8 +2915,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2f16.nxv2i16.f16.iXLen(iXLen, , , half, iXLen) - define @test_sf_vc_fv_fvvf_se_e16mf2( %vd, %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -3452,8 +2926,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fvv.se.nxv2f16.nxv2f16.nxv2i16.f16.iXLen(iXLen, , , half %rs1, iXLen) - define void @test_sf_vc_fvvf_se_e16m1( %vd, %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvf_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -3465,8 +2937,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4f16.nxv4i16.f16.iXLen(iXLen, , , half, iXLen) - define @test_sf_vc_fv_fvvf_se_e16m1( %vd, %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -3478,8 +2948,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fvv.se.nxv4f16.nxv4f16.nxv4i16.f16.iXLen(iXLen, , , half %rs1, iXLen) - define void @test_sf_vc_fvvf_se_e16m2( %vd, %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvf_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -3491,8 +2959,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8f16.nxv8i16.f16.iXLen(iXLen, , , half, iXLen) - define @test_sf_vc_fv_fvvf_se_e16m2( %vd, %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -3504,8 +2970,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fvv.se.nxv8f16.nxv8f16.nxv8i16.f16.iXLen(iXLen, , , half %rs1, iXLen) - define void 
@test_sf_vc_fvvf_se_e16m4( %vd, %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvf_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -3517,8 +2981,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16f16.nxv16i16.f16.iXLen(iXLen, , , half, iXLen) - define @test_sf_vc_fv_fvvf_se_e16m4( %vd, %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -3530,8 +2992,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fvv.se.nxv16f16.nxv16f16.nxv16i16.f16.iXLen(iXLen, , , half %rs1, iXLen) - define void @test_sf_vc_fvvf_se_e16m8( %vd, %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvf_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -3543,8 +3003,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv32f16.nxv32i16.f16.iXLen(iXLen, , , half, iXLen) - define @test_sf_vc_fv_fvvf_se_e16m8( %vd, %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e16m8: ; CHECK: # %bb.0: # %entry @@ -3556,8 +3014,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fvv.se.nxv32f16.nxv32f16.nxv32i16.f16.iXLen(iXLen, , , half %rs1, iXLen) - define void @test_sf_vc_fvvf_se_e32mf2( %vd, %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvf_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -3569,8 +3025,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1f32.nxv1i32.f32.iXLen(iXLen, , , float, iXLen) - define @test_sf_vc_fv_fvvf_se_e32mf2( %vd, %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -3582,8 +3036,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fvv.se.nxv1f32.nxv1f32.nxv1i32.f32.iXLen(iXLen, , , float %rs1, iXLen) - define void @test_sf_vc_fvvf_se_e32m1( %vd, %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvf_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -3595,8 +3047,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2f32.nxv2i32.f32.iXLen(iXLen, , , float, iXLen) - define 
@test_sf_vc_fv_fvvf_se_e32m1( %vd, %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -3608,8 +3058,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fvv.se.nxv2f32.nxv2f32.nxv2i32.f32.iXLen(iXLen, , , float %rs1, iXLen) - define void @test_sf_vc_fvvf_se_e32m2( %vd, %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvf_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -3621,8 +3069,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4f32.nxv4i32.f32.iXLen(iXLen, , , float, iXLen) - define @test_sf_vc_fv_fvvf_se_e32m2( %vd, %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -3634,8 +3080,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fvv.se.nxv4f32.nxv4f32.nxv4i32.f32.iXLen(iXLen, , , float %rs1, iXLen) - define void @test_sf_vc_fvvf_se_e32m4( %vd, %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvf_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -3647,8 +3091,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8f32.nxv8i32.f32.iXLen(iXLen, , , float, iXLen) - define @test_sf_vc_fv_fvvf_se_e32m4( %vd, %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -3660,8 +3102,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fvv.se.nxv8f32.nxv8f32.nxv8i32.f32.iXLen(iXLen, , , float %rs1, iXLen) - define void @test_sf_vc_fvvf_se_e32m8( %vd, %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fvvf_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -3673,8 +3113,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16f32.nxv16i32.f32.iXLen(iXLen, , , float, iXLen) - define @test_sf_vc_fv_fvvf_se_e32m8( %vd, %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -3686,4 +3124,3 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fvv.se.nxv16f32.nxv16f32.nxv16i32.f32.iXLen(iXLen, , , float %rs1, iXLen) diff --git 
a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvw.ll b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvw.ll index 29b9238b8e9c0..b51047a53ed7a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvw.ll +++ b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvw.ll @@ -15,8 +15,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1i16.nxv1i8.nxv1i8.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvw_se_e8mf4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -28,8 +26,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2i16.nxv2i8.nxv2i8.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvw_se_e8mf2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -41,8 +37,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4i16.nxv4i8.nxv4i8.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvw_se_e8m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -54,8 +48,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8i16.nxv8i8.nxv8i8.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvw_se_e8m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -67,8 +59,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16i16.nxv16i8.nxv16i8.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvw_se_e8m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -80,8 +70,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv32i16.nxv32i8.nxv32i8.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvw_se_e16mf4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -93,8 +81,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1i32.nxv1i16.nxv1i16.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvw_se_e16mf2( 
%vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -106,8 +92,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2i32.nxv2i16.nxv2i16.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvw_se_e16m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -119,8 +103,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4i32.nxv4i16.nxv4i16.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvw_se_e16m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -132,8 +114,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8i32.nxv8i16.nxv8i16.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvw_se_e16m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -145,8 +125,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16i32.nxv16i16.nxv16i16.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvw_se_e32mf2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -158,8 +136,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1i64.nxv1i32.nxv1i32.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvw_se_e32m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -171,8 +147,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2i64.nxv2i32.nxv2i32.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvw_se_e32m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -184,8 +158,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4i64.nxv4i32.nxv4i32.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_vvw_se_e32m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_vvw_se_e32m4: ; CHECK: # %bb.0: # %entry @@ 
-197,8 +169,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8i64.nxv8i32.nxv8i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_se_e8mf8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -210,8 +180,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.se.nxv1i16.iXLen.nxv1i8.nxv1i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_se_e8mf4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -223,8 +191,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.se.nxv2i16.iXLen.nxv2i8.nxv2i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_se_e8mf2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -236,8 +202,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.se.nxv4i16.iXLen.nxv4i8.nxv4i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_se_e8m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -249,8 +213,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.se.nxv8i16.iXLen.nxv8i8.nxv8i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_se_e8m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -262,8 +224,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.se.nxv16i16.iXLen.nxv16i8.nxv16i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_se_e8m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -275,8 +235,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.se.nxv32i16.iXLen.nxv32i8.nxv32i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_se_e16mf4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -288,8 +246,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.se.nxv1i32.iXLen.nxv1i16.nxv1i16.iXLen(iXLen, , , , iXLen) - define 
@test_sf_vc_v_vvw_se_e16mf2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -301,8 +257,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.se.nxv2i32.iXLen.nxv2i16.nxv2i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_se_e16m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -314,8 +268,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.se.nxv4i32.iXLen.nxv4i16.nxv4i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_se_e16m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -327,8 +279,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.se.nxv8i32.iXLen.nxv8i16.nxv8i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_se_e16m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -340,8 +290,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.se.nxv16i32.iXLen.nxv16i16.nxv16i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_se_e32mf2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -353,8 +301,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.se.nxv1i64.iXLen.nxv1i32.nxv1i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_se_e32m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -366,8 +312,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.se.nxv2i64.iXLen.nxv2i32.nxv2i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_se_e32m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -379,8 +323,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.se.nxv4i64.iXLen.nxv4i32.nxv4i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_se_e32m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -392,8 +334,6 
@@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.se.nxv8i64.iXLen.nxv8i32.nxv8i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_e8mf8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e8mf8: ; CHECK: # %bb.0: # %entry @@ -405,8 +345,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.nxv1i16.iXLen.nxv1i8.nxv1i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_e8mf4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e8mf4: ; CHECK: # %bb.0: # %entry @@ -418,8 +356,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.nxv2i16.iXLen.nxv2i8.nxv2i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_e8mf2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e8mf2: ; CHECK: # %bb.0: # %entry @@ -431,8 +367,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.nxv4i16.iXLen.nxv4i8.nxv4i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_e8m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e8m1: ; CHECK: # %bb.0: # %entry @@ -444,8 +378,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.nxv8i16.iXLen.nxv8i8.nxv8i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_e8m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e8m2: ; CHECK: # %bb.0: # %entry @@ -457,8 +389,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.nxv16i16.iXLen.nxv16i8.nxv16i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_e8m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e8m4: ; CHECK: # %bb.0: # %entry @@ -470,8 +400,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.nxv32i16.iXLen.nxv32i8.nxv32i8.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_e16mf4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e16mf4: ; CHECK: # %bb.0: # %entry @@ -483,8 +411,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.nxv1i32.iXLen.nxv1i16.nxv1i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_e16mf2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e16mf2: ; CHECK: 
# %bb.0: # %entry @@ -496,8 +422,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.nxv2i32.iXLen.nxv2i16.nxv2i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_e16m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e16m1: ; CHECK: # %bb.0: # %entry @@ -509,8 +433,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.nxv4i32.iXLen.nxv4i16.nxv4i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_e16m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e16m2: ; CHECK: # %bb.0: # %entry @@ -522,8 +444,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.nxv8i32.iXLen.nxv8i16.nxv8i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_e16m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e16m4: ; CHECK: # %bb.0: # %entry @@ -535,8 +455,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.nxv16i32.iXLen.nxv16i16.nxv16i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_e32mf2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e32mf2: ; CHECK: # %bb.0: # %entry @@ -548,8 +466,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.nxv1i64.iXLen.nxv1i32.nxv1i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_e32m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e32m1: ; CHECK: # %bb.0: # %entry @@ -561,8 +477,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.nxv2i64.iXLen.nxv2i32.nxv2i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_e32m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e32m2: ; CHECK: # %bb.0: # %entry @@ -574,8 +488,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.nxv4i64.iXLen.nxv4i32.nxv4i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_v_vvw_e32m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_vvw_e32m4: ; CHECK: # %bb.0: # %entry @@ -587,8 +499,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.nxv8i64.iXLen.nxv8i32.nxv8i32.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_xvw_se_e8mf8( %vd, %vs2, i8 zeroext 
%rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -600,8 +510,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1i16.nxv1i8.i8.iXLen(iXLen, , , i8, iXLen) - define void @test_sf_vc_xvw_se_e8mf4( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -613,8 +521,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2i16.nxv2i8.i8.iXLen(iXLen, , , i8, iXLen) - define void @test_sf_vc_xvw_se_e8mf2( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -626,8 +532,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4i16.nxv4i8.i8.iXLen(iXLen, , , i8, iXLen) - define void @test_sf_vc_xvw_se_e8m1( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -639,8 +543,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8i16.nxv8i8.i8.iXLen(iXLen, , , i8, iXLen) - define void @test_sf_vc_xvw_se_e8m2( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -652,8 +554,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16i16.nxv16i8.i8.iXLen(iXLen, , , i8, iXLen) - define void @test_sf_vc_xvw_se_e8m4( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -665,8 +565,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv32i16.nxv32i8.i8.iXLen(iXLen, , , i8, iXLen) - define void @test_sf_vc_xvw_se_e16mf4( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -678,8 +576,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1i32.nxv1i16.i16.iXLen(iXLen, , , i16, iXLen) - define void @test_sf_vc_xvw_se_e16mf2( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_xvw_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -691,8 +587,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2i32.nxv2i16.i16.iXLen(iXLen, , , i16, iXLen) - define void @test_sf_vc_xvw_se_e16m1( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -704,8 +598,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4i32.nxv4i16.i16.iXLen(iXLen, , , i16, iXLen) - define void @test_sf_vc_xvw_se_e16m2( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -717,8 +609,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8i32.nxv8i16.i16.iXLen(iXLen, , , i16, iXLen) - define void @test_sf_vc_xvw_se_e16m4( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -730,8 +620,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16i32.nxv16i16.i16.iXLen(iXLen, , , i16, iXLen) - define void @test_sf_vc_xvw_se_e32mf2( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -743,8 +631,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1i64.nxv1i32.i32.iXLen(iXLen, , , i32, iXLen) - define void @test_sf_vc_xvw_se_e32m1( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -756,8 +642,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2i64.nxv2i32.i32.iXLen(iXLen, , , i32, iXLen) - define void @test_sf_vc_xvw_se_e32m2( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -769,8 +653,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4i64.nxv4i32.i32.iXLen(iXLen, , , i32, iXLen) - define void @test_sf_vc_xvw_se_e32m4( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_xvw_se_e32m4: ; 
CHECK: # %bb.0: # %entry @@ -782,8 +664,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8i64.nxv8i32.i32.iXLen(iXLen, , , i32, iXLen) - define @test_sf_vc_v_xvw_se_e8mf8( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -795,8 +675,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.se.nxv1i16.iXLen.nxv1i8.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvw_se_e8mf4( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -808,8 +686,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.se.nxv2i16.iXLen.nxv2i8.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvw_se_e8mf2( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -821,8 +697,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.se.nxv4i16.iXLen.nxv4i8.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvw_se_e8m1( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -834,8 +708,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.se.nxv8i16.iXLen.nxv8i8.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvw_se_e8m2( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -847,8 +719,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.se.nxv16i16.iXLen.nxv16i8.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvw_se_e8m4( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -860,8 +730,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.se.nxv32i16.iXLen.nxv32i8.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvw_se_e16mf4( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -873,8 +741,6 @@ entry: ret %0 } -declare 
@llvm.riscv.sf.vc.v.xvw.se.nxv1i32.iXLen.nxv1i16.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_xvw_se_e16mf2( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -886,8 +752,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.se.nxv2i32.iXLen.nxv2i16.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_xvw_se_e16m1( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -899,8 +763,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.se.nxv4i32.iXLen.nxv4i16.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_xvw_se_e16m2( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -912,8 +774,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.se.nxv8i32.iXLen.nxv8i16.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_xvw_se_e16m4( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -925,8 +785,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.se.nxv16i32.iXLen.nxv16i16.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_xvw_se_e32mf2( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -938,8 +796,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.se.nxv1i64.i32.nxv1i32.iXLen.iXLen(iXLen, , , i32, iXLen) - define @test_sf_vc_v_xvw_se_e32m1( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -951,8 +807,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.se.nxv2i64.i32.nxv2i32.iXLen.iXLen(iXLen, , , i32, iXLen) - define @test_sf_vc_v_xvw_se_e32m2( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -964,8 +818,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.se.nxv4i64.i32.nxv4i32.iXLen.iXLen(iXLen, , 
, i32, iXLen) - define @test_sf_vc_v_xvw_se_e32m4( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -977,8 +829,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.se.nxv8i64.i32.nxv8i32.iXLen.iXLen(iXLen, , , i32, iXLen) - define @test_sf_vc_v_xvw_e8mf8( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e8mf8: ; CHECK: # %bb.0: # %entry @@ -990,8 +840,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.nxv1i16.iXLen.nxv1i8.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvw_e8mf4( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1003,8 +851,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.nxv2i16.iXLen.nxv2i8.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvw_e8mf2( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1016,8 +862,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.nxv4i16.iXLen.nxv4i8.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvw_e8m1( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e8m1: ; CHECK: # %bb.0: # %entry @@ -1029,8 +873,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.nxv8i16.iXLen.nxv8i8.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvw_e8m2( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e8m2: ; CHECK: # %bb.0: # %entry @@ -1042,8 +884,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.nxv16i16.iXLen.nxv16i8.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvw_e8m4( %vd, %vs2, i8 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e8m4: ; CHECK: # %bb.0: # %entry @@ -1055,8 +895,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.nxv32i16.iXLen.nxv32i8.i8.iXLen(iXLen, , , i8, iXLen) - define @test_sf_vc_v_xvw_e16mf4( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e16mf4: ; CHECK: # %bb.0: 
# %entry @@ -1068,8 +906,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.nxv1i32.iXLen.nxv1i16.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_xvw_e16mf2( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1081,8 +917,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.nxv2i32.iXLen.nxv2i16.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_xvw_e16m1( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e16m1: ; CHECK: # %bb.0: # %entry @@ -1094,8 +928,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.nxv4i32.iXLen.nxv4i16.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_xvw_e16m2( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e16m2: ; CHECK: # %bb.0: # %entry @@ -1107,8 +939,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.nxv8i32.iXLen.nxv8i16.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_xvw_e16m4( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e16m4: ; CHECK: # %bb.0: # %entry @@ -1120,8 +950,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.nxv16i32.iXLen.nxv16i16.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_v_xvw_e32mf2( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1133,8 +961,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.nxv1i64.iXLen.nxv1i32.i32.iXLen(iXLen, , , i32, iXLen) - define @test_sf_vc_v_xvw_e32m1( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e32m1: ; CHECK: # %bb.0: # %entry @@ -1146,8 +972,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.nxv2i64.iXLen.nxv2i32.i32.iXLen(iXLen, , , i32, iXLen) - define @test_sf_vc_v_xvw_e32m2( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e32m2: ; CHECK: # %bb.0: # %entry @@ -1159,8 +983,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.nxv4i64.iXLen.nxv4i32.i32.iXLen(iXLen, , , 
i32, iXLen) - define @test_sf_vc_v_xvw_e32m4( %vd, %vs2, i32 signext %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_xvw_e32m4: ; CHECK: # %bb.0: # %entry @@ -1172,8 +994,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.nxv8i64.iXLen.nxv8i32.i32.iXLen(iXLen, , , i32, iXLen) - define void @test_sf_vc_ivw_se_e8mf8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1185,8 +1005,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1i16.nxv1i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivw_se_e8mf4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1198,8 +1016,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2i16.nxv2i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivw_se_e8mf2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1211,8 +1027,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4i16.nxv4i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivw_se_e8m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -1224,8 +1038,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8i16.nxv8i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivw_se_e8m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -1237,8 +1049,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16i16.nxv16i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivw_se_e8m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -1250,8 +1060,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv32i16.nxv32i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivw_se_e16mf4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e16mf4: ; CHECK: 
# %bb.0: # %entry @@ -1263,8 +1071,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1i32.nxv1i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivw_se_e16mf2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1276,8 +1082,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2i32.nxv2i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivw_se_e16m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -1289,8 +1093,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4i32.nxv4i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivw_se_e16m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -1302,8 +1104,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8i32.nxv8i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivw_se_e16m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -1315,8 +1115,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16i32.nxv16i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivw_se_e32mf2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1328,8 +1126,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1i64.nxv1i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivw_se_e32m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1341,8 +1137,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2i64.nxv2i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivw_se_e32m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1354,8 +1148,6 @@ entry: ret void } -declare void 
@llvm.riscv.sf.vc.ivw.se.iXLen.nxv4i64.nxv4i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_ivw_se_e32m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_ivw_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1367,8 +1159,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8i64.nxv8i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_se_e8mf8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1380,8 +1170,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.se.nxv1i16.iXLen.nxv1i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_se_e8mf4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1393,8 +1181,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.se.nxv2i16.iXLen.nxv2i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_se_e8mf2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1406,8 +1192,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.se.nxv4i16.iXLen.nxv4i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_se_e8m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e8m1: ; CHECK: # %bb.0: # %entry @@ -1419,8 +1203,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.se.nxv8i16.iXLen.nxv8i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_se_e8m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e8m2: ; CHECK: # %bb.0: # %entry @@ -1432,8 +1214,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.se.nxv16i16.iXLen.nxv16i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_se_e8m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e8m4: ; CHECK: # %bb.0: # %entry @@ -1445,8 +1225,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.se.nxv32i16.iXLen.nxv32i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_se_e16mf4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_v_ivw_se_e16mf4: ; CHECK: # %bb.0: # %entry @@ -1458,8 +1236,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.se.nxv1i32.iXLen.nxv1i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_se_e16mf2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1471,8 +1247,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.se.nxv2i32.iXLen.nxv2i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_se_e16m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e16m1: ; CHECK: # %bb.0: # %entry @@ -1484,8 +1258,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.se.nxv4i32.iXLen.nxv4i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_se_e16m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e16m2: ; CHECK: # %bb.0: # %entry @@ -1497,8 +1269,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.se.nxv8i32.iXLen.nxv8i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_se_e16m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e16m4: ; CHECK: # %bb.0: # %entry @@ -1510,8 +1280,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.se.nxv16i32.iXLen.nxv16i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_se_e32mf2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1523,8 +1291,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.se.nxv1i64.iXLen.nxv1i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_se_e32m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1536,8 +1302,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.se.nxv2i64.iXLen.nxv2i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_se_e32m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1549,8 +1313,6 @@ entry: ret %0 } -declare 
@llvm.riscv.sf.vc.v.ivw.se.nxv4i64.iXLen.nxv4i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_se_e32m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1562,8 +1324,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.se.nxv8i64.iXLen.nxv8i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_e8mf8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e8mf8: ; CHECK: # %bb.0: # %entry @@ -1575,8 +1335,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.nxv1i16.iXLen.nxv1i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_e8mf4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e8mf4: ; CHECK: # %bb.0: # %entry @@ -1588,8 +1346,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.nxv2i16.iXLen.nxv2i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_e8mf2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e8mf2: ; CHECK: # %bb.0: # %entry @@ -1601,8 +1357,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.nxv4i16.iXLen.nxv4i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_e8m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e8m1: ; CHECK: # %bb.0: # %entry @@ -1614,8 +1368,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.nxv8i16.iXLen.nxv8i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_e8m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e8m2: ; CHECK: # %bb.0: # %entry @@ -1627,8 +1379,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.nxv16i16.iXLen.nxv16i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_e8m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e8m4: ; CHECK: # %bb.0: # %entry @@ -1640,8 +1390,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.nxv32i16.iXLen.nxv32i8.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_e16mf4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e16mf4: ; CHECK: # %bb.0: # %entry @@ 
-1653,8 +1401,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.nxv1i32.iXLen.nxv1i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_e16mf2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e16mf2: ; CHECK: # %bb.0: # %entry @@ -1666,8 +1412,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.nxv2i32.iXLen.nxv2i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_e16m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e16m1: ; CHECK: # %bb.0: # %entry @@ -1679,8 +1423,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.nxv4i32.iXLen.nxv4i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_e16m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e16m2: ; CHECK: # %bb.0: # %entry @@ -1692,8 +1434,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.nxv8i32.iXLen.nxv8i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_e16m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e16m4: ; CHECK: # %bb.0: # %entry @@ -1705,8 +1445,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.nxv16i32.iXLen.nxv16i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_e32mf2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1718,8 +1456,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.nxv1i64.iXLen.nxv1i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_e32m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e32m1: ; CHECK: # %bb.0: # %entry @@ -1731,8 +1467,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.nxv2i64.iXLen.nxv2i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_e32m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_v_ivw_e32m2: ; CHECK: # %bb.0: # %entry @@ -1744,8 +1478,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.nxv4i64.iXLen.nxv4i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_v_ivw_e32m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_v_ivw_e32m4: ; CHECK: # %bb.0: # %entry @@ -1757,8 +1489,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.nxv8i64.iXLen.nxv8i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_fwvv_se_e32mf2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1770,8 +1500,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1f32.nxv1i16.nxv1i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_fw_fwvvv_se_e32mf2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -1783,8 +1511,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.se.nxv1f32.nxv1i16.nxv1i16.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_fwvv_se_e32m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1796,8 +1522,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2f32.nxv2i16.nxv2i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_fw_fwvvv_se_e32m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -1809,8 +1533,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.se.nxv2f32.nxv2i16.nxv2i16.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_fwvv_se_e32m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1822,8 +1544,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4f32.nxv4i16.nxv4i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_fw_fwvvv_se_e32m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -1835,8 +1555,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.se.nxv4f32.nxv4i16.nxv4i16.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_fwvv_se_e32m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1848,8 +1566,6 @@ entry: ret void } -declare void 
@llvm.riscv.sf.vc.vvw.se.iXLen.nxv8f32.nxv8i16.nxv8i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_fw_fwvvv_se_e32m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -1861,8 +1577,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.se.nxv8f32.nxv8i16.nxv8i16.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_fwvv_se_e32m8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -1874,8 +1588,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16f32.nxv16i16.nxv16i16.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_fw_fwvvv_se_e32m8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -1887,8 +1599,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.se.nxv16f32.nxv16i16.nxv16i16.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_fwvv_se_e64m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -1900,8 +1610,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1f64.nxv1i32.nxv1i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_fw_fwvvv_se_e64m1( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -1913,8 +1621,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.se.nxv1f64.nxv1i32.nxv1i32.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_fwvv_se_e64m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -1926,8 +1632,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2f64.nxv2i32.nxv2i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_fw_fwvvv_se_e64m2( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -1939,8 +1643,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.se.nxv2f64.nxv2i32.nxv2i32.iXLen(iXLen, , , , iXLen) - define void 
@test_sf_vc_fwvv_se_e64m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -1952,8 +1654,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4f64.nxv4i32.nxv4i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_fw_fwvvv_se_e64m4( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -1965,8 +1665,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.se.nxv4f64.nxv4i32.nxv4i32.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_fwvv_se_e64m8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -1978,8 +1676,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8f64.nxv8i32.nxv8i32.iXLen(iXLen, , , , iXLen) - define @test_sf_vc_fw_fwvvv_se_e64m8( %vd, %vs2, %vs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -1991,8 +1687,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.vvw.se.nxv8f64.nxv8i32.nxv8i32.iXLen(iXLen, , , , iXLen) - define void @test_sf_vc_fwvx_se_e32mf2( %vd, %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvx_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2004,8 +1698,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1f32.nxv1i16.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_w_fwvx_se_e32mf2( %vd, %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_w_fwvx_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2017,8 +1709,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.se.nxv1f32.nxv1f16.nxv1i16.i16.iXLen(iXLen, , , i16, iXLen) - define void @test_sf_vc_fwvx_se_e32m1( %vd, %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvx_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2030,8 +1720,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2f32.nxv2i16.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_w_fwvx_se_e32m1( %vd, %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: 
test_sf_vc_w_fwvx_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2043,8 +1731,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.se.nxv2f32.nxv2f16.nxv2i16.i16.iXLen(iXLen, , , i16, iXLen) - define void @test_sf_vc_fwvx_se_e32m2( %vd, %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvx_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2056,8 +1742,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4f32.nxv4i16.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_w_fwvx_se_e32m2( %vd, %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_w_fwvx_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2069,8 +1753,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.se.nxv4f32.nxv4f16.nxv4i16.i16.iXLen(iXLen, , , i16, iXLen) - define void @test_sf_vc_fwvx_se_e32m4( %vd, %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvx_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2082,8 +1764,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8f32.nxv8i16.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_w_fwvx_se_e32m4( %vd, %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_w_fwvx_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2095,8 +1775,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.se.nxv8f32.nxv8f16.nxv8i16.i16.iXLen(iXLen, , , i16, iXLen) - define void @test_sf_vc_fwvx_se_e32m8( %vd, %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvx_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2108,8 +1786,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16f32.nxv16i16.i16.iXLen(iXLen, , , i16, iXLen) - define @test_sf_vc_w_fwvx_se_e32m8( %vd, %vs2, i16 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_w_fwvx_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2121,8 +1797,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.se.nxv16f32.nxv16f16.nxv16i16.i16.iXLen(iXLen, , , i16, iXLen) - define void @test_sf_vc_fwvx_se_e64m1( %vd, %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvx_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -2134,8 
+1808,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1f64.nxv1i32.i32.iXLen(iXLen, , , i32, iXLen) - define @test_sf_vc_w_fwvx_se_e64m1( %vd, %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_w_fwvx_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -2147,8 +1819,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.se.nxv1f64.nxv1f32.nxv1i32.i32.iXLen(iXLen, , , i32, iXLen) - define void @test_sf_vc_fwvx_se_e64m2( %vd, %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvx_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -2160,8 +1830,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2f64.nxv2i32.i32.iXLen(iXLen, , , i32, iXLen) - define @test_sf_vc_w_fwvx_se_e64m2( %vd, %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_w_fwvx_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -2173,8 +1841,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.se.nxv2f64.nxv2f32.nxv2i32.i32.iXLen(iXLen, , , i32, iXLen) - define void @test_sf_vc_fwvx_se_e64m4( %vd, %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvx_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -2186,8 +1852,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4f64.nxv4i32.i32.iXLen(iXLen, , , i32, iXLen) - define @test_sf_vc_w_fwvx_se_e64m4( %vd, %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_w_fwvx_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -2199,8 +1863,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.xvw.se.nxv4f64.nxv4f32.nxv4i32.i32.iXLen(iXLen, , , i32, iXLen) - define void @test_sf_vc_fwvx_se_e64m8( %vd, %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvx_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -2212,8 +1874,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8f64.nxv8i32.i32.iXLen(iXLen, , , i32, iXLen) - define @test_sf_vc_w_fwvx_se_e64m8( %vd, %vs2, i32 %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_w_fwvx_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -2225,8 +1885,6 @@ entry: ret %0 } -declare 
@llvm.riscv.sf.vc.v.xvw.se.nxv8f64.nxv8f32.nxv8i32.i32.iXLen(iXLen, , , i32, iXLen) - define void @test_sf_vc_fwvi_se_e32mf2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvi_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2238,8 +1896,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1f32.nxv1i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_fw_fwvi_se_e32mf2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2251,8 +1907,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.se.nxv1f32.nxv1f16.nxv1i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_fwvi_se_e32m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvi_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2264,8 +1918,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2f32.nxv2i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_fw_fwvi_se_e32m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2277,8 +1929,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.se.nxv2f32.nxv2f16.nxv2i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_fwvi_se_e32m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvi_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2290,8 +1940,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4f32.nxv4i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_fw_fwvi_se_e32m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2303,8 +1951,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.se.nxv4f32.nxv4f16.nxv4i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_fwvi_se_e32m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvi_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2316,8 +1962,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8f32.nxv8i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define 
@test_sf_vc_fw_fwvi_se_e32m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2329,8 +1973,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.se.nxv8f32.nxv8f16.nxv8i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_fwvi_se_e32m8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvi_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2342,8 +1984,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16f32.nxv16i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_fw_fwvi_se_e32m8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2355,8 +1995,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.se.nxv16f32.nxv16f16.nxv16i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_fwvi_se_e64m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvi_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -2368,8 +2006,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1f64.nxv1i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_fw_fwvi_se_e64m1( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -2381,8 +2017,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.se.nxv1f64.nxv1f32.nxv1i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_fwvi_se_e64m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvi_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -2394,8 +2028,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2f64.nxv2i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_fw_fwvi_se_e64m2( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -2407,8 +2039,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.se.nxv2f64.nxv2f32.nxv2i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_fwvi_se_e64m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvi_se_e64m4: ; CHECK: 
# %bb.0: # %entry @@ -2420,8 +2050,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4f64.nxv4i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_fw_fwvi_se_e64m4( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -2433,8 +2061,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.se.nxv4f64.nxv4f32.nxv4i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_fwvi_se_e64m8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvi_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -2446,8 +2072,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8f64.nxv8i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define @test_sf_vc_fw_fwvi_se_e64m8( %vd, %vs2, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -2459,8 +2083,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.se.nxv8f64.nxv8f32.nxv8i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) - define void @test_sf_vc_fwvf_se_e32mf2( %vd, %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvf_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2472,8 +2094,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1f32.nxv1i16.f16.iXLen(iXLen, , , half, iXLen) - define @test_sf_vc_fw_fwvf_se_e32mf2( %vd, %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e32mf2: ; CHECK: # %bb.0: # %entry @@ -2485,8 +2105,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fvw.se.nxv1f32.nxv1f16.nxv1i16.f16.iXLen(iXLen, , , half, iXLen) - define void @test_sf_vc_fwvf_se_e32m1( %vd, %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvf_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2498,8 +2116,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2f32.nxv2i16.f16.iXLen(iXLen, , , half, iXLen) - define @test_sf_vc_fw_fwvf_se_e32m1( %vd, %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e32m1: ; CHECK: # %bb.0: # %entry @@ -2511,8 +2127,6 @@ entry: ret %0 } -declare 
@llvm.riscv.sf.vc.v.fvw.se.nxv2f32.nxv2f16.nxv2i16.f16.iXLen(iXLen, , , half, iXLen) - define void @test_sf_vc_fwvf_se_e32m2( %vd, %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvf_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2524,8 +2138,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4f32.nxv4i16.f16.iXLen(iXLen, , , half, iXLen) - define @test_sf_vc_fw_fwvf_se_e32m2( %vd, %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e32m2: ; CHECK: # %bb.0: # %entry @@ -2537,8 +2149,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fvw.se.nxv4f32.nxv4f16.nxv4i16.f16.iXLen(iXLen, , , half, iXLen) - define void @test_sf_vc_fwvf_se_e32m4( %vd, %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvf_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2550,8 +2160,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8f32.nxv8i16.f16.iXLen(iXLen, , , half, iXLen) - define @test_sf_vc_fw_fwvf_se_e32m4( %vd, %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e32m4: ; CHECK: # %bb.0: # %entry @@ -2563,8 +2171,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fvw.se.nxv8f32.nxv8f16.nxv8i16.f16.iXLen(iXLen, , , half, iXLen) - define void @test_sf_vc_fwvf_se_e32m8( %vd, %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvf_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2576,8 +2182,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv16f32.nxv16i16.f16.iXLen(iXLen, , , half, iXLen) - define @test_sf_vc_fw_fwvf_se_e32m8( %vd, %vs2, half %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e32m8: ; CHECK: # %bb.0: # %entry @@ -2589,8 +2193,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fvw.se.nxv16f32.nxv16f16.nxv16i16.f16.iXLen(iXLen, , , half, iXLen) - define void @test_sf_vc_fwvf_se_e64m1( %vd, %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvf_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -2602,8 +2204,6 @@ entry: ret void } -declare void 
@llvm.riscv.sf.vc.fvw.se.iXLen.nxv1f64.nxv1i32.f32.iXLen(iXLen, , , float, iXLen) - define @test_sf_vc_fw_fwvf_se_e64m1( %vd, %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e64m1: ; CHECK: # %bb.0: # %entry @@ -2615,8 +2215,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fvw.se.nxv1f64.nxv1f32.nxv1i32.f32.iXLen(iXLen, , , float, iXLen) - define void @test_sf_vc_fwvf_se_e64m2( %vd, %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvf_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -2628,8 +2226,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2f64.nxv2i32.f32.iXLen(iXLen, , , float, iXLen) - define @test_sf_vc_fw_fwvf_se_e64m2( %vd, %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e64m2: ; CHECK: # %bb.0: # %entry @@ -2641,8 +2237,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fvw.se.nxv2f64.nxv2f32.nxv2i32.f32.iXLen(iXLen, , , float, iXLen) - define void @test_sf_vc_fwvf_se_e64m4( %vd, %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvf_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -2654,8 +2248,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4f64.nxv4i32.f32.iXLen(iXLen, , , float, iXLen) - define @test_sf_vc_fw_fwvf_se_e64m4( %vd, %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e64m4: ; CHECK: # %bb.0: # %entry @@ -2667,8 +2259,6 @@ entry: ret %0 } -declare @llvm.riscv.sf.vc.v.fvw.se.nxv4f64.nxv4f32.nxv4i32.f32.iXLen(iXLen, , , float, iXLen) - define void @test_sf_vc_fwvf_se_e64m8( %vd, %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fwvf_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -2680,8 +2270,6 @@ entry: ret void } -declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8f64.nxv8i32.f32.iXLen(iXLen, , , float, iXLen) - define @test_sf_vc_fw_fwvf_se_e64m8( %vd, %vs2, float %rs1, iXLen %vl) { ; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e64m8: ; CHECK: # %bb.0: # %entry @@ -2693,4 +2281,3 @@ entry: ret %0 } -declare 
@llvm.riscv.sf.vc.v.fvw.se.nxv8f64.nxv8f32.nxv8i32.f32.iXLen(iXLen, , , float, iXLen) diff --git a/llvm/test/CodeGen/RISCV/rvv/xtheadvdot-vmaqa.ll b/llvm/test/CodeGen/RISCV/rvv/xtheadvdot-vmaqa.ll index 03f92c7229c18..b8dddbb531370 100644 --- a/llvm/test/CodeGen/RISCV/rvv/xtheadvdot-vmaqa.ll +++ b/llvm/test/CodeGen/RISCV/rvv/xtheadvdot-vmaqa.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xtheadvdot \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.th.vmaqa.nxv1i32.nxv4i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_th_vmaqa_vv_nxv1i32_nxv4i8_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqa_vv_nxv1i32_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -27,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqa.mask.nxv1i32.nxv4i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqa_mask_vv_nxv1i32_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqa_mask_vv_nxv1i32_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -51,13 +37,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqa.nxv2i32.nxv8i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_th_vmaqa_vv_nxv2i32_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqa_vv_nxv2i32_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -74,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqa.mask.nxv2i32.nxv8i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqa_mask_vv_nxv2i32_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqa_mask_vv_nxv2i32_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -98,13 +70,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqa.nxv4i32.nxv16i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_th_vmaqa_vv_nxv4i32_nxv16i8_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqa_vv_nxv4i32_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -121,13 +86,6 @@ entry: ret %a } -declare 
@llvm.riscv.th.vmaqa.mask.nxv4i32.nxv16i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqa_mask_vv_nxv4i32_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqa_mask_vv_nxv4i32_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -145,13 +103,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqa.nxv8i32.nxv32i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_th_vmaqa_vv_nxv8i32_nxv32i8_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqa_vv_nxv8i32_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -168,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqa.mask.nxv8i32.nxv32i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqa_mask_vv_nxv8i32_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqa_mask_vv_nxv8i32_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -192,14 +136,6 @@ entry: ret %a } - -declare @llvm.riscv.th.vmaqa.nxv1i32.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_th_vmaqa_vx_nxv1i32_i8_nxv4i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqa_vx_nxv1i32_i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -216,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqa.mask.nxv1i32.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqa_mask_vx_nxv1i32_i8_nxv4i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqa_mask_vx_nxv1i32_i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -240,13 +169,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqa.nxv2i32.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_th_vmaqa_vx_nxv2i32_i8_nxv8i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqa_vx_nxv2i32_i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -263,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqa.mask.nxv2i32.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqa_mask_vx_nxv2i32_i8_nxv8i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_th_vmaqa_mask_vx_nxv2i32_i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -287,13 +202,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqa.nxv4i32.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_th_vmaqa_vx_nxv4i32_i8_nxv16i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqa_vx_nxv4i32_i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -310,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqa.mask.nxv4i32.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqa_mask_vx_nxv4i32_i8_nxv16i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqa_mask_vx_nxv4i32_i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -334,13 +235,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqa.nxv8i32.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_th_vmaqa_vx_nxv8i32_i8_nxv32i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqa_vx_nxv8i32_i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -357,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqa.mask.nxv8i32.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqa_mask_vx_nxv8i32_i8_nxv32i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqa_mask_vx_nxv8i32_i8_nxv32i8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/xtheadvdot-vmaqasu.ll b/llvm/test/CodeGen/RISCV/rvv/xtheadvdot-vmaqasu.ll index b17035f377c61..7f945cf7f35bb 100644 --- a/llvm/test/CodeGen/RISCV/rvv/xtheadvdot-vmaqasu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/xtheadvdot-vmaqasu.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xtheadvdot \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.th.vmaqasu.nxv1i32.nxv4i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_th_vmaqasu_vv_nxv1i32_nxv4i8_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqasu_vv_nxv1i32_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -27,13 +20,6 @@ entry: ret %a } -declare 
@llvm.riscv.th.vmaqasu.mask.nxv1i32.nxv4i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqasu_mask_vv_nxv1i32_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqasu_mask_vv_nxv1i32_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -51,13 +37,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqasu.nxv2i32.nxv8i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_th_vmaqasu_vv_nxv2i32_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqasu_vv_nxv2i32_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -74,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqasu.mask.nxv2i32.nxv8i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqasu_mask_vv_nxv2i32_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqasu_mask_vv_nxv2i32_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -98,13 +70,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqasu.nxv4i32.nxv16i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_th_vmaqasu_vv_nxv4i32_nxv16i8_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqasu_vv_nxv4i32_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -121,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqasu.mask.nxv4i32.nxv16i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqasu_mask_vv_nxv4i32_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqasu_mask_vv_nxv4i32_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -145,13 +103,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqasu.nxv8i32.nxv32i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_th_vmaqasu_vv_nxv8i32_nxv32i8_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqasu_vv_nxv8i32_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -168,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqasu.mask.nxv8i32.nxv32i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqasu_mask_vv_nxv8i32_nxv32i8_nxv32i8( %0, %1, %2, %3, 
iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqasu_mask_vv_nxv8i32_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -192,14 +136,6 @@ entry: ret %a } - -declare @llvm.riscv.th.vmaqasu.nxv1i32.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_th_vmaqasu_vx_nxv1i32_i8_nxv4i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqasu_vx_nxv1i32_i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -216,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqasu.mask.nxv1i32.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqasu_mask_vx_nxv1i32_i8_nxv4i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqasu_mask_vx_nxv1i32_i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -240,13 +169,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqasu.nxv2i32.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_th_vmaqasu_vx_nxv2i32_i8_nxv8i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqasu_vx_nxv2i32_i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -263,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqasu.mask.nxv2i32.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqasu_mask_vx_nxv2i32_i8_nxv8i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqasu_mask_vx_nxv2i32_i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -287,13 +202,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqasu.nxv4i32.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_th_vmaqasu_vx_nxv4i32_i8_nxv16i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqasu_vx_nxv4i32_i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -310,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqasu.mask.nxv4i32.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqasu_mask_vx_nxv4i32_i8_nxv16i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqasu_mask_vx_nxv4i32_i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -334,13 +235,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqasu.nxv8i32.i8( - , 
- i8, - , - iXLen, - iXLen); - define @intrinsic_th_vmaqasu_vx_nxv8i32_i8_nxv32i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqasu_vx_nxv8i32_i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -357,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqasu.mask.nxv8i32.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqasu_mask_vx_nxv8i32_i8_nxv32i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqasu_mask_vx_nxv8i32_i8_nxv32i8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/xtheadvdot-vmaqau.ll b/llvm/test/CodeGen/RISCV/rvv/xtheadvdot-vmaqau.ll index 809b81fa38435..0ae95e0994033 100644 --- a/llvm/test/CodeGen/RISCV/rvv/xtheadvdot-vmaqau.ll +++ b/llvm/test/CodeGen/RISCV/rvv/xtheadvdot-vmaqau.ll @@ -4,13 +4,6 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xtheadvdot \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.th.vmaqau.nxv1i32.nxv4i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_th_vmaqau_vv_nxv1i32_nxv4i8_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqau_vv_nxv1i32_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -27,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqau.mask.nxv1i32.nxv4i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqau_mask_vv_nxv1i32_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqau_mask_vv_nxv1i32_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -51,13 +37,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqau.nxv2i32.nxv8i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_th_vmaqau_vv_nxv2i32_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqau_vv_nxv2i32_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -74,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqau.mask.nxv2i32.nxv8i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqau_mask_vv_nxv2i32_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_th_vmaqau_mask_vv_nxv2i32_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -98,13 +70,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqau.nxv4i32.nxv16i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_th_vmaqau_vv_nxv4i32_nxv16i8_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqau_vv_nxv4i32_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -121,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqau.mask.nxv4i32.nxv16i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqau_mask_vv_nxv4i32_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqau_mask_vv_nxv4i32_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -145,13 +103,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqau.nxv8i32.nxv32i8( - , - , - , - iXLen, - iXLen); - define @intrinsic_th_vmaqau_vv_nxv8i32_nxv32i8_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqau_vv_nxv8i32_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -168,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqau.mask.nxv8i32.nxv32i8( - , - , - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqau_mask_vv_nxv8i32_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqau_mask_vv_nxv8i32_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -192,14 +136,6 @@ entry: ret %a } - -declare @llvm.riscv.th.vmaqau.nxv1i32.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_th_vmaqau_vx_nxv1i32_i8_nxv4i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqau_vx_nxv1i32_i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -216,13 +152,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqau.mask.nxv1i32.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqau_mask_vx_nxv1i32_i8_nxv4i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqau_mask_vx_nxv1i32_i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -240,13 +169,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqau.nxv2i32.i8( - , - 
i8, - , - iXLen, - iXLen); - define @intrinsic_th_vmaqau_vx_nxv2i32_i8_nxv8i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqau_vx_nxv2i32_i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -263,13 +185,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqau.mask.nxv2i32.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqau_mask_vx_nxv2i32_i8_nxv8i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqau_mask_vx_nxv2i32_i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -287,13 +202,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqau.nxv4i32.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_th_vmaqau_vx_nxv4i32_i8_nxv16i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqau_vx_nxv4i32_i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -310,13 +218,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqau.mask.nxv4i32.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqau_mask_vx_nxv4i32_i8_nxv16i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqau_mask_vx_nxv4i32_i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -334,13 +235,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqau.nxv8i32.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_th_vmaqau_vx_nxv8i32_i8_nxv32i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqau_vx_nxv8i32_i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -357,13 +251,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqau.mask.nxv8i32.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqau_mask_vx_nxv8i32_i8_nxv32i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqau_mask_vx_nxv8i32_i8_nxv32i8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/xtheadvdot-vmaqaus.ll b/llvm/test/CodeGen/RISCV/rvv/xtheadvdot-vmaqaus.ll index cd6e749b656fb..f44b7597fe75f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/xtheadvdot-vmaqaus.ll +++ b/llvm/test/CodeGen/RISCV/rvv/xtheadvdot-vmaqaus.ll @@ -4,13 +4,6 @@ ; RUN: sed 
's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xtheadvdot \ ; RUN: -verify-machineinstrs | FileCheck %s -declare @llvm.riscv.th.vmaqaus.nxv1i32.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_th_vmaqaus_vx_nxv1i32_i8_nxv4i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqaus_vx_nxv1i32_i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -27,13 +20,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqaus.mask.nxv1i32.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqaus_mask_vx_nxv1i32_i8_nxv4i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqaus_mask_vx_nxv1i32_i8_nxv4i8: ; CHECK: # %bb.0: # %entry @@ -51,13 +37,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqaus.nxv2i32.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_th_vmaqaus_vx_nxv2i32_i8_nxv8i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqaus_vx_nxv2i32_i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -74,13 +53,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqaus.mask.nxv2i32.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqaus_mask_vx_nxv2i32_i8_nxv8i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqaus_mask_vx_nxv2i32_i8_nxv8i8: ; CHECK: # %bb.0: # %entry @@ -98,13 +70,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqaus.nxv4i32.i8( - , - i8, - , - iXLen, - iXLen); - define @intrinsic_th_vmaqaus_vx_nxv4i32_i8_nxv16i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqaus_vx_nxv4i32_i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -121,13 +86,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqaus.mask.nxv4i32.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqaus_mask_vx_nxv4i32_i8_nxv16i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqaus_mask_vx_nxv4i32_i8_nxv16i8: ; CHECK: # %bb.0: # %entry @@ -145,13 +103,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqaus.nxv8i32.i8( - , - i8, - , - iXLen, - iXLen); - define 
@intrinsic_th_vmaqaus_vx_nxv8i32_i8_nxv32i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqaus_vx_nxv8i32_i8_nxv32i8: ; CHECK: # %bb.0: # %entry @@ -168,13 +119,6 @@ entry: ret %a } -declare @llvm.riscv.th.vmaqaus.mask.nxv8i32.i8( - , - i8, - , - , - iXLen, iXLen); - define @intrinsic_th_vmaqaus_mask_vx_nxv8i32_i8_nxv32i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_th_vmaqaus_mask_vx_nxv8i32_i8_nxv32i8: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll index 1261d824968d6..d0ad1acd4fba4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll +++ b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll @@ -5,9 +5,6 @@ ; Make sure we don't select a 0 vl to X0 in the custom isel handlers we use ; for these intrinsics. -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) - define @test_vlseg2_mask_nxv16i16(ptr %base, %mask) { ; CHECK-LABEL: test_vlseg2_mask_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -21,9 +18,6 @@ entry: ret %2 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlsseg2.mask.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64, i64) - define @test_vlsseg2_mask_nxv16i16(ptr %base, i64 %offset, %mask) { ; CHECK-LABEL: test_vlsseg2_mask_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -37,8 +31,6 @@ entry: %2 = call @llvm.riscv.tuple.extract.nxv16i16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2) %1, i32 1) ret %2 } -declare target("riscv.vector.tuple", , 2) 
@llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) define @test_vloxseg2_mask_nxv16i16_nxv16i16(ptr %base, %index, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv16i16: @@ -55,9 +47,6 @@ entry: ret %2 } -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) - define @test_vluxseg2_mask_nxv16i16_nxv16i16(ptr %base, %index, %mask) { ; CHECK-LABEL: test_vluxseg2_mask_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -73,9 +62,6 @@ entry: ret %2 } -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr , i64, i64) -declare {target("riscv.vector.tuple", , 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) - define @test_vlseg2ff_nxv16i16(ptr %base, ptr %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -110,9 +96,6 @@ entry: ret %2 } -declare void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr , i64, i64) -declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr, , i64, i64) - define void @test_vsseg2_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base) { ; CHECK-LABEL: test_vsseg2_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -135,9 +118,6 @@ entry: ret void } -declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64, i64) -declare void 
@llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, , i64, i64) - define void @test_vssseg2_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %offset) { ; CHECK-LABEL: test_vssseg2_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -160,9 +140,6 @@ entry: ret void } -declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsoxseg2_nxv16i16_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index) { ; CHECK-LABEL: test_vsoxseg2_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry @@ -185,9 +162,6 @@ entry: ret void } -declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) -declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) - define void @test_vsuxseg2_nxv16i16_nxv16i16(target("riscv.vector.tuple", , 2) %val, ptr %base, %index) { ; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/sadd_sat.ll b/llvm/test/CodeGen/RISCV/sadd_sat.ll index 1d6d07aa67337..27c7518c4f6c4 100644 --- a/llvm/test/CodeGen/RISCV/sadd_sat.ll +++ b/llvm/test/CodeGen/RISCV/sadd_sat.ll @@ -4,12 +4,6 @@ ; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+zbb | FileCheck %s --check-prefixes=RV32,RV32IZbb ; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+zbb | FileCheck %s --check-prefixes=RV64,RV64IZbb -declare i4 @llvm.sadd.sat.i4(i4, i4) -declare i8 @llvm.sadd.sat.i8(i8, i8) -declare i16 @llvm.sadd.sat.i16(i16, i16) -declare i32 @llvm.sadd.sat.i32(i32, i32) -declare i64 @llvm.sadd.sat.i64(i64, i64) - define signext i32 @func(i32 signext %x, i32 signext %y) nounwind { ; RV32-LABEL: func: ; RV32: # %bb.0: diff --git 
a/llvm/test/CodeGen/RISCV/sadd_sat_plus.ll b/llvm/test/CodeGen/RISCV/sadd_sat_plus.ll index 9200a77915c56..108a214535c3e 100644 --- a/llvm/test/CodeGen/RISCV/sadd_sat_plus.ll +++ b/llvm/test/CodeGen/RISCV/sadd_sat_plus.ll @@ -4,12 +4,6 @@ ; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+zbb | FileCheck %s --check-prefixes=RV32,RV32IZbb ; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+zbb | FileCheck %s --check-prefixes=RV64,RV64IZbb -declare i4 @llvm.sadd.sat.i4(i4, i4) -declare i8 @llvm.sadd.sat.i8(i8, i8) -declare i16 @llvm.sadd.sat.i16(i16, i16) -declare i32 @llvm.sadd.sat.i32(i32, i32) -declare i64 @llvm.sadd.sat.i64(i64, i64) - define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind { ; RV32-LABEL: func32: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/saverestore.ll b/llvm/test/CodeGen/RISCV/saverestore.ll index f753f817b0ab6..85db6a01e3f66 100644 --- a/llvm/test/CodeGen/RISCV/saverestore.ll +++ b/llvm/test/CodeGen/RISCV/saverestore.ll @@ -180,9 +180,6 @@ entry: ; Check that functions with varargs do not use save/restore code -declare void @llvm.va_start(ptr) -declare void @llvm.va_end(ptr) - define i32 @varargs(ptr %fmt, ...) 
nounwind { ; RV32I-LABEL: varargs: ; RV32I-NOT: call t0, __riscv_save @@ -249,8 +246,6 @@ entry: ; Check that dynamic allocation calculations remain correct -declare ptr @llvm.stacksave() -declare void @llvm.stackrestore(ptr) declare void @notdead(ptr) define void @alloca(i32 %n) nounwind { diff --git a/llvm/test/CodeGen/RISCV/sextw-removal-debug.mir b/llvm/test/CodeGen/RISCV/sextw-removal-debug.mir index f8d6d4b13846e..f247c5f38037b 100644 --- a/llvm/test/CodeGen/RISCV/sextw-removal-debug.mir +++ b/llvm/test/CodeGen/RISCV/sextw-removal-debug.mir @@ -10,9 +10,6 @@ ret void, !dbg !13 } - declare void @llvm.dbg.value(metadata, metadata, metadata) #0 - - attributes #0 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) } !llvm.dbg.cu = !{!0} !llvm.debugify = !{!2, !3} diff --git a/llvm/test/CodeGen/RISCV/sextw-removal.ll b/llvm/test/CodeGen/RISCV/sextw-removal.ll index 9f326280885b5..7e7ba9fc6803a 100644 --- a/llvm/test/CodeGen/RISCV/sextw-removal.ll +++ b/llvm/test/CodeGen/RISCV/sextw-removal.ll @@ -256,8 +256,6 @@ bb7: ; preds = %bb2 ret void } -declare i32 @llvm.ctpop.i32(i32) - define void @test6(i32 signext %arg, i32 signext %arg1) nounwind { ; CHECK-LABEL: test6: ; CHECK: # %bb.0: # %bb @@ -410,8 +408,6 @@ bb7: ; preds = %bb2 ret void } -declare i64 @llvm.ctpop.i64(i64) - define void @test8(i32 signext %arg, i32 signext %arg1) nounwind { ; CHECK-LABEL: test8: ; CHECK: # %bb.0: # %bb @@ -715,7 +711,6 @@ bb7: ; preds = %bb2 ret i32 %i8 } - ; int test14(int a, int n) { ; for (int i = 1; i < n; ++i) { ; if (a > 1000) @@ -1323,7 +1318,6 @@ bb2: ; preds = %bb2, %bb bb7: ; preds = %bb2 ret void } -declare i32 @llvm.riscv.sha256sig0(i32) ; The type promotion of %7 forms a sext_inreg, but %7 and %6 are combined to ; form a sh2add. This leaves behind a sext.w that isn't needed. @@ -1499,8 +1493,6 @@ bb7: ; preds = %bb2 ret void } -declare i32 @llvm.riscv.vmv.x.s.nxv1i32( ) - ; Test that we can look through brev8 in hasAllNBitUsers. 
define signext i32 @test21(i64 %arg1, i64 %arg2, i64 %arg3) { ; RV64I-LABEL: test21: diff --git a/llvm/test/CodeGen/RISCV/shifts.ll b/llvm/test/CodeGen/RISCV/shifts.ll index 1ca23d72b107b..f5ec7da7b70fe 100644 --- a/llvm/test/CodeGen/RISCV/shifts.ll +++ b/llvm/test/CodeGen/RISCV/shifts.ll @@ -7,9 +7,6 @@ ; Basic shift support is tested as part of ALU.ll. This file ensures that ; shifts which may not be supported natively are lowered properly. -declare i64 @llvm.fshr.i64(i64, i64, i64) -declare i128 @llvm.fshr.i128(i128, i128, i128) - define i64 @lshr64(i64 %a, i64 %b) nounwind { ; RV32I-LABEL: lshr64: ; RV32I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/short-forward-branch-opt.ll b/llvm/test/CodeGen/RISCV/short-forward-branch-opt.ll index 1bfeeb92e06dd..dc625e25bd6f4 100644 --- a/llvm/test/CodeGen/RISCV/short-forward-branch-opt.ll +++ b/llvm/test/CodeGen/RISCV/short-forward-branch-opt.ll @@ -2047,7 +2047,6 @@ define signext i32 @abs_i32(i32 signext %x) { %a = call i32 @llvm.abs.i32(i32 %x, i1 false) ret i32 %a } -declare i32 @llvm.abs.i32(i32, i1) define i64 @abs_i64(i64 %x) { ; NOSFB-LABEL: abs_i64: @@ -2088,7 +2087,6 @@ define i64 @abs_i64(i64 %x) { %a = call i64 @llvm.abs.i64(i64 %x, i1 false) ret i64 %a } -declare i64 @llvm.abs.i64(i64, i1) define i64 @select_andn(i64 %A, i64 %B, i64 %C, i1 zeroext %cond) { ; NOSFB-LABEL: select_andn: diff --git a/llvm/test/CodeGen/RISCV/simplify-condbr.ll b/llvm/test/CodeGen/RISCV/simplify-condbr.ll index 6dabd7d93cbc1..4aadd034ff0f7 100644 --- a/llvm/test/CodeGen/RISCV/simplify-condbr.ll +++ b/llvm/test/CodeGen/RISCV/simplify-condbr.ll @@ -2,12 +2,6 @@ ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #0 - -; Function Attrs: nocallback nofree nosync nounwind willreturn memory(inaccessiblemem: write) -declare void @llvm.assume(i1 noundef) #0 - 
declare fastcc i1 @S_reginclass() declare fastcc ptr @Perl_av_store(i64) @@ -175,5 +169,3 @@ sw.bb85: ; preds = %if.end48 br label %common.ret } -attributes #0 = { nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) } -attributes #1 = { nocallback nofree nosync nounwind willreturn memory(inaccessiblemem: write) } diff --git a/llvm/test/CodeGen/RISCV/ssub_sat.ll b/llvm/test/CodeGen/RISCV/ssub_sat.ll index ba4d170c719fc..0ee97d6660451 100644 --- a/llvm/test/CodeGen/RISCV/ssub_sat.ll +++ b/llvm/test/CodeGen/RISCV/ssub_sat.ll @@ -4,12 +4,6 @@ ; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+zbb | FileCheck %s --check-prefixes=RV32,RV32IZbb ; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+zbb | FileCheck %s --check-prefixes=RV64,RV64IZbb -declare i4 @llvm.ssub.sat.i4(i4, i4) -declare i8 @llvm.ssub.sat.i8(i8, i8) -declare i16 @llvm.ssub.sat.i16(i16, i16) -declare i32 @llvm.ssub.sat.i32(i32, i32) -declare i64 @llvm.ssub.sat.i64(i64, i64) - define signext i32 @func(i32 signext %x, i32 signext %y) nounwind { ; RV32-LABEL: func: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/ssub_sat_plus.ll b/llvm/test/CodeGen/RISCV/ssub_sat_plus.ll index 437c1e2a2e489..f74cbd442ab83 100644 --- a/llvm/test/CodeGen/RISCV/ssub_sat_plus.ll +++ b/llvm/test/CodeGen/RISCV/ssub_sat_plus.ll @@ -4,12 +4,6 @@ ; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+zbb | FileCheck %s --check-prefixes=RV32,RV32IZbb ; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+zbb | FileCheck %s --check-prefixes=RV64,RV64IZbb -declare i4 @llvm.ssub.sat.i4(i4, i4) -declare i8 @llvm.ssub.sat.i8(i8, i8) -declare i16 @llvm.ssub.sat.i16(i16, i16) -declare i32 @llvm.ssub.sat.i32(i32, i32) -declare i64 @llvm.ssub.sat.i64(i64, i64) - define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind { ; RV32-LABEL: func32: ; RV32: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/tail-calls.ll b/llvm/test/CodeGen/RISCV/tail-calls.ll index 366b37ac5d472..6756fea8a1f85 100644 --- a/llvm/test/CodeGen/RISCV/tail-calls.ll +++ 
b/llvm/test/CodeGen/RISCV/tail-calls.ll @@ -26,7 +26,6 @@ entry: ; Perform tail call optimization for external symbol. @dest = global [2 x i8] zeroinitializer -declare void @llvm.memcpy.p0.p0.i32(ptr, ptr, i32, i1) define void @caller_extern(ptr %src) optsize { ; CHECK-LABEL: caller_extern: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/thread-pointer.ll b/llvm/test/CodeGen/RISCV/thread-pointer.ll index 4465b7ecc910c..d4f318a0cf2a1 100644 --- a/llvm/test/CodeGen/RISCV/thread-pointer.ll +++ b/llvm/test/CodeGen/RISCV/thread-pointer.ll @@ -2,8 +2,6 @@ ; RUN: llc < %s -mtriple=riscv64 | FileCheck %s ; RUN: llc < %s -mtriple=riscv32 | FileCheck %s -declare ptr @llvm.thread.pointer() - define ptr @thread_pointer() nounwind { ; CHECK-LABEL: thread_pointer: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/uadd_sat.ll b/llvm/test/CodeGen/RISCV/uadd_sat.ll index ee591a1784635..4e0c4ab750592 100644 --- a/llvm/test/CodeGen/RISCV/uadd_sat.ll +++ b/llvm/test/CodeGen/RISCV/uadd_sat.ll @@ -4,12 +4,6 @@ ; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+zbb | FileCheck %s --check-prefix=RV32IZbb ; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+zbb | FileCheck %s --check-prefix=RV64IZbb -declare i4 @llvm.uadd.sat.i4(i4, i4) -declare i8 @llvm.uadd.sat.i8(i8, i8) -declare i16 @llvm.uadd.sat.i16(i16, i16) -declare i32 @llvm.uadd.sat.i32(i32, i32) -declare i64 @llvm.uadd.sat.i64(i64, i64) - define signext i32 @func(i32 signext %x, i32 signext %y) nounwind { ; RV32I-LABEL: func: ; RV32I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/uadd_sat_plus.ll b/llvm/test/CodeGen/RISCV/uadd_sat_plus.ll index da29d26b7147f..a6afef4286dea 100644 --- a/llvm/test/CodeGen/RISCV/uadd_sat_plus.ll +++ b/llvm/test/CodeGen/RISCV/uadd_sat_plus.ll @@ -4,12 +4,6 @@ ; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+zbb | FileCheck %s --check-prefix=RV32IZbb ; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+zbb | FileCheck %s --check-prefix=RV64IZbb -declare i4 @llvm.uadd.sat.i4(i4, i4) -declare i8 
@llvm.uadd.sat.i8(i8, i8) -declare i16 @llvm.uadd.sat.i16(i16, i16) -declare i32 @llvm.uadd.sat.i32(i32, i32) -declare i64 @llvm.uadd.sat.i64(i64, i64) - define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind { ; RV32I-LABEL: func32: ; RV32I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/umulo-128-legalisation-lowering.ll b/llvm/test/CodeGen/RISCV/umulo-128-legalisation-lowering.ll index 34d9aaf39bf72..938e6550387f5 100644 --- a/llvm/test/CodeGen/RISCV/umulo-128-legalisation-lowering.ll +++ b/llvm/test/CodeGen/RISCV/umulo-128-legalisation-lowering.ll @@ -118,8 +118,6 @@ start: } ; Function Attrs: nounwind readnone speculatable -declare { i128, i1 } @llvm.umul.with.overflow.i128(i128, i128) #1 attributes #0 = { nounwind readnone } -attributes #1 = { nounwind readnone speculatable } attributes #2 = { nounwind } diff --git a/llvm/test/CodeGen/RISCV/usub_sat.ll b/llvm/test/CodeGen/RISCV/usub_sat.ll index aab5626576427..33056682dcc79 100644 --- a/llvm/test/CodeGen/RISCV/usub_sat.ll +++ b/llvm/test/CodeGen/RISCV/usub_sat.ll @@ -4,12 +4,6 @@ ; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+zbb | FileCheck %s --check-prefix=RV32IZbb ; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+zbb | FileCheck %s --check-prefix=RV64IZbb -declare i4 @llvm.usub.sat.i4(i4, i4) -declare i8 @llvm.usub.sat.i8(i8, i8) -declare i16 @llvm.usub.sat.i16(i16, i16) -declare i32 @llvm.usub.sat.i32(i32, i32) -declare i64 @llvm.usub.sat.i64(i64, i64) - define signext i32 @func(i32 signext %x, i32 signext %y) nounwind { ; RV32I-LABEL: func: ; RV32I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/usub_sat_plus.ll b/llvm/test/CodeGen/RISCV/usub_sat_plus.ll index 3285349ea068a..ef6bc022ddc9f 100644 --- a/llvm/test/CodeGen/RISCV/usub_sat_plus.ll +++ b/llvm/test/CodeGen/RISCV/usub_sat_plus.ll @@ -4,12 +4,6 @@ ; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+zbb | FileCheck %s --check-prefix=RV32IZbb ; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+zbb | FileCheck %s --check-prefix=RV64IZbb -declare i4 @llvm.usub.sat.i4(i4, i4) 
-declare i8 @llvm.usub.sat.i8(i8, i8) -declare i16 @llvm.usub.sat.i16(i16, i16) -declare i32 @llvm.usub.sat.i32(i32, i32) -declare i64 @llvm.usub.sat.i64(i64, i64) - define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind { ; RV32I-LABEL: func32: ; RV32I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/vararg-ilp32e.ll b/llvm/test/CodeGen/RISCV/vararg-ilp32e.ll index 91999444fa766..7312d005962ba 100644 --- a/llvm/test/CodeGen/RISCV/vararg-ilp32e.ll +++ b/llvm/test/CodeGen/RISCV/vararg-ilp32e.ll @@ -4,8 +4,6 @@ ; RUN: llc -mtriple=riscv32 -target-abi ilp32e -frame-pointer=all -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=ILP32E-WITHFP %s -declare void @llvm.va_start(ptr) -declare void @llvm.va_end(ptr) declare void @abort() define i32 @caller(i32 %a) { diff --git a/llvm/test/CodeGen/RISCV/vararg.ll b/llvm/test/CodeGen/RISCV/vararg.ll index 3dd99f3d49d2d..fc391e9fb049e 100644 --- a/llvm/test/CodeGen/RISCV/vararg.ll +++ b/llvm/test/CodeGen/RISCV/vararg.ll @@ -40,9 +40,6 @@ ; The nounwind attribute is omitted for some of the tests, to check that CFI ; directives are correctly generated. -declare void @llvm.va_start(ptr) -declare void @llvm.va_end(ptr) - declare void @notdead(ptr) ; Although frontends are recommended to not generate va_arg due to the lack of @@ -1871,8 +1868,6 @@ define void @va3_caller() nounwind { ret void } -declare void @llvm.va_copy(ptr, ptr) - define i32 @va4_va_copy(i32 %argno, ...) 
nounwind { ; ILP32-ILP32F-FPELIM-LABEL: va4_va_copy: ; ILP32-ILP32F-FPELIM: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/varargs-with-fp-and-second-adj.ll b/llvm/test/CodeGen/RISCV/varargs-with-fp-and-second-adj.ll index c8c364208da90..b569854bb47df 100644 --- a/llvm/test/CodeGen/RISCV/varargs-with-fp-and-second-adj.ll +++ b/llvm/test/CodeGen/RISCV/varargs-with-fp-and-second-adj.ll @@ -1,9 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 ; RUN: llc -mtriple=riscv64 -mattr=+m,+c,+v < %s | FileCheck --check-prefix=RV64V %s -declare void @llvm.va_copy.p0(ptr, ptr) -declare void @llvm.va_end.p0(ptr) - define dso_local void @_Z3fooPKcz(ptr noundef %0, ...) "frame-pointer"="all" { ; RV64V-LABEL: _Z3fooPKcz: ; RV64V: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/vlenb.ll b/llvm/test/CodeGen/RISCV/vlenb.ll index 280df6545fd06..cc2850617fcad 100644 --- a/llvm/test/CodeGen/RISCV/vlenb.ll +++ b/llvm/test/CodeGen/RISCV/vlenb.ll @@ -92,8 +92,6 @@ loop: br label %loop } - -declare i32 @llvm.read_register.i32(metadata) nounwind declare void @unknown() declare void @use(i32) diff --git a/llvm/test/CodeGen/RISCV/vscale-demanded-bits.ll b/llvm/test/CodeGen/RISCV/vscale-demanded-bits.ll index fd725e555a326..69958d48e63c8 100644 --- a/llvm/test/CodeGen/RISCV/vscale-demanded-bits.ll +++ b/llvm/test/CodeGen/RISCV/vscale-demanded-bits.ll @@ -1,9 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple riscv64 -mattr +v -filetype asm -o - %s | FileCheck %s -declare i8 @llvm.vscale.i8() -declare @llvm.stepvector.nxv8i8() - define @f() #0 { ; CHECK-LABEL: f: ; CHECK: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/xaluo.ll b/llvm/test/CodeGen/RISCV/xaluo.ll index 93b68b0a95b48..62f08d7831dda 100644 --- a/llvm/test/CodeGen/RISCV/xaluo.ll +++ b/llvm/test/CodeGen/RISCV/xaluo.ll @@ -1953,7 +1953,6 @@ entry: ret i1 %obit } - ; ; Check the use of the overflow bit in combination with 
a select instruction. ; @@ -3809,7 +3808,6 @@ entry: ret i1 %ret } - ; ; Check the use of the overflow bit in combination with a branch instruction. ; @@ -5586,15 +5584,3 @@ IfNoOverflow: ret i64 %val } -declare {i32, i1} @llvm.sadd.with.overflow.i32(i32, i32) nounwind readnone -declare {i64, i1} @llvm.sadd.with.overflow.i64(i64, i64) nounwind readnone -declare {i32, i1} @llvm.uadd.with.overflow.i32(i32, i32) nounwind readnone -declare {i64, i1} @llvm.uadd.with.overflow.i64(i64, i64) nounwind readnone -declare {i32, i1} @llvm.ssub.with.overflow.i32(i32, i32) nounwind readnone -declare {i64, i1} @llvm.ssub.with.overflow.i64(i64, i64) nounwind readnone -declare {i32, i1} @llvm.usub.with.overflow.i32(i32, i32) nounwind readnone -declare {i64, i1} @llvm.usub.with.overflow.i64(i64, i64) nounwind readnone -declare {i32, i1} @llvm.smul.with.overflow.i32(i32, i32) nounwind readnone -declare {i64, i1} @llvm.smul.with.overflow.i64(i64, i64) nounwind readnone -declare {i32, i1} @llvm.umul.with.overflow.i32(i32, i32) nounwind readnone -declare {i64, i1} @llvm.umul.with.overflow.i64(i64, i64) nounwind readnone diff --git a/llvm/test/CodeGen/RISCV/xcvalu.ll b/llvm/test/CodeGen/RISCV/xcvalu.ll index 91e5153ee7a5b..5cc7bfbc7fba6 100644 --- a/llvm/test/CodeGen/RISCV/xcvalu.ll +++ b/llvm/test/CodeGen/RISCV/xcvalu.ll @@ -2,12 +2,6 @@ ; RUN: llc -O0 -mtriple=riscv32 -mattr=+m -mattr=+xcvalu -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -declare i32 @llvm.abs.i32(i32, i1) -declare i32 @llvm.smin.i32(i32, i32) -declare i32 @llvm.smax.i32(i32, i32) -declare i32 @llvm.umin.i32(i32, i32) -declare i32 @llvm.umax.i32(i32, i32) - define i32 @abs(i32 %a) { ; CHECK-LABEL: abs: ; CHECK: # %bb.0: @@ -111,8 +105,6 @@ define i32 @extbz(i8 %a) { ret i32 %1 } -declare i32 @llvm.riscv.cv.alu.clip(i32, i32) - define i32 @test.cv.alu.clip.case.a(i32 %a) { ; CHECK-LABEL: test.cv.alu.clip.case.a: ; CHECK: # %bb.0: @@ -132,8 +124,6 @@ define i32 @test.cv.alu.clip.case.b(i32 %a) { ret i32 %1 } -declare 
i32 @llvm.riscv.cv.alu.clipu(i32, i32) - define i32 @test.cv.alu.clipu.case.a(i32 %a) { ; CHECK-LABEL: test.cv.alu.clipu.case.a: ; CHECK: # %bb.0: @@ -153,8 +143,6 @@ define i32 @test.cv.alu.clipu.case.b(i32 %a) { ret i32 %1 } -declare i32 @llvm.riscv.cv.alu.addN(i32, i32, i32) - define i32 @test.cv.alu.addN.case.a(i32 %a, i32 %b) { ; CHECK-LABEL: test.cv.alu.addN.case.a: ; CHECK: # %bb.0: @@ -174,8 +162,6 @@ define i32 @test.cv.alu.addN.case.b(i32 %a, i32 %b) { ret i32 %1 } -declare i32 @llvm.riscv.cv.alu.adduN(i32, i32, i32) - define i32 @test.cv.alu.adduN.case.a(i32 %a, i32 %b) { ; CHECK-LABEL: test.cv.alu.adduN.case.a: ; CHECK: # %bb.0: @@ -195,8 +181,6 @@ define i32 @test.cv.alu.adduN.case.b(i32 %a, i32 %b) { ret i32 %1 } -declare i32 @llvm.riscv.cv.alu.addRN(i32, i32, i32) - define i32 @test.cv.alu.addRN.case.a(i32 %a, i32 %b) { ; CHECK-LABEL: test.cv.alu.addRN.case.a: ; CHECK: # %bb.0: @@ -216,8 +200,6 @@ define i32 @test.cv.alu.addRN.case.b(i32 %a, i32 %b) { ret i32 %1 } -declare i32 @llvm.riscv.cv.alu.adduRN(i32, i32, i32) - define i32 @test.cv.alu.adduRN.case.a(i32 %a, i32 %b) { ; CHECK-LABEL: test.cv.alu.adduRN.case.a: ; CHECK: # %bb.0: @@ -237,8 +219,6 @@ define i32 @test.cv.alu.adduRN.case.b(i32 %a, i32 %b) { ret i32 %1 } -declare i32 @llvm.riscv.cv.alu.subN(i32, i32, i32) - define i32 @test.cv.alu.subN.case.a(i32 %a, i32 %b) { ; CHECK-LABEL: test.cv.alu.subN.case.a: ; CHECK: # %bb.0: @@ -258,8 +238,6 @@ define i32 @test.cv.alu.subN.case.b(i32 %a, i32 %b) { ret i32 %1 } -declare i32 @llvm.riscv.cv.alu.subuN(i32, i32, i32) - define i32 @test.cv.alu.subuN.case.a(i32 %a, i32 %b) { ; CHECK-LABEL: test.cv.alu.subuN.case.a: ; CHECK: # %bb.0: @@ -279,8 +257,6 @@ define i32 @test.cv.alu.subuN.case.b(i32 %a, i32 %b) { ret i32 %1 } -declare i32 @llvm.riscv.cv.alu.subRN(i32, i32, i32) - define i32 @test.cv.alu.subRN.case.a(i32 %a, i32 %b) { ; CHECK-LABEL: test.cv.alu.subRN.case.a: ; CHECK: # %bb.0: @@ -300,8 +276,6 @@ define i32 @test.cv.alu.subRN.case.b(i32 %a, 
i32 %b) { ret i32 %1 } -declare i32 @llvm.riscv.cv.alu.subuRN(i32, i32, i32) - define i32 @test.cv.alu.subuRN.case.a(i32 %a, i32 %b) { ; CHECK-LABEL: test.cv.alu.subuRN.case.a: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/xcvbitmanip.ll b/llvm/test/CodeGen/RISCV/xcvbitmanip.ll index b2cebabb7df8b..7e63efac9b62f 100644 --- a/llvm/test/CodeGen/RISCV/xcvbitmanip.ll +++ b/llvm/test/CodeGen/RISCV/xcvbitmanip.ll @@ -4,8 +4,6 @@ ; RUN: llc -O3 -mtriple=riscv32 -mattr=+xcvbitmanip -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-O3 -declare i32 @llvm.riscv.cv.bitmanip.extract(i32, i32) - define i32 @test.cv.extractr(i32 %a, i32 %b) { ; CHECK-LABEL: test.cv.extractr: ; CHECK: # %bb.0: @@ -33,8 +31,6 @@ define i32 @test.cv.extract1023(i32 %a) { ret i32 %1 } -declare i32 @llvm.riscv.cv.bitmanip.extractu(i32, i32) - define i32 @test.cv.extractur(i32 %a, i32 %b) { ; CHECK-LABEL: test.cv.extractur: ; CHECK: # %bb.0: @@ -53,8 +49,6 @@ define i32 @test.cv.extractu(i32 %a) { ret i32 %1 } -declare i32 @llvm.riscv.cv.bitmanip.insert(i32, i32, i32) - define i32 @test.cv.insert(i32 %c, i32 %a) { ; CHECK-LABEL: test.cv.insert: ; CHECK: # %bb.0: @@ -73,8 +67,6 @@ define i32 @test.cv.insertr(i32 %c, i32 %b, i32 %a) { ret i32 %1 } -declare i32 @llvm.riscv.cv.bitmanip.bclr(i32, i32) - define i32 @test.cv.bclrr(i32 %a, i32 %b) { ; CHECK-LABEL: test.cv.bclrr: ; CHECK: # %bb.0: @@ -93,8 +85,6 @@ define i32 @test.cv.bclr(i32 %a) { ret i32 %1 } -declare i32 @llvm.riscv.cv.bitmanip.bset(i32, i32) - define i32 @test.cv.bsetr(i32 %a, i32 %b) { ; CHECK-LABEL: test.cv.bsetr: ; CHECK: # %bb.0: @@ -113,8 +103,6 @@ define i32 @test.cv.bset(i32 %a) { ret i32 %1 } -declare i32 @llvm.cttz.i32(i32, i1) - define i32 @test.cv.ff1(i32 %a) { ; CHECK-LABEL: test.cv.ff1: ; CHECK: # %bb.0: @@ -124,8 +112,6 @@ define i32 @test.cv.ff1(i32 %a) { ret i32 %1 } -declare i32 @llvm.ctlz.i32(i32, i1) - define i32 @test.cv.fl1(i32 %a) { ; CHECK-LABEL: test.cv.fl1: ; CHECK: # %bb.0: 
@@ -135,8 +121,6 @@ define i32 @test.cv.fl1(i32 %a) { ret i32 %1 } -declare i32 @llvm.riscv.cv.bitmanip.clb(i32) - define i32 @test.cv.clb(i32 %a) { ; CHECK-LABEL: test.cv.clb: ; CHECK: # %bb.0: @@ -146,8 +130,6 @@ define i32 @test.cv.clb(i32 %a) { ret i32 %1 } -declare i32 @llvm.ctpop(i32) - define i32 @test.cv.cnt(i32 %a) { ; CHECK-LABEL: test.cv.cnt: ; CHECK: # %bb.0: @@ -157,8 +139,6 @@ define i32 @test.cv.cnt(i32 %a) { ret i32 %1 } -declare i32 @llvm.fshl.i32(i32, i32, i32) - define i32 @test.llvm.fshl.imm(i32 %a) { ; CHECK-LABEL: test.llvm.fshl.imm: ; CHECK: # %bb.0: @@ -187,8 +167,6 @@ define i32 @test.llvm.fshl.reg(i32 %a, i32 %b) { ret i32 %1 } -declare i32 @llvm.fshr.i32(i32, i32, i32) - define i32 @test.llvm.fshr.imm(i32 %a) { ; CHECK-LABEL: test.llvm.fshr.imm: ; CHECK: # %bb.0: @@ -208,8 +186,6 @@ define i32 @test.llvm.fshr.reg(i32 %a, i32 %b) { ret i32 %1 } -declare i32 @llvm.riscv.cv.bitmanip.bitrev(i32, i32, i32) - define i32 @test.cv.bitrev(i32 %a) { ; CHECK-LABEL: test.cv.bitrev: ; CHECK: # %bb.0: @@ -219,8 +195,6 @@ define i32 @test.cv.bitrev(i32 %a) { ret i32 %1 } -declare i32 @llvm.bitreverse(i32) - define i32 @test.llvm.bitrev(i32 %a) { ; CHECK-LABEL: test.llvm.bitrev: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/xcvmac.ll b/llvm/test/CodeGen/RISCV/xcvmac.ll index 68efdf7210f7f..40cde9aba6734 100644 --- a/llvm/test/CodeGen/RISCV/xcvmac.ll +++ b/llvm/test/CodeGen/RISCV/xcvmac.ll @@ -2,8 +2,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+m -mattr=+xcvmac -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -declare i32 @llvm.riscv.cv.mac.mac(i32, i32, i32) - define i32 @test.mac(i32 %a, i32 %b, i32 %c) { ; CHECK-LABEL: test.mac: ; CHECK: # %bb.0: @@ -14,8 +12,6 @@ define i32 @test.mac(i32 %a, i32 %b, i32 %c) { ret i32 %1 } -declare i32 @llvm.riscv.cv.mac.msu(i32, i32, i32) - define i32 @test.msu(i32 %a, i32 %b, i32 %c) { ; CHECK-LABEL: test.msu: ; CHECK: # %bb.0: @@ -26,8 +22,6 @@ define i32 @test.msu(i32 %a, i32 %b, i32 %c) { ret i32 %1 } 
-declare i32 @llvm.riscv.cv.mac.muluN(i32, i32, i32) - define i32 @test.muluN(i32 %a, i32 %b) { ; CHECK-LABEL: test.muluN: ; CHECK: # %bb.0: @@ -37,8 +31,6 @@ define i32 @test.muluN(i32 %a, i32 %b) { ret i32 %1 } -declare i32 @llvm.riscv.cv.mac.mulhhuN(i32, i32, i32) - define i32 @test.mulhhuN(i32 %a, i32 %b) { ; CHECK-LABEL: test.mulhhuN: ; CHECK: # %bb.0: @@ -48,8 +40,6 @@ define i32 @test.mulhhuN(i32 %a, i32 %b) { ret i32 %1 } -declare i32 @llvm.riscv.cv.mac.mulsN(i32, i32, i32) - define i32 @test.mulsN(i32 %a, i32 %b) { ; CHECK-LABEL: test.mulsN: ; CHECK: # %bb.0: @@ -59,8 +49,6 @@ define i32 @test.mulsN(i32 %a, i32 %b) { ret i32 %1 } -declare i32 @llvm.riscv.cv.mac.mulhhsN(i32, i32, i32) - define i32 @test.mulhhsN(i32 %a, i32 %b) { ; CHECK-LABEL: test.mulhhsN: ; CHECK: # %bb.0: @@ -70,8 +58,6 @@ define i32 @test.mulhhsN(i32 %a, i32 %b) { ret i32 %1 } -declare i32 @llvm.riscv.cv.mac.muluRN(i32, i32, i32) - define i32 @test.muluRN(i32 %a, i32 %b) { ; CHECK-LABEL: test.muluRN: ; CHECK: # %bb.0: @@ -81,8 +67,6 @@ define i32 @test.muluRN(i32 %a, i32 %b) { ret i32 %1 } -declare i32 @llvm.riscv.cv.mac.mulhhuRN(i32, i32, i32) - define i32 @test.mulhhuRN(i32 %a, i32 %b) { ; CHECK-LABEL: test.mulhhuRN: ; CHECK: # %bb.0: @@ -92,8 +76,6 @@ define i32 @test.mulhhuRN(i32 %a, i32 %b) { ret i32 %1 } -declare i32 @llvm.riscv.cv.mac.mulsRN(i32, i32, i32) - define i32 @test.mulsRN(i32 %a, i32 %b) { ; CHECK-LABEL: test.mulsRN: ; CHECK: # %bb.0: @@ -103,8 +85,6 @@ define i32 @test.mulsRN(i32 %a, i32 %b) { ret i32 %1 } -declare i32 @llvm.riscv.cv.mac.mulhhsRN(i32, i32, i32) - define i32 @test.mulhhsRN(i32 %a, i32 %b) { ; CHECK-LABEL: test.mulhhsRN: ; CHECK: # %bb.0: @@ -114,8 +94,6 @@ define i32 @test.mulhhsRN(i32 %a, i32 %b) { ret i32 %1 } -declare i32 @llvm.riscv.cv.mac.macuN(i32, i32, i32, i32) - define i32 @test.macuN(i32 %a, i32 %b, i32 %c) { ; CHECK-LABEL: test.macuN: ; CHECK: # %bb.0: @@ -126,8 +104,6 @@ define i32 @test.macuN(i32 %a, i32 %b, i32 %c) { ret i32 %1 } -declare 
i32 @llvm.riscv.cv.mac.machhuN(i32, i32, i32, i32) - define i32 @test.machhuN(i32 %a, i32 %b, i32 %c) { ; CHECK-LABEL: test.machhuN: ; CHECK: # %bb.0: @@ -138,8 +114,6 @@ define i32 @test.machhuN(i32 %a, i32 %b, i32 %c) { ret i32 %1 } -declare i32 @llvm.riscv.cv.mac.macsN(i32, i32, i32, i32) - define i32 @test.macsN(i32 %a, i32 %b, i32 %c) { ; CHECK-LABEL: test.macsN: ; CHECK: # %bb.0: @@ -150,8 +124,6 @@ define i32 @test.macsN(i32 %a, i32 %b, i32 %c) { ret i32 %1 } -declare i32 @llvm.riscv.cv.mac.machhsN(i32, i32, i32, i32) - define i32 @test.machhsN(i32 %a, i32 %b, i32 %c) { ; CHECK-LABEL: test.machhsN: ; CHECK: # %bb.0: @@ -162,8 +134,6 @@ define i32 @test.machhsN(i32 %a, i32 %b, i32 %c) { ret i32 %1 } -declare i32 @llvm.riscv.cv.mac.macuRN(i32, i32, i32, i32) - define i32 @test.macuRN(i32 %a, i32 %b, i32 %c) { ; CHECK-LABEL: test.macuRN: ; CHECK: # %bb.0: @@ -174,8 +144,6 @@ define i32 @test.macuRN(i32 %a, i32 %b, i32 %c) { ret i32 %1 } -declare i32 @llvm.riscv.cv.mac.machhuRN(i32, i32, i32, i32) - define i32 @test.machhuRN(i32 %a, i32 %b, i32 %c) { ; CHECK-LABEL: test.machhuRN: ; CHECK: # %bb.0: @@ -186,8 +154,6 @@ define i32 @test.machhuRN(i32 %a, i32 %b, i32 %c) { ret i32 %1 } -declare i32 @llvm.riscv.cv.mac.macsRN(i32, i32, i32, i32) - define i32 @test.macsRN(i32 %a, i32 %b, i32 %c) { ; CHECK-LABEL: test.macsRN: ; CHECK: # %bb.0: @@ -198,8 +164,6 @@ define i32 @test.macsRN(i32 %a, i32 %b, i32 %c) { ret i32 %1 } -declare i32 @llvm.riscv.cv.mac.machhsRN(i32, i32, i32, i32) - define i32 @test.machhsRN(i32 %a, i32 %b, i32 %c) { ; CHECK-LABEL: test.machhsRN: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/xqccmp-push-pop-popret.ll b/llvm/test/CodeGen/RISCV/xqccmp-push-pop-popret.ll index a7e24cecb4f26..92c5d2892cbdc 100644 --- a/llvm/test/CodeGen/RISCV/xqccmp-push-pop-popret.ll +++ b/llvm/test/CodeGen/RISCV/xqccmp-push-pop-popret.ll @@ -988,9 +988,6 @@ entry: ; Check that functions with varargs do not use save/restore code -declare void 
@llvm.va_start(ptr) -declare void @llvm.va_end(ptr) - define i32 @varargs(ptr %fmt, ...) { ; RV32IXQCCMP-LABEL: varargs: ; RV32IXQCCMP: # %bb.0: @@ -1437,8 +1434,6 @@ entry: ; Check that dynamic allocation calculations remain correct -declare ptr @llvm.stacksave() -declare void @llvm.stackrestore(ptr) declare void @notdead(ptr) define void @alloca(i32 %n) { @@ -3732,7 +3727,6 @@ define void @callee_no_irq() { } declare void @bar(ptr, ptr) -declare ptr @llvm.frameaddress.p0(i32 immarg) define i32 @use_fp(i32 %x) { ; RV32IXQCCMP-LABEL: use_fp: diff --git a/llvm/test/CodeGen/RISCV/xqcibm-cto-clo-brev.ll b/llvm/test/CodeGen/RISCV/xqcibm-cto-clo-brev.ll index 8568d88bceab6..5b5d3d856d878 100644 --- a/llvm/test/CodeGen/RISCV/xqcibm-cto-clo-brev.ll +++ b/llvm/test/CodeGen/RISCV/xqcibm-cto-clo-brev.ll @@ -6,21 +6,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zbb,experimental-xqcibm -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV32ZBBXQCIBM -declare i8 @llvm.cttz.i8(i8, i1) -declare i16 @llvm.cttz.i16(i16, i1) -declare i32 @llvm.cttz.i32(i32, i1) -declare i64 @llvm.cttz.i64(i64, i1) - -declare i8 @llvm.ctlz.i8(i8, i1) -declare i16 @llvm.ctlz.i16(i16, i1) -declare i32 @llvm.ctlz.i32(i32, i1) -declare i64 @llvm.ctlz.i64(i64, i1) - -declare i8 @llvm.bitreverse.i8(i8) -declare i16 @llvm.bitreverse.i16(i16) -declare i32 @llvm.bitreverse.i32(i32) -declare i64 @llvm.bitreverse.i64(i64) - define i8 @test_cttz_i8(i8 %a) nounwind { ; RV32I-LABEL: test_cttz_i8: ; RV32I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/xqcilsm-memset.ll b/llvm/test/CodeGen/RISCV/xqcilsm-memset.ll index b48e039dd30a4..2fad19a653f1f 100644 --- a/llvm/test/CodeGen/RISCV/xqcilsm-memset.ll +++ b/llvm/test/CodeGen/RISCV/xqcilsm-memset.ll @@ -34,8 +34,6 @@ entry: ret void } -declare void @llvm.memset.p0.i32(ptr nocapture writeonly, i8, i32, i1) - define void @test2(ptr nocapture %p) nounwind { ; RV32I-LABEL: test2: ; RV32I: # %bb.0: # %entry @@ -142,7 +140,6 @@ entry: ret void } - define ptr 
@test3(ptr %p) nounwind { ; RV32I-LABEL: test3: ; RV32I: # %bb.0: # %entry @@ -215,10 +212,6 @@ entry: ret void } -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) - -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) - define void @test4b() nounwind { ; RV32I-LABEL: test4b: ; RV32I: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/RISCV/zfh-half-intrinsics-strict.ll b/llvm/test/CodeGen/RISCV/zfh-half-intrinsics-strict.ll index 3efa9e58e65d3..eb1848965a9ba 100644 --- a/llvm/test/CodeGen/RISCV/zfh-half-intrinsics-strict.ll +++ b/llvm/test/CodeGen/RISCV/zfh-half-intrinsics-strict.ll @@ -24,8 +24,6 @@ ; RUN: -mattr=+zhinx -verify-machineinstrs -target-abi lp64 | \ ; RUN: FileCheck -check-prefix=RV64IZDINXZHINX %s -declare half @llvm.experimental.constrained.sqrt.f16(half, metadata, metadata) - define half @sqrt_f16(half %a) nounwind strictfp { ; RV32IZFH-LABEL: sqrt_f16: ; RV32IZFH: # %bb.0: @@ -60,8 +58,6 @@ define half @sqrt_f16(half %a) nounwind strictfp { ret half %1 } -declare half @llvm.experimental.constrained.floor.f16(half, metadata) - define half @floor_f16(half %a) nounwind strictfp { ; RV32IZFH-LABEL: floor_f16: ; RV32IZFH: # %bb.0: @@ -132,8 +128,6 @@ define half @floor_f16(half %a) nounwind strictfp { ret half %1 } -declare half @llvm.experimental.constrained.ceil.f16(half, metadata) - define half @ceil_f16(half %a) nounwind strictfp { ; RV32IZFH-LABEL: ceil_f16: ; RV32IZFH: # %bb.0: @@ -204,8 +198,6 @@ define half @ceil_f16(half %a) nounwind strictfp { ret half %1 } -declare half @llvm.experimental.constrained.trunc.f16(half, metadata) - define half @trunc_f16(half %a) nounwind strictfp { ; RV32IZFH-LABEL: trunc_f16: ; RV32IZFH: # %bb.0: @@ -276,8 +268,6 @@ define half @trunc_f16(half %a) nounwind strictfp { ret half %1 } -declare half @llvm.experimental.constrained.rint.f16(half, metadata, metadata) - define half @rint_f16(half %a) nounwind strictfp { ; RV32IZFH-LABEL: rint_f16: ; RV32IZFH: # %bb.0: @@ -348,8 +338,6 @@ define half 
@rint_f16(half %a) nounwind strictfp { ret half %1 } -declare half @llvm.experimental.constrained.nearbyint.f16(half, metadata, metadata) - define half @nearbyint_f16(half %a) nounwind strictfp { ; RV32IZFH-LABEL: nearbyint_f16: ; RV32IZFH: # %bb.0: @@ -420,8 +408,6 @@ define half @nearbyint_f16(half %a) nounwind strictfp { ret half %1 } -declare half @llvm.experimental.constrained.round.f16(half, metadata) - define half @round_f16(half %a) nounwind strictfp { ; RV32IZFH-LABEL: round_f16: ; RV32IZFH: # %bb.0: @@ -492,8 +478,6 @@ define half @round_f16(half %a) nounwind strictfp { ret half %1 } -declare half @llvm.experimental.constrained.roundeven.f16(half, metadata) - define half @roundeven_f16(half %a) nounwind strictfp { ; RV32IZFH-LABEL: roundeven_f16: ; RV32IZFH: # %bb.0: @@ -564,8 +548,6 @@ define half @roundeven_f16(half %a) nounwind strictfp { ret half %1 } -declare iXLen @llvm.experimental.constrained.lrint.iXLen.f16(half, metadata, metadata) - define iXLen @lrint_f16(half %a) nounwind strictfp { ; RV32IZFH-LABEL: lrint_f16: ; RV32IZFH: # %bb.0: @@ -600,8 +582,6 @@ define iXLen @lrint_f16(half %a) nounwind strictfp { ret iXLen %1 } -declare iXLen @llvm.experimental.constrained.lround.iXLen.f16(half, metadata) - define iXLen @lround_f16(half %a) nounwind strictfp { ; RV32IZFH-LABEL: lround_f16: ; RV32IZFH: # %bb.0: @@ -636,8 +616,6 @@ define iXLen @lround_f16(half %a) nounwind strictfp { ret iXLen %1 } -declare i64 @llvm.experimental.constrained.llrint.i64.f16(half, metadata, metadata) - define i64 @llrint_f16(half %a) nounwind strictfp { ; RV32IZFH-LABEL: llrint_f16: ; RV32IZFH: # %bb.0: @@ -687,8 +665,6 @@ define i64 @llrint_f16(half %a) nounwind strictfp { ret i64 %1 } -declare i64 @llvm.experimental.constrained.llround.i64.f16(half, metadata) - define i64 @llround_f16(half %a) nounwind strictfp { ; RV32IZFH-LABEL: llround_f16: ; RV32IZFH: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/zfh-half-intrinsics.ll 
b/llvm/test/CodeGen/RISCV/zfh-half-intrinsics.ll index ba2ea57a00822..b71027ee278d9 100644 --- a/llvm/test/CodeGen/RISCV/zfh-half-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/zfh-half-intrinsics.ll @@ -26,8 +26,6 @@ ; These intrinsics require half to be a legal type. -declare iXLen @llvm.lrint.iXLen.f16(half) - define iXLen @lrint_f16(half %a) nounwind { ; RV32IZFH-LABEL: lrint_f16: ; RV32IZFH: # %bb.0: @@ -72,9 +70,6 @@ define iXLen @lrint_f16(half %a) nounwind { ret iXLen %1 } -declare i32 @llvm.lround.i32.f16(half) -declare i64 @llvm.lround.i64.f16(half) - define iXLen @lround_f16(half %a) nounwind { ; RV32IZFH-LABEL: lround_f16: ; RV32IZFH: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics-strict.ll b/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics-strict.ll index 214ea46d3130d..0529819a4f4e2 100644 --- a/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics-strict.ll +++ b/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics-strict.ll @@ -24,8 +24,6 @@ ; RUN: -mattr=+zhinxmin -verify-machineinstrs -target-abi lp64 | \ ; RUN: FileCheck -check-prefix=RV64IZDINXZHINXMIN %s -declare half @llvm.experimental.constrained.sqrt.f16(half, metadata, metadata) - define half @sqrt_f16(half %a) nounwind strictfp { ; RV32IZFHMIN-LABEL: sqrt_f16: ; RV32IZFHMIN: # %bb.0: @@ -72,8 +70,6 @@ define half @sqrt_f16(half %a) nounwind strictfp { ret half %1 } -declare half @llvm.experimental.constrained.floor.f16(half, metadata) - define half @floor_f16(half %a) nounwind strictfp { ; RV32IZFHMIN-LABEL: floor_f16: ; RV32IZFHMIN: # %bb.0: @@ -144,8 +140,6 @@ define half @floor_f16(half %a) nounwind strictfp { ret half %1 } -declare half @llvm.experimental.constrained.ceil.f16(half, metadata) - define half @ceil_f16(half %a) nounwind strictfp { ; RV32IZFHMIN-LABEL: ceil_f16: ; RV32IZFHMIN: # %bb.0: @@ -216,8 +210,6 @@ define half @ceil_f16(half %a) nounwind strictfp { ret half %1 } -declare half @llvm.experimental.constrained.trunc.f16(half, metadata) - define half @trunc_f16(half 
%a) nounwind strictfp { ; RV32IZFHMIN-LABEL: trunc_f16: ; RV32IZFHMIN: # %bb.0: @@ -288,8 +280,6 @@ define half @trunc_f16(half %a) nounwind strictfp { ret half %1 } -declare half @llvm.experimental.constrained.rint.f16(half, metadata, metadata) - define half @rint_f16(half %a) nounwind strictfp { ; RV32IZFHMIN-LABEL: rint_f16: ; RV32IZFHMIN: # %bb.0: @@ -360,8 +350,6 @@ define half @rint_f16(half %a) nounwind strictfp { ret half %1 } -declare half @llvm.experimental.constrained.nearbyint.f16(half, metadata, metadata) - define half @nearbyint_f16(half %a) nounwind strictfp { ; RV32IZFHMIN-LABEL: nearbyint_f16: ; RV32IZFHMIN: # %bb.0: @@ -432,8 +420,6 @@ define half @nearbyint_f16(half %a) nounwind strictfp { ret half %1 } -declare half @llvm.experimental.constrained.round.f16(half, metadata) - define half @round_f16(half %a) nounwind strictfp { ; RV32IZFHMIN-LABEL: round_f16: ; RV32IZFHMIN: # %bb.0: @@ -504,8 +490,6 @@ define half @round_f16(half %a) nounwind strictfp { ret half %1 } -declare half @llvm.experimental.constrained.roundeven.f16(half, metadata) - define half @roundeven_f16(half %a) nounwind strictfp { ; RV32IZFHMIN-LABEL: roundeven_f16: ; RV32IZFHMIN: # %bb.0: @@ -576,8 +560,6 @@ define half @roundeven_f16(half %a) nounwind strictfp { ret half %1 } -declare iXLen @llvm.experimental.constrained.lrint.iXLen.f16(half, metadata, metadata) - define iXLen @lrint_f16(half %a) nounwind strictfp { ; RV32IZFHMIN-LABEL: lrint_f16: ; RV32IZFHMIN: # %bb.0: @@ -618,8 +600,6 @@ define iXLen @lrint_f16(half %a) nounwind strictfp { ret iXLen %1 } -declare iXLen @llvm.experimental.constrained.lround.iXLen.f16(half, metadata) - define iXLen @lround_f16(half %a) nounwind strictfp { ; RV32IZFHMIN-LABEL: lround_f16: ; RV32IZFHMIN: # %bb.0: @@ -660,8 +640,6 @@ define iXLen @lround_f16(half %a) nounwind strictfp { ret iXLen %1 } -declare i64 @llvm.experimental.constrained.llrint.i64.f16(half, metadata, metadata) - define i64 @llrint_f16(half %a) nounwind strictfp { ; 
RV32IZFHMIN-LABEL: llrint_f16: ; RV32IZFHMIN: # %bb.0: @@ -714,8 +692,6 @@ define i64 @llrint_f16(half %a) nounwind strictfp { ret i64 %1 } -declare i64 @llvm.experimental.constrained.llround.i64.f16(half, metadata) - define i64 @llround_f16(half %a) nounwind strictfp { ; RV32IZFHMIN-LABEL: llround_f16: ; RV32IZFHMIN: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics.ll b/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics.ll index 0a494878926d1..a87f2dda9cb42 100644 --- a/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics.ll @@ -26,8 +26,6 @@ ; These intrinsics require half to be a legal type. -declare iXLen @llvm.lrint.iXLen.f16(half) - define iXLen @lrint_f16(half %a) nounwind { ; RV32IZFHMIN-LABEL: lrint_f16: ; RV32IZFHMIN: # %bb.0: @@ -80,8 +78,6 @@ define iXLen @lrint_f16(half %a) nounwind { ret iXLen %1 } -declare iXLen @llvm.lround.iXLen.f16(half) - define iXLen @lround_f16(half %a) nounwind { ; RV32IZFHMIN-LABEL: lround_f16: ; RV32IZFHMIN: # %bb.0: diff --git a/llvm/test/CodeGen/SPIRV/OpVariable_order.ll b/llvm/test/CodeGen/SPIRV/OpVariable_order.ll index 1e94be0886307..a43a4d66d04bb 100644 --- a/llvm/test/CodeGen/SPIRV/OpVariable_order.ll +++ b/llvm/test/CodeGen/SPIRV/OpVariable_order.ll @@ -13,7 +13,9 @@ define void @main() { entry: %0 = alloca <2 x i32>, align 4 + store <2 x i32> zeroinitializer, ptr %0, align 4 %1 = getelementptr <2 x i32>, ptr %0, i32 0, i32 0 %2 = alloca float, align 4 + store float 0.0, ptr %2, align 4 ret void } diff --git a/llvm/test/CodeGen/SPIRV/SpecConstants/restore-spec-type.ll b/llvm/test/CodeGen/SPIRV/SpecConstants/restore-spec-type.ll index 9e91854de1172..b0bad1819a25d 100644 --- a/llvm/test/CodeGen/SPIRV/SpecConstants/restore-spec-type.ll +++ b/llvm/test/CodeGen/SPIRV/SpecConstants/restore-spec-type.ll @@ -29,9 +29,12 @@ %Struct7 = type [2 x %Struct] %Nested = type { %Struct7 } +@G = global %Struct zeroinitializer + define spir_kernel void @foo(ptr 
addrspace(4) %arg1, ptr addrspace(4) %arg2) { entry: %var = alloca %Struct + store %Struct zeroinitializer, ptr %var %r1 = call %Struct @_Z29__spirv_SpecConstantComposite_1(float 1.0) store %Struct %r1, ptr addrspace(4) %arg1 %r2 = call %Struct7 @_Z29__spirv_SpecConstantComposite_2(%Struct %r1, %Struct %r1) diff --git a/llvm/test/CodeGen/SPIRV/basic_float_types.ll b/llvm/test/CodeGen/SPIRV/basic_float_types.ll index a0ba97e1d1f14..6cdc67bbf24ee 100644 --- a/llvm/test/CodeGen/SPIRV/basic_float_types.ll +++ b/llvm/test/CodeGen/SPIRV/basic_float_types.ll @@ -2,6 +2,9 @@ ; RUN: llc -O0 -mtriple=spirv64-unknown-unknown --spirv-ext=+SPV_KHR_bfloat16 %s -o - | FileCheck %s ; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown --spirv-ext=+SPV_KHR_bfloat16 %s -o - -filetype=obj | spirv-val %} +// TODO: Open bug bfloat16 cannot be stored to. +XFAIL: * + define void @main() { entry: @@ -49,50 +52,66 @@ entry: ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_half]] Function %half_Val = alloca half, align 2 + store half 0.0, ptr %half_Val, align 2 ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_bfloat]] Function %bfloat_Val = alloca bfloat, align 2 + store bfloat 0.0, ptr %bfloat_Val, align 2 ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_float]] Function %float_Val = alloca float, align 4 + store float 0.0, ptr %float_Val, align 4 ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_double]] Function %double_Val = alloca double, align 8 + store double 0.0, ptr %double_Val, align 8 ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_v2half]] Function %half2_Val = alloca <2 x half>, align 4 + store <2 x half> zeroinitializer, ptr %half2_Val, align 4 ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_v3half]] Function %half3_Val = alloca <3 x half>, align 8 + store <3 x half> zeroinitializer, ptr %half3_Val, align 8 ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_v4half]] Function %half4_Val = alloca <4 x half>, align 8 + store <4 x half> zeroinitializer, ptr %half4_Val, align 8 ; CHECK: 
%[[#]] = OpVariable %[[#ptr_Function_v2bfloat]] Function %bfloat2_Val = alloca <2 x bfloat>, align 4 + store <2 x bfloat> zeroinitializer, ptr %bfloat2_Val, align 4 ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_v3bfloat]] Function %bfloat3_Val = alloca <3 x bfloat>, align 8 + store <3 x bfloat> zeroinitializer, ptr %bfloat3_Val, align 8 ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_v4bfloat]] Function %bfloat4_Val = alloca <4 x bfloat>, align 8 + store <4 x bfloat> zeroinitializer, ptr %bfloat4_Val, align 8 ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_v2float]] Function %float2_Val = alloca <2 x float>, align 8 + store <2 x float> zeroinitializer, ptr %float2_Val, align 8 ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_v3float]] Function %float3_Val = alloca <3 x float>, align 16 + store <3 x float> zeroinitializer, ptr %float3_Val, align 16 ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_v4float]] Function %float4_Val = alloca <4 x float>, align 16 + store <4 x float> zeroinitializer, ptr %float4_Val, align 16 ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_v2double]] Function %double2_Val = alloca <2 x double>, align 16 + store <2 x double> zeroinitializer, ptr %double2_Val, align 16 ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_v3double]] Function %double3_Val = alloca <3 x double>, align 32 + store <3 x double> zeroinitializer, ptr %double3_Val, align 32 ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_v4double]] Function %double4_Val = alloca <4 x double>, align 32 + store <4 x double> zeroinitializer, ptr %double4_Val, align 32 ret void } diff --git a/llvm/test/CodeGen/SPIRV/basic_int_types.ll b/llvm/test/CodeGen/SPIRV/basic_int_types.ll index 5aa7aaf6fbd01..1ed241eed4019 100644 --- a/llvm/test/CodeGen/SPIRV/basic_int_types.ll +++ b/llvm/test/CodeGen/SPIRV/basic_int_types.ll @@ -37,39 +37,51 @@ entry: ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_short]] Function %int16_t_Val = alloca i16, align 2 + store i16 0, ptr %int16_t_Val, align 2 ; CHECK: %[[#]] = OpVariable 
%[[#ptr_Function_int]] Function %int_Val = alloca i32, align 4 + store i32 0, ptr %int_Val, align 4 ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_long]] Function %int64_t_Val = alloca i64, align 8 + store i64 0, ptr %int64_t_Val, align 8 ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_v2short]] Function %int16_t2_Val = alloca <2 x i16>, align 4 + store <2 x i16> zeroinitializer, ptr %int16_t2_Val, align 4 ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_v3short]] Function %int16_t3_Val = alloca <3 x i16>, align 8 + store <3 x i16> zeroinitializer, ptr %int16_t3_Val, align 8 ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_v4short]] Function %int16_t4_Val = alloca <4 x i16>, align 8 + store <4 x i16> zeroinitializer, ptr %int16_t4_Val, align 8 ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_v2int]] Function %int2_Val = alloca <2 x i32>, align 8 + store <2 x i32> zeroinitializer, ptr %int2_Val, align 8 ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_v3int]] Function %int3_Val = alloca <3 x i32>, align 16 + store <3 x i32> zeroinitializer, ptr %int3_Val, align 16 ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_v4int]] Function %int4_Val = alloca <4 x i32>, align 16 + store <4 x i32> zeroinitializer, ptr %int4_Val, align 16 ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_v2long]] Function %int64_t2_Val = alloca <2 x i64>, align 16 + store <2 x i64> zeroinitializer, ptr %int64_t2_Val, align 16 ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_v3long]] Function %int64_t3_Val = alloca <3 x i64>, align 32 + store <3 x i64> zeroinitializer, ptr %int64_t3_Val, align 32 ; CHECK: %[[#]] = OpVariable %[[#ptr_Function_v4long]] Function %int64_t4_Val = alloca <4 x i64>, align 32 + store <4 x i64> zeroinitializer, ptr %int64_t4_Val, align 32 ret void } diff --git a/llvm/test/CodeGen/SPIRV/basic_int_types_spirvdis.ll b/llvm/test/CodeGen/SPIRV/basic_int_types_spirvdis.ll index 56b5f48715533..f3c8f9967211a 100644 --- a/llvm/test/CodeGen/SPIRV/basic_int_types_spirvdis.ll +++ 
b/llvm/test/CodeGen/SPIRV/basic_int_types_spirvdis.ll @@ -6,39 +6,51 @@ define void @main() { entry: ; CHECK: %int16_t_Val = OpVariable %_ptr_Function_ushort Function %int16_t_Val = alloca i16, align 2 + store i16 0, i16* %int16_t_Val, align 2 ; CHECK: %int_Val = OpVariable %_ptr_Function_uint Function %int_Val = alloca i32, align 4 + store i32 0, i32* %int_Val, align 4 ; CHECK: %int64_t_Val = OpVariable %_ptr_Function_ulong Function %int64_t_Val = alloca i64, align 8 + store i64 0, i64* %int64_t_Val, align 8 ; CHECK: %int16_t2_Val = OpVariable %_ptr_Function_v2ushort Function %int16_t2_Val = alloca <2 x i16>, align 4 + store <2 x i16> zeroinitializer, <2 x i16>* %int16_t2_Val, align 4 ; CHECK: %int16_t3_Val = OpVariable %_ptr_Function_v3ushort Function %int16_t3_Val = alloca <3 x i16>, align 8 + store <3 x i16> zeroinitializer, <3 x i16>* %int16_t3_Val, align 8 ; CHECK: %int16_t4_Val = OpVariable %_ptr_Function_v4ushort Function %int16_t4_Val = alloca <4 x i16>, align 8 + store <4 x i16> zeroinitializer, <4 x i16>* %int16_t4_Val, align 8 ; CHECK: %int2_Val = OpVariable %_ptr_Function_v2uint Function %int2_Val = alloca <2 x i32>, align 8 + store <2 x i32> zeroinitializer, <2 x i32>* %int2_Val, align 8 ; CHECK: %int3_Val = OpVariable %_ptr_Function_v3uint Function %int3_Val = alloca <3 x i32>, align 16 + store <3 x i32> zeroinitializer, <3 x i32>* %int3_Val, align 16 ; CHECK: %int4_Val = OpVariable %_ptr_Function_v4uint Function %int4_Val = alloca <4 x i32>, align 16 + store <4 x i32> zeroinitializer, <4 x i32>* %int4_Val, align 16 ; CHECK: %int64_t2_Val = OpVariable %_ptr_Function_v2ulong Function %int64_t2_Val = alloca <2 x i64>, align 16 + store <2 x i64> zeroinitializer, <2 x i64>* %int64_t2_Val, align 16 ; CHECK: %int64_t3_Val = OpVariable %_ptr_Function_v3ulong Function %int64_t3_Val = alloca <3 x i64>, align 32 + store <3 x i64> zeroinitializer, <3 x i64>* %int64_t3_Val, align 32 ; CHECK: %int64_t4_Val = OpVariable %_ptr_Function_v4ulong Function 
%int64_t4_Val = alloca <4 x i64>, align 32 + store <4 x i64> zeroinitializer, <4 x i64>* %int64_t4_Val, align 32 ret void } diff --git a/llvm/test/CodeGen/SPIRV/builtin_intrinsics_32.ll b/llvm/test/CodeGen/SPIRV/builtin_intrinsics_32.ll index 39a755e736081..bca90f4ebd151 100644 --- a/llvm/test/CodeGen/SPIRV/builtin_intrinsics_32.ll +++ b/llvm/test/CodeGen/SPIRV/builtin_intrinsics_32.ll @@ -33,6 +33,28 @@ target triple = "spirv32-unknown-unknown" ; CHECK: [[SubgroupId]] = OpVariable [[I32PTR]] Input ; CHECK: [[SubgroupLocalInvocationId]] = OpVariable [[I32PTR]] Input +@G_spv_num_workgroups_0 = global i32 0 +@G_spv_num_workgroups_1 = global i32 0 +@G_spv_num_workgroups_2 = global i32 0 +@G_spv_workgroup_size_0 = global i32 0 +@G_spv_workgroup_size_1 = global i32 0 +@G_spv_workgroup_size_2 = global i32 0 +@G_spv_group_id_0 = global i32 0 +@G_spv_group_id_1 = global i32 0 +@G_spv_group_id_2 = global i32 0 +@G_spv_thread_id_in_group_0 = global i32 0 +@G_spv_thread_id_in_group_1 = global i32 0 +@G_spv_thread_id_in_group_2 = global i32 0 +@G_spv_thread_id_0 = global i32 0 +@G_spv_thread_id_1 = global i32 0 +@G_spv_thread_id_2 = global i32 0 +@G_spv_global_size_0 = global i32 0 +@G_spv_global_size_1 = global i32 0 +@G_spv_global_size_2 = global i32 0 +@G_spv_global_offset_0 = global i32 0 +@G_spv_global_offset_1 = global i32 0 +@G_spv_global_offset_2 = global i32 0 + ; Function Attrs: convergent noinline norecurse nounwind optnone define spir_func void @test_id_and_range() { entry: @@ -44,66 +66,87 @@ entry: ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[NumWorkgroups]] ; CHECK: OpCompositeExtract [[I32]] [[LD]] 0 %spv.num.workgroups = call i32 @llvm.spv.num.workgroups.i32(i32 0) + store i32 %spv.num.workgroups, i32* @G_spv_num_workgroups_0 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[NumWorkgroups]] ; CHECK: OpCompositeExtract [[I32]] [[LD]] 1 %spv.num.workgroups1 = call i32 @llvm.spv.num.workgroups.i32(i32 1) + store i32 %spv.num.workgroups1, i32* @G_spv_num_workgroups_1 ; 
CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[NumWorkgroups]] ; CHECK: OpCompositeExtract [[I32]] [[LD]] 2 %spv.num.workgroups2 = call i32 @llvm.spv.num.workgroups.i32(i32 2) + store i32 %spv.num.workgroups2, i32* @G_spv_num_workgroups_2 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[WorkgroupSize]] ; CHECK: OpCompositeExtract [[I32]] [[LD]] 0 %spv.workgroup.size = call i32 @llvm.spv.workgroup.size.i32(i32 0) + store i32 %spv.workgroup.size, i32* @G_spv_workgroup_size_0 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[WorkgroupSize]] ; CHECK: OpCompositeExtract [[I32]] [[LD]] 1 %spv.workgroup.size3 = call i32 @llvm.spv.workgroup.size.i32(i32 1) + store i32 %spv.workgroup.size3, i32* @G_spv_workgroup_size_1 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[WorkgroupSize]] ; CHECK: OpCompositeExtract [[I32]] [[LD]] 2 %spv.workgroup.size4 = call i32 @llvm.spv.workgroup.size.i32(i32 2) + store i32 %spv.workgroup.size4, i32* @G_spv_workgroup_size_2 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[WorkgroupId]] ; CHECK: OpCompositeExtract [[I32]] [[LD]] 0 %spv.group.id = call i32 @llvm.spv.group.id.i32(i32 0) + store i32 %spv.group.id, i32* @G_spv_group_id_0 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[WorkgroupId]] ; CHECK: OpCompositeExtract [[I32]] [[LD]] 1 %spv.group.id5 = call i32 @llvm.spv.group.id.i32(i32 1) + store i32 %spv.group.id5, i32* @G_spv_group_id_1 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[WorkgroupId]] ; CHECK: OpCompositeExtract [[I32]] [[LD]] 2 %spv.group.id6 = call i32 @llvm.spv.group.id.i32(i32 2) + store i32 %spv.group.id6, i32* @G_spv_group_id_2 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[LocalInvocationId]] ; CHECK: OpCompositeExtract [[I32]] [[LD]] 0 %spv.thread.id.in.group = call i32 @llvm.spv.thread.id.in.group.i32(i32 0) + store i32 %spv.thread.id.in.group, i32* @G_spv_thread_id_in_group_0 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[LocalInvocationId]] ; CHECK: OpCompositeExtract [[I32]] [[LD]] 1 %spv.thread.id.in.group7 = call i32 
@llvm.spv.thread.id.in.group.i32(i32 1) + store i32 %spv.thread.id.in.group7, i32* @G_spv_thread_id_in_group_1 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[LocalInvocationId]] ; CHECK: OpCompositeExtract [[I32]] [[LD]] 2 %spv.thread.id.in.group8 = call i32 @llvm.spv.thread.id.in.group.i32(i32 2) + store i32 %spv.thread.id.in.group8, i32* @G_spv_thread_id_in_group_2 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[GlobalInvocationId]] ; CHECK: OpCompositeExtract [[I32]] [[LD]] 0 %spv.thread.id = call i32 @llvm.spv.thread.id.i32(i32 0) + store i32 %spv.thread.id, i32* @G_spv_thread_id_0 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[GlobalInvocationId]] ; CHECK: OpCompositeExtract [[I32]] [[LD]] 1 %spv.thread.id9 = call i32 @llvm.spv.thread.id.i32(i32 1) + store i32 %spv.thread.id9, i32* @G_spv_thread_id_1 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[GlobalInvocationId]] ; CHECK: OpCompositeExtract [[I32]] [[LD]] 2 %spv.thread.id10 = call i32 @llvm.spv.thread.id.i32(i32 2) + store i32 %spv.thread.id10, i32* @G_spv_thread_id_2 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[GlobalSize]] ; CHECK: OpCompositeExtract [[I32]] [[LD]] 0 %spv.num.workgroups11 = call i32 @llvm.spv.global.size.i32(i32 0) + store i32 %spv.num.workgroups11, i32* @G_spv_global_size_0 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[GlobalSize]] ; CHECK: OpCompositeExtract [[I32]] [[LD]] 1 %spv.num.workgroups12 = call i32 @llvm.spv.global.size.i32(i32 1) + store i32 %spv.num.workgroups12, i32* @G_spv_global_size_1 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[GlobalSize]] ; CHECK: OpCompositeExtract [[I32]] [[LD]] 2 %spv.num.workgroups13 = call i32 @llvm.spv.global.size.i32(i32 2) + store i32 %spv.num.workgroups13, i32* @G_spv_global_size_2 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[GlobalOffset]] ; CHECK: OpCompositeExtract [[I32]] [[LD]] 0 %spv.global.offset = call i32 @llvm.spv.global.offset.i32(i32 0) + store i32 %spv.global.offset, i32* @G_spv_global_offset_0 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] 
[[GlobalOffset]] ; CHECK: OpCompositeExtract [[I32]] [[LD]] 1 %spv.global.offset14 = call i32 @llvm.spv.global.offset.i32(i32 1) + store i32 %spv.global.offset14, i32* @G_spv_global_offset_1 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[GlobalOffset]] ; CHECK: OpCompositeExtract [[I32]] [[LD]] 2 %spv.global.offset15 = call i32 @llvm.spv.global.offset.i32(i32 2) + store i32 %spv.global.offset15, i32* @G_spv_global_offset_2 ; CHECK: OpLoad %5 [[SubgroupSize]] %0 = call i32 @llvm.spv.subgroup.size() store i32 %0, ptr %ssize, align 4 diff --git a/llvm/test/CodeGen/SPIRV/builtin_intrinsics_64.ll b/llvm/test/CodeGen/SPIRV/builtin_intrinsics_64.ll index dcdf8992ce1c4..26c2d866d14c7 100644 --- a/llvm/test/CodeGen/SPIRV/builtin_intrinsics_64.ll +++ b/llvm/test/CodeGen/SPIRV/builtin_intrinsics_64.ll @@ -34,6 +34,28 @@ target triple = "spirv64-unknown-unknown" ; CHECK: [[SubgroupId]] = OpVariable [[I32PTR]] Input ; CHECK: [[SubgroupLocalInvocationId]] = OpVariable [[I32PTR]] Input +@G_spv_num_workgroups_0 = global i64 0 +@G_spv_num_workgroups_1 = global i64 0 +@G_spv_num_workgroups_2 = global i64 0 +@G_spv_workgroup_size_0 = global i64 0 +@G_spv_workgroup_size_1 = global i64 0 +@G_spv_workgroup_size_2 = global i64 0 +@G_spv_group_id_0 = global i64 0 +@G_spv_group_id_1 = global i64 0 +@G_spv_group_id_2 = global i64 0 +@G_spv_thread_id_in_group_0 = global i64 0 +@G_spv_thread_id_in_group_1 = global i64 0 +@G_spv_thread_id_in_group_2 = global i64 0 +@G_spv_thread_id_0 = global i64 0 +@G_spv_thread_id_1 = global i64 0 +@G_spv_thread_id_2 = global i64 0 +@G_spv_global_size_0 = global i64 0 +@G_spv_global_size_1 = global i64 0 +@G_spv_global_size_2 = global i64 0 +@G_spv_global_offset_0 = global i64 0 +@G_spv_global_offset_1 = global i64 0 +@G_spv_global_offset_2 = global i64 0 + ; Function Attrs: convergent noinline norecurse nounwind optnone define spir_func void @test_id_and_range() { entry: @@ -45,66 +67,87 @@ entry: ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[NumWorkgroups]] ; 
CHECK: OpCompositeExtract [[I64]] [[LD]] 0 %spv.num.workgroups = call i64 @llvm.spv.num.workgroups.i64(i32 0) + store i64 %spv.num.workgroups, i64* @G_spv_num_workgroups_0 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[NumWorkgroups]] ; CHECK: OpCompositeExtract [[I64]] [[LD]] 1 %spv.num.workgroups1 = call i64 @llvm.spv.num.workgroups.i64(i32 1) + store i64 %spv.num.workgroups1, i64* @G_spv_num_workgroups_1 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[NumWorkgroups]] ; CHECK: OpCompositeExtract [[I64]] [[LD]] 2 %spv.num.workgroups2 = call i64 @llvm.spv.num.workgroups.i64(i32 2) + store i64 %spv.num.workgroups2, i64* @G_spv_num_workgroups_2 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[WorkgroupSize]] ; CHECK: OpCompositeExtract [[I64]] [[LD]] 0 %spv.workgroup.size = call i64 @llvm.spv.workgroup.size.i64(i32 0) + store i64 %spv.workgroup.size, i64* @G_spv_workgroup_size_0 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[WorkgroupSize]] ; CHECK: OpCompositeExtract [[I64]] [[LD]] 1 %spv.workgroup.size3 = call i64 @llvm.spv.workgroup.size.i64(i32 1) + store i64 %spv.workgroup.size3, i64* @G_spv_workgroup_size_1 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[WorkgroupSize]] ; CHECK: OpCompositeExtract [[I64]] [[LD]] 2 %spv.workgroup.size4 = call i64 @llvm.spv.workgroup.size.i64(i32 2) + store i64 %spv.workgroup.size4, i64* @G_spv_workgroup_size_2 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[WorkgroupId]] ; CHECK: OpCompositeExtract [[I64]] [[LD]] 0 %spv.group.id = call i64 @llvm.spv.group.id.i64(i32 0) + store i64 %spv.group.id, i64* @G_spv_group_id_0 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[WorkgroupId]] ; CHECK: OpCompositeExtract [[I64]] [[LD]] 1 %spv.group.id5 = call i64 @llvm.spv.group.id.i64(i32 1) + store i64 %spv.group.id5, i64* @G_spv_group_id_1 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[WorkgroupId]] ; CHECK: OpCompositeExtract [[I64]] [[LD]] 2 %spv.group.id6 = call i64 @llvm.spv.group.id.i64(i32 2) + store i64 %spv.group.id6, i64* @G_spv_group_id_2 ; CHECK: 
[[LD:%[0-9]*]] = OpLoad [[I64V3]] [[LocalInvocationId]] ; CHECK: OpCompositeExtract [[I64]] [[LD]] 0 %spv.thread.id.in.group = call i64 @llvm.spv.thread.id.in.group.i64(i32 0) + store i64 %spv.thread.id.in.group, i64* @G_spv_thread_id_in_group_0 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[LocalInvocationId]] ; CHECK: OpCompositeExtract [[I64]] [[LD]] 1 %spv.thread.id.in.group7 = call i64 @llvm.spv.thread.id.in.group.i64(i32 1) + store i64 %spv.thread.id.in.group7, i64* @G_spv_thread_id_in_group_1 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[LocalInvocationId]] ; CHECK: OpCompositeExtract [[I64]] [[LD]] 2 %spv.thread.id.in.group8 = call i64 @llvm.spv.thread.id.in.group.i64(i32 2) + store i64 %spv.thread.id.in.group8, i64* @G_spv_thread_id_in_group_2 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[GlobalInvocationId]] ; CHECK: OpCompositeExtract [[I64]] [[LD]] 0 %spv.thread.id = call i64 @llvm.spv.thread.id.i64(i32 0) + store i64 %spv.thread.id, i64* @G_spv_thread_id_0 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[GlobalInvocationId]] ; CHECK: OpCompositeExtract [[I64]] [[LD]] 1 %spv.thread.id9 = call i64 @llvm.spv.thread.id.i64(i32 1) + store i64 %spv.thread.id9, i64* @G_spv_thread_id_1 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[GlobalInvocationId]] ; CHECK: OpCompositeExtract [[I64]] [[LD]] 2 %spv.thread.id10 = call i64 @llvm.spv.thread.id.i64(i32 2) + store i64 %spv.thread.id10, i64* @G_spv_thread_id_2 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[GlobalSize]] ; CHECK: OpCompositeExtract [[I64]] [[LD]] 0 %spv.num.workgroups11 = call i64 @llvm.spv.global.size.i64(i32 0) + store i64 %spv.num.workgroups11, i64* @G_spv_global_size_0 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[GlobalSize]] ; CHECK: OpCompositeExtract [[I64]] [[LD]] 1 %spv.num.workgroups12 = call i64 @llvm.spv.global.size.i64(i32 1) + store i64 %spv.num.workgroups12, i64* @G_spv_global_size_1 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[GlobalSize]] ; CHECK: OpCompositeExtract [[I64]] [[LD]] 2 
%spv.num.workgroups13 = call i64 @llvm.spv.global.size.i64(i32 2) + store i64 %spv.num.workgroups13, i64* @G_spv_global_size_2 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[GlobalOffset]] ; CHECK: OpCompositeExtract [[I64]] [[LD]] 0 %spv.global.offset = call i64 @llvm.spv.global.offset.i64(i32 0) + store i64 %spv.global.offset, i64* @G_spv_global_offset_0 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[GlobalOffset]] ; CHECK: OpCompositeExtract [[I64]] [[LD]] 1 %spv.global.offset14 = call i64 @llvm.spv.global.offset.i64(i32 1) + store i64 %spv.global.offset14, i64* @G_spv_global_offset_1 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[GlobalOffset]] ; CHECK: OpCompositeExtract [[I64]] [[LD]] 2 %spv.global.offset15 = call i64 @llvm.spv.global.offset.i64(i32 2) + store i64 %spv.global.offset15, i64* @G_spv_global_offset_2 ; CHECK: OpLoad %5 [[SubgroupSize]] %0 = call i32 @llvm.spv.subgroup.size() store i32 %0, ptr %ssize, align 4 diff --git a/llvm/test/CodeGen/SPIRV/builtin_vars-decorate.ll b/llvm/test/CodeGen/SPIRV/builtin_vars-decorate.ll index 0c9b29de890d4..8dd9b387a6d84 100644 --- a/llvm/test/CodeGen/SPIRV/builtin_vars-decorate.ll +++ b/llvm/test/CodeGen/SPIRV/builtin_vars-decorate.ll @@ -81,17 +81,36 @@ @__spirv_BuiltInSubgroupId = external addrspace(1) global i32 @__spirv_BuiltInSubgroupLocalInvocationId = external addrspace(1) global i32 +@G_r1 = global i64 0 +@G_r2 = global i64 0 +@G_r3 = global i32 0 +@G_r4 = global i32 0 +@G_r5 = global i32 0 +@G_r6 = global i32 0 +@G_r7 = global i32 0 +@G_r8 = global i32 0 +@G_r9 = global i32 0 + define spir_kernel void @_Z1wv() { entry: %r1 = tail call spir_func i64 @get_global_linear_id() + store i64 %r1, i64* @G_r1 %r2 = tail call spir_func i64 @get_local_linear_id() + store i64 %r2, i64* @G_r2 %r3 = tail call spir_func i32 @get_work_dim() + store i32 %r3, i32* @G_r3 %r4 = tail call spir_func i32 @get_sub_group_size() + store i32 %r4, i32* @G_r4 %r5 = tail call spir_func i32 @get_max_sub_group_size() + store i32 %r5, i32* 
@G_r5 %r6 = tail call spir_func i32 @get_num_sub_groups() + store i32 %r6, i32* @G_r6 %r7 = tail call spir_func i32 @get_enqueued_num_sub_groups() + store i32 %r7, i32* @G_r7 %r8 = tail call spir_func i32 @get_sub_group_id() + store i32 %r8, i32* @G_r8 %r9 = tail call spir_func i32 @get_sub_group_local_id() + store i32 %r9, i32* @G_r9 ret void } diff --git a/llvm/test/CodeGen/SPIRV/debug-info/debug-type-pointer.ll b/llvm/test/CodeGen/SPIRV/debug-info/debug-type-pointer.ll index 3e0d0cc4cd8e2..d260c9f94d4ad 100644 --- a/llvm/test/CodeGen/SPIRV/debug-info/debug-type-pointer.ll +++ b/llvm/test/CodeGen/SPIRV/debug-info/debug-type-pointer.ll @@ -126,6 +126,7 @@ define spir_func i32 @test0() !dbg !17 { %14 = load ptr addrspace(4), ptr %11, align 4, !dbg !65 store ptr addrspace(4) %14, ptr %12, align 4, !dbg !64 #dbg_declare(ptr %13, !66, !DIExpression(DW_OP_constu, 0, DW_OP_swap, DW_OP_xderef), !70) + store [8 x i32] zeroinitializer, ptr %13, align 4 ret i32 0, !dbg !71 } @@ -169,6 +170,7 @@ define spir_func i32 @test1() !dbg !72 { %14 = load ptr addrspace(4), ptr %11, align 4, !dbg !97 store ptr addrspace(4) %14, ptr %12, align 4, !dbg !96 #dbg_declare(ptr %13, !98, !DIExpression(DW_OP_constu, 0, DW_OP_swap, DW_OP_xderef), !99) + store [8 x i32] zeroinitializer, ptr %13, align 4 ret i32 0, !dbg !100 } diff --git a/llvm/test/CodeGen/SPIRV/event-zero-const.ll b/llvm/test/CodeGen/SPIRV/event-zero-const.ll index 523d2ad9825f3..2bf8259e78785 100644 --- a/llvm/test/CodeGen/SPIRV/event-zero-const.ll +++ b/llvm/test/CodeGen/SPIRV/event-zero-const.ll @@ -12,11 +12,15 @@ ; CHECK: OpINotEqual %[[#]] %[[#]] %[[#LongNull]] ; CHECK: OpGroupAsyncCopy %[[#EventTy]] %[[#]] %[[#]] %[[#]] %[[#]] %[[#]] %[[#EventNull]] +@G_r1 = global i1 0 +@G_e1 = global target("spirv.Event") poison define weak_odr dso_local spir_kernel void @foo(i64 %_arg_i, ptr addrspace(1) %_arg_ptr, ptr addrspace(3) %_arg_local) { entry: %r1 = icmp ne i64 %_arg_i, 0 + store i1 %r1, ptr @G_r1 %e1 = tail call spir_func 
target("spirv.Event") @__spirv_GroupAsyncCopy(i32 2, ptr addrspace(3) %_arg_local, ptr addrspace(1) %_arg_ptr, i64 1, i64 1, target("spirv.Event") zeroinitializer) + store target("spirv.Event") %e1, ptr @G_e1 ret void } diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fun-ptr-addrcast.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fun-ptr-addrcast.ll index e5736b88b63a3..a9a0d3358f8cc 100644 --- a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fun-ptr-addrcast.ll +++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fun-ptr-addrcast.ll @@ -11,15 +11,22 @@ @G1 = addrspace(1) constant { [3 x ptr addrspace(4)] } { [3 x ptr addrspace(4)] [ptr addrspace(4) null, ptr addrspace(4) addrspacecast (ptr @foo to ptr addrspace(4)), ptr addrspace(4) addrspacecast (ptr @bar to ptr addrspace(4))] } @G2 = addrspace(1) constant { [3 x ptr addrspace(4)] } { [3 x ptr addrspace(4)] [ptr addrspace(4) addrspacecast (ptr null to ptr addrspace(4)), ptr addrspace(4) addrspacecast (ptr @bar to ptr addrspace(4)), ptr addrspace(4) addrspacecast (ptr @foo to ptr addrspace(4))] } +@G_r1_foo = global ptr addrspace(4) null +@G_r2_foo = global ptr addrspace(4) null +@G_r1_bar = global ptr addrspace(4) null + define void @foo(ptr addrspace(4) %p) { entry: %r1 = addrspacecast ptr @foo to ptr addrspace(4) + store ptr addrspace(4) %r1, ptr @G_r1_foo %r2 = addrspacecast ptr null to ptr addrspace(4) + store ptr addrspace(4) %r2, ptr @G_r2_foo ret void } define void @bar(ptr addrspace(4) %p) { entry: %r1 = addrspacecast ptr @bar to ptr addrspace(4) + store ptr addrspace(4) %r1, ptr @G_r1_bar ret void } diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_bfloat16/bfloat16.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_bfloat16/bfloat16.ll index 22668e71fb257..92652f1faefc0 100644 --- a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_bfloat16/bfloat16.ll +++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_bfloat16/bfloat16.ll @@ 
-12,11 +12,16 @@ target datalayout = "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-n8:16:32:64" target triple = "spir64-unknown-unknown" +@G1 = global bfloat 0.0 +@G2 = global <2 x bfloat> zeroinitializer + define spir_kernel void @test() { entry: %addr1 = alloca bfloat %addr2 = alloca <2 x bfloat> %data1 = load bfloat, ptr %addr1 %data2 = load <2 x bfloat>, ptr %addr2 + store bfloat %data1, ptr @G1 + store <2 x bfloat> %data2, ptr @G2 ret void } diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_float_controls2/decoration.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_float_controls2/decoration.ll index d3fe9e43450cd..81497f26f1aef 100644 --- a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_float_controls2/decoration.ll +++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_float_controls2/decoration.ll @@ -79,6 +79,54 @@ ; CHECK: OpDecorate %[[#maxResV]] FPFastMathMode NotNaN|NotInf|NSZ|AllowRecip|AllowContract|AllowReassoc|AllowTransform ; CHECK: OpDecorate %[[#maxCommonResV]] FPFastMathMode NotNaN|NotInf +@G_addRes = global float 0.0 +@G_subRes = global float 0.0 +@G_mulRes = global float 0.0 +@G_divRes = global float 0.0 +@G_remRes = global float 0.0 +@G_negRes = global float 0.0 +@G_oeqRes = global i1 0 +@G_oneRes = global i1 0 +@G_oltRes = global i1 0 +@G_ogtRes = global i1 0 +@G_oleRes = global i1 0 +@G_ogeRes = global i1 0 +@G_ordRes = global i1 0 +@G_ueqRes = global i1 0 +@G_uneRes = global i1 0 +@G_ultRes = global i1 0 +@G_ugtRes = global i1 0 +@G_uleRes = global i1 0 +@G_ugeRes = global i1 0 +@G_unoRes = global i1 0 +@G_modRes = global float 0.0 +@G_maxRes = global float 0.0 +@G_maxCommonRes = global float 0.0 + +@G_addResV = global <2 x float> zeroinitializer +@G_subResV = global <2 x float> zeroinitializer +@G_mulResV = global <2 x float> zeroinitializer +@G_divResV = global <2 x float> zeroinitializer +@G_remResV = global <2 x float> zeroinitializer +@G_negResV = global <2 x float> zeroinitializer +@G_oeqResV = 
global <2 x i1> zeroinitializer +@G_oneResV = global <2 x i1> zeroinitializer +@G_oltResV = global <2 x i1> zeroinitializer +@G_ogtResV = global <2 x i1> zeroinitializer +@G_oleResV = global <2 x i1> zeroinitializer +@G_ogeResV = global <2 x i1> zeroinitializer +@G_ordResV = global <2 x i1> zeroinitializer +@G_ueqResV = global <2 x i1> zeroinitializer +@G_uneResV = global <2 x i1> zeroinitializer +@G_ultResV = global <2 x i1> zeroinitializer +@G_ugtResV = global <2 x i1> zeroinitializer +@G_uleResV = global <2 x i1> zeroinitializer +@G_ugeResV = global <2 x i1> zeroinitializer +@G_unoResV = global <2 x i1> zeroinitializer +@G_modResV = global <2 x float> zeroinitializer +@G_maxResV = global <2 x float> zeroinitializer +@G_maxCommonResV = global <2 x float> zeroinitializer + ; Function Attrs: convergent mustprogress nofree nounwind willreturn memory(none) declare spir_func float @_Z4fmodff(float, float) declare dso_local spir_func noundef nofpclass(nan inf) float @_Z16__spirv_ocl_fmaxff(float noundef nofpclass(nan inf), float noundef nofpclass(nan inf)) local_unnamed_addr #1 @@ -91,55 +139,101 @@ declare dso_local spir_func noundef nofpclass(nan inf) <2 x float> @_Z23__spirv_ define weak_odr dso_local spir_kernel void @foo(float %1, float %2) { entry: %addRes = fadd float %1, %2 + store float %addRes, float* @G_addRes %subRes = fsub nnan float %1, %2 + store float %subRes, float* @G_subRes %mulRes = fmul ninf float %1, %2 + store float %mulRes, float* @G_mulRes %divRes = fdiv nsz float %1, %2 + store float %divRes, float* @G_divRes %remRes = frem arcp float %1, %2 + store float %remRes, float* @G_remRes %negRes = fneg fast float %1 + store float %negRes, float* @G_negRes %oeqRes = fcmp nnan ninf oeq float %1, %2 + store i1 %oeqRes, i1* @G_oeqRes %oneRes = fcmp one float %1, %2, !spirv.Decorations !3 + store i1 %oneRes, i1* @G_oneRes %oltRes = fcmp nnan olt float %1, %2, !spirv.Decorations !3 + store i1 %oltRes, i1* @G_oltRes %ogtRes = fcmp ninf ogt float %1, %2, 
!spirv.Decorations !3 + store i1 %ogtRes, i1* @G_ogtRes %oleRes = fcmp nsz ole float %1, %2, !spirv.Decorations !3 + store i1 %oleRes, i1* @G_oleRes %ogeRes = fcmp arcp oge float %1, %2, !spirv.Decorations !3 + store i1 %ogeRes, i1* @G_ogeRes %ordRes = fcmp fast ord float %1, %2, !spirv.Decorations !3 + store i1 %ordRes, i1* @G_ordRes %ueqRes = fcmp nnan ninf ueq float %1, %2, !spirv.Decorations !3 + store i1 %ueqRes, i1* @G_ueqRes %uneRes = fcmp une float %1, %2, !spirv.Decorations !3 + store i1 %uneRes, i1* @G_uneRes %ultRes = fcmp ult float %1, %2, !spirv.Decorations !3 + store i1 %ultRes, i1* @G_ultRes %ugtRes = fcmp ugt float %1, %2, !spirv.Decorations !3 + store i1 %ugtRes, i1* @G_ugtRes %uleRes = fcmp ule float %1, %2, !spirv.Decorations !3 + store i1 %uleRes, i1* @G_uleRes %ugeRes = fcmp uge float %1, %2, !spirv.Decorations !3 + store i1 %ugeRes, i1* @G_ugeRes %unoRes = fcmp uno float %1, %2, !spirv.Decorations !3 + store i1 %unoRes, i1* @G_unoRes %modRes = call spir_func float @_Z4fmodff(float %1, float %2) + store float %modRes, float* @G_modRes %maxRes = tail call fast spir_func noundef nofpclass(nan inf) float @_Z16__spirv_ocl_fmaxff(float noundef nofpclass(nan inf) %1, float noundef nofpclass(nan inf) %2) + store float %maxRes, float* @G_maxRes %maxCommonRes = tail call spir_func noundef float @_Z23__spirv_ocl_fmax_commonff(float noundef nofpclass(nan inf) %1, float noundef nofpclass(nan inf) %2) + store float %maxCommonRes, float* @G_maxCommonRes ret void } define weak_odr dso_local spir_kernel void @fooV(<2 x float> %v1, <2 x float> %v2) { %addResV = fadd <2 x float> %v1, %v2 + store <2 x float> %addResV, <2 x float>* @G_addResV %subResV = fsub nnan <2 x float> %v1, %v2 + store <2 x float> %subResV, <2 x float>* @G_subResV %mulResV = fmul ninf <2 x float> %v1, %v2 + store <2 x float> %mulResV, <2 x float>* @G_mulResV %divResV = fdiv nsz <2 x float> %v1, %v2 + store <2 x float> %divResV, <2 x float>* @G_divResV %remResV = frem arcp <2 x float> %v1, 
%v2 + store <2 x float> %remResV, <2 x float>* @G_remResV %negResV = fneg fast <2 x float> %v1 + store <2 x float> %negResV, <2 x float>* @G_negResV %oeqResV = fcmp nnan ninf oeq <2 x float> %v1, %v2 + store <2 x i1> %oeqResV, <2 x i1>* @G_oeqResV %oneResV = fcmp one <2 x float> %v1, %v2, !spirv.Decorations !3 + store <2 x i1> %oneResV, <2 x i1>* @G_oneResV %oltResV = fcmp nnan olt <2 x float> %v1, %v2, !spirv.Decorations !3 + store <2 x i1> %oltResV, <2 x i1>* @G_oltResV %ogtResV = fcmp ninf ogt <2 x float> %v1, %v2, !spirv.Decorations !3 + store <2 x i1> %ogtResV, <2 x i1>* @G_ogtResV %oleResV = fcmp nsz ole <2 x float> %v1, %v2, !spirv.Decorations !3 + store <2 x i1> %oleResV, <2 x i1>* @G_oleResV %ogeResV = fcmp arcp oge <2 x float> %v1, %v2, !spirv.Decorations !3 + store <2 x i1> %ogeResV, <2 x i1>* @G_ogeResV %ordResV = fcmp fast ord <2 x float> %v1, %v2, !spirv.Decorations !3 + store <2 x i1> %ordResV, <2 x i1>* @G_ordResV %ueqResV = fcmp nnan ninf ueq <2 x float> %v1, %v2, !spirv.Decorations !3 + store <2 x i1> %ueqResV, <2 x i1>* @G_ueqResV %uneResV = fcmp une <2 x float> %v1, %v2, !spirv.Decorations !3 + store <2 x i1> %uneResV, <2 x i1>* @G_uneResV %ultResV = fcmp ult <2 x float> %v1, %v2, !spirv.Decorations !3 + store <2 x i1> %ultResV, <2 x i1>* @G_ultResV %ugtResV = fcmp ugt <2 x float> %v1, %v2, !spirv.Decorations !3 + store <2 x i1> %ugtResV, <2 x i1>* @G_ugtResV %uleResV = fcmp ule <2 x float> %v1, %v2, !spirv.Decorations !3 + store <2 x i1> %uleResV, <2 x i1>* @G_uleResV %ugeResV = fcmp uge <2 x float> %v1, %v2, !spirv.Decorations !3 + store <2 x i1> %ugeResV, <2 x i1>* @G_ugeResV %unoResV = fcmp uno <2 x float> %v1, %v2, !spirv.Decorations !3 + store <2 x i1> %unoResV, <2 x i1>* @G_unoResV %modResV = call spir_func <2 x float> @_Z4fmodDv2_fDv2_f(<2 x float> %v1, <2 x float> %v2) + store <2 x float> %modResV, <2 x float>* @G_modResV %maxResV = tail call fast spir_func noundef nofpclass(nan inf) <2 x float> @_Z16__spirv_ocl_fmaxDv2_fDv2_f(<2 x 
float> noundef nofpclass(nan inf) %v1, <2 x float> noundef nofpclass(nan inf) %v2) + store <2 x float> %maxResV, <2 x float>* @G_maxResV %maxCommonResV = tail call spir_func noundef <2 x float> @_Z23__spirv_ocl_fmax_commonDv2_fDv2_f(<2 x float> noundef nofpclass(nan inf) %v1, <2 x float> noundef nofpclass(nan inf) %v2) + store <2 x float> %maxCommonResV, <2 x float>* @G_maxCommonResV ret void } diff --git a/llvm/test/CodeGen/SPIRV/extensions/enable-all-extensions-but-one.ll b/llvm/test/CodeGen/SPIRV/extensions/enable-all-extensions-but-one.ll index 4db0ba33d52c9..face4a9f5e615 100644 --- a/llvm/test/CodeGen/SPIRV/extensions/enable-all-extensions-but-one.ll +++ b/llvm/test/CodeGen/SPIRV/extensions/enable-all-extensions-but-one.ll @@ -2,10 +2,15 @@ ; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=KHR %s -o - | FileCheck %s ; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=khr %s -o - | FileCheck %s +@G = global i32 0 + define i6 @foo() { %call = tail call i32 @llvm.bitreverse.i32(i32 42) + store i32 %call, ptr @G ret i6 2 } ; CHECK-NOT: OpExtension "SPV_INTEL_arbitrary_precision_integers" ; CHECK-DAG: OpExtension "SPV_KHR_bit_instructions" + +declare i32 @llvm.bitreverse.i32(i32) diff --git a/llvm/test/CodeGen/SPIRV/freeze.ll b/llvm/test/CodeGen/SPIRV/freeze.ll index 9077d2ede72a9..4f7e7794ed03b 100644 --- a/llvm/test/CodeGen/SPIRV/freeze.ll +++ b/llvm/test/CodeGen/SPIRV/freeze.ll @@ -1,15 +1,15 @@ ; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s ; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %} -; CHECK: OpName %[[Arg1:.*]] "arg1" -; CHECK: OpName %[[Arg2:.*]] "arg2" -; CHECK: OpName %[[NotAStaticPoison:.*]] "poison1" -; CHECK: OpName %[[NotAStaticPoison]] "nil0" -; CHECK: OpName %[[StaticPoisonIntFreeze:.*]] "nil1" -; CHECK: OpName %[[StaticPoisonFloatFreeze:.*]] "nil2" -; CHECK: OpName %[[Arg1]] "val1" -; CHECK: OpName 
%[[Const100:.*]] "val2" -; CHECK: OpName %[[Const100]] "val3" +; CHECK-DAG: OpName %[[Arg1:.*]] "arg1" +; CHECK-DAG: OpName %[[Arg2:.*]] "arg2" +; CHECK-DAG: OpName %[[NotAStaticPoison:.*]] "poison1" +; CHECK-DAG: OpName %[[NotAStaticPoison]] "nil0" +; CHECK-DAG: OpName %[[StaticPoisonIntFreeze:.*]] "nil1" +; CHECK-DAG: OpName %[[StaticPoisonFloatFreeze:.*]] "nil2" +; CHECK-DAG: OpName %[[Arg1]] "val1" +; CHECK-DAG: OpName %[[Const100:.*]] "val2" +; CHECK-DAG: OpName %[[Const100]] "val3" ; CHECK: OpDecorate ; CHECK-DAG: %[[FloatTy:.*]] = OpTypeFloat 32 ; CHECK-DAG: %[[ShortTy:.*]] = OpTypeInt 16 0 @@ -18,17 +18,37 @@ ; CHECK-DAG: %[[Undef32:.*]] = OpUndef %[[IntTy]] ; CHECK-DAG: %[[UndefFloat:.*]] = OpUndef %[[FloatTy]] ; CHECK-DAG: %[[Const100]] = OpConstant %[[IntTy]] 100 -; CHECK: %[[Arg1]] = OpFunctionParameter %[[FloatTy]] -; CHECK: %[[NotAStaticPoison]] = OpIAdd %[[ShortTy]] %[[Arg2]] %[[Undef16]] -define spir_func void @foo(float %arg1, i16 %arg2) { +define spir_func i16 @test_nil0(i16 %arg2) { entry: +; CHECK: %[[NotAStaticPoison]] = OpIAdd %[[ShortTy]] %[[Arg2]] %[[Undef16]] %poison1 = add i16 %arg2, undef %nil0 = freeze i16 %poison1 + ret i16 %nil0 +} + +define spir_func i32 @test_nil1() { +entry: %nil1 = freeze i32 undef + ret i32 %nil1 +} + +define spir_func float @test_nil2() { +entry: %nil2 = freeze float poison + ret float %nil2 +} + +define spir_func float @freeze_float(float %arg1) { +entry: +; CHECK: %[[Arg1]] = OpFunctionParameter %[[FloatTy]] %val1 = freeze float %arg1 + ret float %val1 +} + +define spir_func i32 @foo() { +entry: %val2 = freeze i32 100 %val3 = freeze i32 %val2 - ret void + ret i32 %val3 } diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/AddUint64.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/AddUint64.ll index a97492b8453ea..a15d628cc3614 100644 --- a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/AddUint64.ll +++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/AddUint64.ll @@ -63,7 +63,7 @@ entry: ; CHECK: %[[#a_high:]] = OpVectorShuffle 
%[[#vec2_int_32]] %[[#a]] %[[#undef_v4i32]] 1 3 ; CHECK: %[[#b_low:]] = OpVectorShuffle %[[#vec2_int_32]] %[[#b]] %[[#undef_v4i32]] 0 2 ; CHECK: %[[#b_high:]] = OpVectorShuffle %[[#vec2_int_32]] %[[#b]] %[[#undef_v4i32]] 1 3 -; CHECK: %[[#iaddcarry:]] = OpIAddCarry %[[#struct_v2i32_v2i32]] %[[#a_low]] %[[#vec2_int_32]] +; CHECK: %[[#iaddcarry:]] = OpIAddCarry %[[#struct_v2i32_v2i32]] %[[#a_low]] %[[#b_low]] ; CHECK: %[[#lowsum:]] = OpCompositeExtract %[[#vec2_int_32]] %[[#iaddcarry]] 0 ; CHECK: %[[#carry:]] = OpCompositeExtract %[[#vec2_int_32]] %[[#iaddcarry]] 1 ; CHECK: %[[#carry_ne0:]] = OpINotEqual %[[#vec2_bool]] %[[#carry]] %[[#const_v2i32_0_0]] diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/abs.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/abs.ll index 4a15fa8b14537..75fac211f1108 100644 --- a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/abs.ll +++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/abs.ll @@ -3,24 +3,25 @@ ; CHECK: OpExtInstImport "GLSL.std.450" +@i = global i32 0, align 4 +@absi = global i32 0, align 4 +@f = global float 0.0, align 4 +@absf = global float 0.0, align 4 + define void @main() #1 { entry: - %i = alloca i32, align 4 - %absi = alloca i32, align 4 - %f = alloca float, align 4 - %absf = alloca float, align 4 - %0 = load i32, ptr %i, align 4 + %0 = load i32, ptr @i, align 4 ; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] SAbs %[[#]] %elt.abs = call i32 @llvm.abs.i32(i32 %0, i1 false) - store i32 %elt.abs, ptr %absi, align 4 - %1 = load float, ptr %f, align 4 + store i32 %elt.abs, ptr @absi, align 4 + %1 = load float, ptr @f, align 4 ; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] FAbs %[[#]] %elt.abs1 = call float @llvm.fabs.f32(float %1) - store float %elt.abs1, ptr %absf, align 4 + store float %elt.abs1, ptr @absf, align 4 ret void } diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/log10.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/log10.ll index 7583066c01cf8..dceaa8c209957 100644 --- a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/log10.ll +++ 
b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/log10.ll @@ -7,21 +7,23 @@ ; CHECK: %[[#v4float:]] = OpTypeVector %[[#float]] 4 ; CHECK: %[[#float_0_30103001:]] = OpConstant %[[#float]] 0.30103000998497009 +@logf = global float 0.0, align 4 +@logf4 = global <4 x float> zeroinitializer, align 16 + define void @main(float %f, <4 x float> %f4) { entry: ; CHECK-DAG: %[[#f:]] = OpFunctionParameter %[[#float]] ; CHECK-DAG: %[[#f4:]] = OpFunctionParameter %[[#v4float]] - %logf = alloca float, align 4 - %logf4 = alloca <4 x float>, align 16 - ; CHECK: %[[#log2:]] = OpExtInst %[[#float]] %[[#extinst]] Log2 %[[#f]] ; CHECK: %[[#res:]] = OpFMul %[[#float]] %[[#log2]] %[[#float_0_30103001]] %elt.log10 = call float @llvm.log10.f32(float %f) + store float %elt.log10, ptr @logf, align 4 ; CHECK: %[[#log2:]] = OpExtInst %[[#v4float]] %[[#extinst]] Log2 %[[#f4]] ; CHECK: %[[#res:]] = OpVectorTimesScalar %[[#v4float]] %[[#log2]] %[[#float_0_30103001]] %elt.log101 = call <4 x float> @llvm.log10.v4f32(<4 x float> %f4) + store <4 x float> %elt.log101, ptr @logf4, align 16 ret void } diff --git a/llvm/test/CodeGen/SPIRV/hlsl-resources/cbuffer-array.ll b/llvm/test/CodeGen/SPIRV/hlsl-resources/cbuffer-array.ll new file mode 100644 index 0000000000000..5d45178715d70 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/hlsl-resources/cbuffer-array.ll @@ -0,0 +1,77 @@ +; RUN: llc -O0 -mtriple=spirv-unknown-vulkan-compute %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-vulkan-compute %s -o - -filetype=obj | spirv-val %} + +; CHECK-DAG: %[[FLOAT:[0-9]+]] = OpTypeFloat 32 +; CHECK-DAG: %[[VEC4:[0-9]+]] = OpTypeVector %[[FLOAT]] 4 +; CHECK-DAG: %[[PTR_VEC4:[0-9]+]] = OpTypePointer Uniform %[[VEC4]] +; CHECK-DAG: %[[INT:[0-9]+]] = OpTypeInt 32 0 +; CHECK-DAG: %[[PTR_INT:[0-9]+]] = OpTypePointer Uniform %[[INT]] +; CHECK-DAG: %[[INT64:[0-9]+]] = OpTypeInt 64 0 +; CHECK-DAG: %[[CONST_4:[0-9]+]] = OpConstant %[[INT]] 4{{$}} + +; CHECK-DAG: %[[ARRAY:[0-9]+]] = OpTypeArray %[[VEC4]] 
%[[CONST_4]] +; CHECK-DAG: %[[PTR_ARRAY:[0-9]+]] = OpTypePointer Uniform %[[ARRAY]] + +; CHECK-DAG: %[[STRUCT_INNER:[0-9]+]] = OpTypeStruct %[[ARRAY]] %[[INT]] +; CHECK-DAG: %[[STRUCT_CBUFFER:[0-9]+]] = OpTypeStruct %[[STRUCT_INNER]] +; CHECK-DAG: %[[PTR_CBUFFER:[0-9]+]] = OpTypePointer Uniform %[[STRUCT_CBUFFER]] + +; CHECK-DAG: OpDecorate %[[ARRAY]] ArrayStride 16 +; CHECK-DAG: OpMemberDecorate %[[STRUCT_INNER]] 0 Offset 0 +; CHECK-DAG: OpMemberDecorate %[[STRUCT_INNER]] 1 Offset 64 +; CHECK-DAG: OpMemberDecorate %[[STRUCT_CBUFFER]] 0 Offset 0 +; CHECK-DAG: OpDecorate %[[STRUCT_CBUFFER]] Block + +; CHECK-DAG: %[[ZERO:[0-9]+]] = OpConstant %[[INT]] 0{{$}} +; CHECK-DAG: %[[ONE:[0-9]+]] = OpConstant %[[INT]] 1{{$}} + +; CHECK: %[[CBUFFER:[0-9]+]] = OpVariable %[[PTR_CBUFFER]] Uniform + +%__cblayout_MyCBuffer = type <{ [4 x <4 x float>], i32 }> + +@MyCBuffer.cb = local_unnamed_addr global target("spirv.VulkanBuffer", %__cblayout_MyCBuffer, 2, 0) poison +@colors = external hidden local_unnamed_addr addrspace(12) global [4 x <4 x float>], align 16 +@index = external hidden local_unnamed_addr addrspace(12) global i32, align 4 +@MyCBuffer.str = private unnamed_addr constant [10 x i8] c"MyCBuffer\00", align 1 +@.str = private unnamed_addr constant [7 x i8] c"output\00", align 1 + +declare target("spirv.VulkanBuffer", %__cblayout_MyCBuffer, 2, 0) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_s___cblayout_MyCBuffers_2_0t(i32, i32, i32, i32, ptr) + +define void @main() #1 { +entry: +; Get pointers to the two elements of the cbuffer +; CHECK: %[[COPY:[0-9]+]] = OpCopyObject %[[PTR_CBUFFER]] %[[CBUFFER]] +; CHECK: %[[PTR_ARRAY_ACCESS:[0-9]+]] = OpAccessChain %[[PTR_ARRAY]] %[[COPY]] %[[ZERO]] %[[ZERO]] +; CHECK: %[[PTR_INT_ACCESS:[0-9]+]] = OpAccessChain %[[PTR_INT]] %[[COPY]] %[[ZERO]] %[[ONE]] + %MyCBuffer.cb_h.i.i = tail call target("spirv.VulkanBuffer", %__cblayout_MyCBuffer, 2, 0) 
@llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_s___cblayout_MyCBuffers_2_0t(i32 0, i32 0, i32 1, i32 0, ptr nonnull @MyCBuffer.str) + store target("spirv.VulkanBuffer", %__cblayout_MyCBuffer, 2, 0) %MyCBuffer.cb_h.i.i, ptr @MyCBuffer.cb, align 8 + + %0 = tail call target("spirv.VulkanBuffer", [0 x <4 x float>], 12, 1) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0v4f32_12_1t(i32 0, i32 0, i32 1, i32 0, ptr nonnull @.str) + +; CHECK: %[[VAL_INT:[0-9]+]] = OpLoad %[[INT]] %[[PTR_INT_ACCESS]] Aligned 4 + %1 = load i32, ptr addrspace(12) @index, align 4 + +; CHECK: %[[VAL_INT64:[0-9]+]] = OpSConvert %[[INT64]] %[[VAL_INT]] + %idxprom.i = sext i32 %1 to i64 + +; CHECK: %[[PTR_ELEM:[0-9]+]] = OpInBoundsAccessChain %[[PTR_VEC4]] %[[PTR_ARRAY_ACCESS]] %[[VAL_INT64]] + %arrayidx.i = getelementptr inbounds <4 x float>, ptr addrspace(12) @colors, i64 %idxprom.i + +; CHECK: %[[VAL_ELEM:[0-9]+]] = OpLoad %[[VEC4]] %[[PTR_ELEM]] Aligned 16 + %2 = load <4 x float>, ptr addrspace(12) %arrayidx.i, align 16 + +; CHECK: OpStore {{%[0-9]+}} %[[VAL_ELEM]] Aligned 16 + %3 = tail call noundef align 16 dereferenceable(16) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0v4f32_12_1t(target("spirv.VulkanBuffer", [0 x <4 x float>], 12, 1) %0, i32 0) + store <4 x float> %2, ptr addrspace(11) %3, align 16 + ret void +} + +declare target("spirv.VulkanBuffer", [0 x <4 x float>], 12, 1) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0v4f32_12_1t(i32, i32, i32, i32, ptr) + +declare ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0v4f32_12_1t(target("spirv.VulkanBuffer", [0 x <4 x float>], 12, 1), i32) + +attributes #1 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } + +!hlsl.cbs = !{!0} + +!0 = !{ptr @MyCBuffer.cb, ptr addrspace(12) @colors, ptr addrspace(12) @index} diff --git a/llvm/test/CodeGen/SPIRV/hlsl-resources/cbuffer-peeled-array-minimal.ll 
b/llvm/test/CodeGen/SPIRV/hlsl-resources/cbuffer-peeled-array-minimal.ll new file mode 100644 index 0000000000000..fc12f0f0592fe --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/hlsl-resources/cbuffer-peeled-array-minimal.ll @@ -0,0 +1,90 @@ +; RUN: llc -O0 -mtriple=spirv-unknown-vulkan-compute %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-vulkan-compute %s -o - -filetype=obj | spirv-val %} + +; CHECK-DAG: OpDecorate %[[ARRAY:[0-9]+]] ArrayStride 16 +; CHECK-DAG: OpMemberDecorate %[[CBLAYOUT:[0-9]+]] 0 Offset 0 +; CHECK-DAG: OpMemberDecorate %[[CBLAYOUT]] 1 Offset 52 +; CHECK-DAG: OpMemberDecorate %[[WRAPPER:[0-9]+]] 0 Offset 0 +; CHECK-DAG: OpDecorate %[[WRAPPER]] Block +; CHECK-DAG: OpMemberDecorate %[[STRUCT:[0-9]+]] 0 Offset 0 +; CHECK-DAG: OpMemberDecorate %[[STRUCT_PAD:[0-9]+]] 0 Offset 0 +; CHECK-DAG: OpMemberDecorate %[[STRUCT_PAD]] 1 Offset 4 + +; CHECK-DAG: %[[FLOAT:[0-9]+]] = OpTypeFloat 32 +; CHECK-DAG: %[[STRUCT]] = OpTypeStruct %[[FLOAT]] +; CHECK-DAG: %[[I8:[0-9]+]] = OpTypeInt 8 0 +; CHECK-DAG: %[[STRUCT_PAD]] = OpTypeStruct %[[STRUCT]] %[[I8]] +; CHECK-DAG: %[[UINT:[0-9]+]] = OpTypeInt 32 0 +; CHECK-DAG: %[[CONST_4:[0-9]+]] = OpConstant %[[UINT]] 4 +; CHECK-DAG: %[[ARRAY]] = OpTypeArray %[[STRUCT_PAD]] %[[CONST_4]] +; CHECK-DAG: %[[CBLAYOUT]] = OpTypeStruct %[[ARRAY]] %[[FLOAT]] +; CHECK-DAG: %[[WRAPPER]] = OpTypeStruct %[[CBLAYOUT]] +; CHECK-DAG: %[[PTR_WRAPPER:[0-9]+]] = OpTypePointer Uniform %[[WRAPPER]] +; CHECK-DAG: %[[ZERO:[0-9]+]] = OpConstant %[[UINT]] 0 +; CHECK-DAG: %[[MYCBUFFER:[0-9]+]] = OpVariable %[[PTR_WRAPPER]] Uniform + +; CHECK-DAG: %[[I64:[0-9]+]] = OpTypeInt 64 0 +; CHECK-DAG: %[[STRUCT2:[0-9]+]] = OpTypeStruct %[[I64]] %[[UINT]] +; CHECK-DAG: %[[CONST_3:[0-9]+]] = OpConstant %[[UINT]] 3 +; CHECK-DAG: %[[ARRAY2:[0-9]+]] = OpTypeArray %[[STRUCT2]] %[[CONST_3]] +; CHECK-DAG: %[[CBLAYOUT2:[0-9]+]] = OpTypeStruct %[[ARRAY2]] %[[I64]] +; CHECK-DAG: %[[PTR_PRIVATE:[0-9]+]] = OpTypePointer Private 
%[[CBLAYOUT2]] +; CHECK-DAG: %[[MYPRIVATEVAR:[0-9]+]] = OpVariable %[[PTR_PRIVATE]] Private + +%__cblayout_MyCBuffer = type <{ <{ [3 x <{ %OrigType, target("spirv.Padding", 12) }>], %OrigType }>, float }> +%OrigType = type <{ float }> + +%__cblayout_MyCBuffer2 = type <{ [ 3 x <{ i64, i32 }> ], i64 }> + +@MyCBuffer.cb = local_unnamed_addr global target("spirv.VulkanBuffer", %__cblayout_MyCBuffer, 2, 0) poison +@myPrivateVar = internal addrspace(10) global %__cblayout_MyCBuffer2 poison + +@myArray = external hidden local_unnamed_addr addrspace(12) global <{ [3 x <{ %OrigType, target("spirv.Padding", 12) }>], %OrigType }>, align 1 +@MyCBuffer.str = private unnamed_addr constant [10 x i8] c"MyCBuffer\00", align 1 +@.str = private unnamed_addr constant [7 x i8] c"output\00", align 1 + +declare target("spirv.VulkanBuffer", %__cblayout_MyCBuffer, 2, 0) @llvm.spv.resource.handlefromimplicitbinding.tspirv.VulkanBuffer_s___cblayout_MyCBuffers_2_0t(i32, i32, i32, i32, ptr) + +define void @main() #1 { +entry: +; CHECK: %[[BUFFER_HANDLE:[0-9]+]] = OpCopyObject %[[PTR_WRAPPER]] %[[MYCBUFFER]] +; CHECK: %[[ACCESS_ARRAY:[0-9]+]] = OpAccessChain {{%[0-9]+}} %[[BUFFER_HANDLE]] %[[ZERO]] %[[ZERO]] + %MyCBuffer.cb_h.i.i = tail call target("spirv.VulkanBuffer", %__cblayout_MyCBuffer, 2, 0) @llvm.spv.resource.handlefromimplicitbinding.tspirv.VulkanBuffer_s___cblayout_MyCBuffers_2_0t(i32 0, i32 0, i32 1, i32 0, ptr nonnull @MyCBuffer.str) + store target("spirv.VulkanBuffer", %__cblayout_MyCBuffer, 2, 0) %MyCBuffer.cb_h.i.i, ptr @MyCBuffer.cb, align 8 + + %0 = tail call target("spirv.Image", float, 5, 2, 0, 0, 2, 1) @llvm.spv.resource.handlefromimplicitbinding.tspirv.Image_f32_5_2_0_0_2_1t(i32 1, i32 0, i32 1, i32 0, ptr nonnull @.str) + %1 = tail call i32 @llvm.spv.thread.id.i32(i32 0) + %rem.i = and i32 %1, 3 + +; CHECK: %[[IDX_CONV:[0-9]+]] = OpUConvert {{.*}} + %idxprom.i = zext nneg i32 %rem.i to i64 + +; CHECK: %[[PTR_ELEM:[0-9]+]] = OpAccessChain {{%[0-9]+}} %[[ACCESS_ARRAY]] 
%[[IDX_CONV]] + %cbufferidx.i = getelementptr <{ %OrigType, target("spirv.Padding", 12) }>, ptr addrspace(12) @myArray, i64 %idxprom.i + +; CHECK: %[[PTR_FIELD:[0-9]+]] = OpAccessChain {{%[0-9]+}} %[[PTR_ELEM]] %[[ZERO]] %[[ZERO]] +; CHECK: %[[VAL_FLOAT:[0-9]+]] = OpLoad %[[FLOAT]] %[[PTR_FIELD]] Aligned 4 + %2 = load float, ptr addrspace(12) %cbufferidx.i, align 4 + + %val = load i64, ptr addrspace(10) getelementptr (%__cblayout_MyCBuffer2, ptr addrspace(10) @myPrivateVar, i32 0, i32 1), align 8 + %val.float = sitofp i64 %val to float + + %vecinit4.i = insertelement <4 x float> , float %2, i64 0 + %vecinit4.i.2 = insertelement <4 x float> %vecinit4.i, float %val.float, i64 1 + %3 = tail call noundef align 16 dereferenceable(16) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.Image_f32_5_2_0_0_2_1t(target("spirv.Image", float, 5, 2, 0, 0, 2, 1) %0, i32 0) + store <4 x float> %vecinit4.i.2, ptr addrspace(11) %3, align 16 +; CHECK: OpImageWrite {{%[0-9]+}} {{%[0-9]+}} {{%[0-9]+}} + ret void +} + +declare i32 @llvm.spv.thread.id.i32(i32) + +declare target("spirv.Image", float, 5, 2, 0, 0, 2, 1) @llvm.spv.resource.handlefromimplicitbinding.tspirv.Image_f32_5_2_0_0_2_1t(i32, i32, i32, i32, ptr) + +declare ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.Image_f32_5_2_0_0_2_1t(target("spirv.Image", float, 5, 2, 0, 0, 2, 1), i32) + +attributes #1 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } + +!hlsl.cbs = !{!0} + +!0 = distinct !{ptr @MyCBuffer.cb, ptr addrspace(12) @myArray, null} diff --git a/llvm/test/CodeGen/SPIRV/hlsl-resources/cbuffer-peeled-array.ll b/llvm/test/CodeGen/SPIRV/hlsl-resources/cbuffer-peeled-array.ll new file mode 100644 index 0000000000000..fb93d53b337b3 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/hlsl-resources/cbuffer-peeled-array.ll @@ -0,0 +1,74 @@ +; RUN: llc -O0 -mtriple=spirv-unknown-vulkan-compute %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-vulkan-compute %s -o - 
-filetype=obj | spirv-val %} + + +; CHECK-DAG: %[[FLOAT:[0-9]+]] = OpTypeFloat 32 +; CHECK-DAG: %[[VEC3:[0-9]+]] = OpTypeVector %[[FLOAT]] 3 +; CHECK-DAG: %[[I8:[0-9]+]] = OpTypeInt 8 0 +; CHECK-DAG: %[[STRUCT_PAD:[0-9]+]] = OpTypeStruct %[[VEC3]] %[[I8]] +; CHECK-DAG: %[[UINT:[0-9]+]] = OpTypeInt 32 0 +; CHECK-DAG: %[[CONST_3:[0-9]+]] = OpConstant %[[UINT]] 3 +; CHECK-DAG: %[[ARRAY:[0-9]+]] = OpTypeArray %[[STRUCT_PAD]] %[[CONST_3]] +; CHECK-DAG: %[[CBLAYOUT:[0-9]+]] = OpTypeStruct %[[ARRAY]] +; CHECK-DAG: OpMemberDecorate %[[CBLAYOUT]] 0 Offset 0 +; CHECK-DAG: %[[WRAPPER:[0-9]+]] = OpTypeStruct %[[CBLAYOUT]] +; CHECK-DAG: %[[PTR_WRAPPER:[0-9]+]] = OpTypePointer Uniform %[[WRAPPER]] +; CHECK-DAG: %[[ZERO:[0-9]+]] = OpConstant %[[UINT]] 0 +; CHECK-DAG: %[[MYCBUFFER:[0-9]+]] = OpVariable %[[PTR_WRAPPER]] Uniform + + +; TODO(168401): This array stride and offset of element 1 are incorrect. This +; is an issue with how 3 element vectors are handled. +; CHECK-DAG: OpDecorate %[[ARRAY]] ArrayStride 20 +; CHECK-DAG: OpMemberDecorate %[[STRUCT_PAD]] 0 Offset 0 +; CHECK-DAG: OpMemberDecorate %[[STRUCT_PAD]] 1 Offset 16 +; CHECK-DAG: OpMemberDecorate %[[WRAPPER]] 0 Offset 0 +; CHECK-DAG: OpDecorate %[[WRAPPER]] Block +%__cblayout_MyCBuffer = type <{ <{ [2 x <{ <3 x float>, target("spirv.Padding", 4) }>], <3 x float> }> }> + +@MyCBuffer.cb = local_unnamed_addr global target("spirv.VulkanBuffer", %__cblayout_MyCBuffer, 2, 0) poison +@myArray = external hidden local_unnamed_addr addrspace(12) global <{ [2 x <{ <3 x float>, target("spirv.Padding", 4) }>], <3 x float> }>, align 16 +@MyCBuffer.str = private unnamed_addr constant [10 x i8] c"MyCBuffer\00", align 1 +@.str = private unnamed_addr constant [7 x i8] c"output\00", align 1 + +declare target("spirv.VulkanBuffer", %__cblayout_MyCBuffer, 2, 0) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_s___cblayout_MyCBuffers_2_0t(i32, i32, i32, i32, ptr) + +define void @main() #1 { +entry: +; CHECK: %[[BUFFER_HANDLE:[0-9]+]] 
= OpCopyObject %[[PTR_WRAPPER]] %[[MYCBUFFER]] +; CHECK: %[[ACCESS_ARRAY:[0-9]+]] = OpAccessChain {{%[0-9]+}} %[[BUFFER_HANDLE]] %[[ZERO]] %[[ZERO]] + %MyCBuffer.cb_h.i.i = tail call target("spirv.VulkanBuffer", %__cblayout_MyCBuffer, 2, 0) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_s___cblayout_MyCBuffers_2_0t(i32 0, i32 0, i32 1, i32 0, ptr nonnull @MyCBuffer.str) + store target("spirv.VulkanBuffer", %__cblayout_MyCBuffer, 2, 0) %MyCBuffer.cb_h.i.i, ptr @MyCBuffer.cb, align 8 + + %0 = tail call target("spirv.VulkanBuffer", [0 x <3 x float>], 12, 1) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0v3f32_12_1t(i32 0, i32 0, i32 1, i32 0, ptr nonnull @.str) + %1 = tail call i32 @llvm.spv.thread.id.i32(i32 0) + +; CHECK: %[[IDX:[0-9]+]] = OpUMod %[[UINT]] {{%[0-9]+}} %[[CONST_3]] + %rem.i = urem i32 %1, 3 + +; CHECK: %[[IDX_CONV:[0-9]+]] = OpUConvert {{.*}} %[[IDX]] + %idxprom.i = zext nneg i32 %rem.i to i64 + +; CHECK: %[[PTR_ELEM:[0-9]+]] = OpAccessChain {{%[0-9]+}} %[[ACCESS_ARRAY]] %[[IDX_CONV]] + %cbufferidx.i = getelementptr <{ <3 x float>, target("spirv.Padding", 4) }>, ptr addrspace(12) @myArray, i64 %idxprom.i + +; CHECK: %[[PTR_FIELD:[0-9]+]] = OpAccessChain {{%[0-9]+}} %[[PTR_ELEM]] {{.*}} +; CHECK: %[[VAL_VEC3:[0-9]+]] = OpLoad %[[VEC3]] %[[PTR_FIELD]] Aligned 16 + %2 = load <3 x float>, ptr addrspace(12) %cbufferidx.i, align 16 + + %3 = tail call noundef align 16 dereferenceable(16) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0v3f32_12_1t(target("spirv.VulkanBuffer", [0 x <3 x float>], 12, 1) %0, i32 %1) + store <3 x float> %2, ptr addrspace(11) %3, align 16 + ret void +} + +declare i32 @llvm.spv.thread.id.i32(i32) + +declare target("spirv.VulkanBuffer", [0 x <3 x float>], 12, 1) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0v3f32_12_1t(i32, i32, i32, i32, ptr) + +declare ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0v3f32_12_1t(target("spirv.VulkanBuffer", [0 x 
<3 x float>], 12, 1), i32) + +attributes #1 = { "hlsl.numthreads"="8,1,1" "hlsl.shader"="compute" } + +!hlsl.cbs = !{!0} + +!0 = !{ptr @MyCBuffer.cb, ptr addrspace(12) @myArray} diff --git a/llvm/test/CodeGen/SPIRV/hlsl-resources/cbuffer-simple.ll b/llvm/test/CodeGen/SPIRV/hlsl-resources/cbuffer-simple.ll new file mode 100644 index 0000000000000..1dd2c92bca09d --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/hlsl-resources/cbuffer-simple.ll @@ -0,0 +1,73 @@ +; RUN: llc -O0 -mtriple=spirv-unknown-vulkan-compute %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-vulkan-compute %s -o - -filetype=obj | spirv-val %} + +; CHECK-DAG: %[[FLOAT:[0-9]+]] = OpTypeFloat 32 +; CHECK-DAG: %[[VEC4:[0-9]+]] = OpTypeVector %[[FLOAT]] 4 +; CHECK-DAG: %[[PTR_FLOAT:[0-9]+]] = OpTypePointer Uniform %[[FLOAT]] +; CHECK-DAG: %[[PTR_VEC4:[0-9]+]] = OpTypePointer Uniform %[[VEC4]] +; CHECK-DAG: %[[STRUCT:[0-9]+]] = OpTypeStruct %[[VEC4]] %[[FLOAT]] +; CHECK-DAG: %[[CBUFFER_TYPE:[0-9]+]] = OpTypeStruct %[[STRUCT]] +; CHECK-DAG: %[[PTR_CBUFFER:[0-9]+]] = OpTypePointer Uniform %[[CBUFFER_TYPE]] +; CHECK-DAG: %[[INT:[0-9]+]] = OpTypeInt 32 0 +; CHECK-DAG: %[[ZERO:[0-9]+]] = OpConstant %[[INT]] 0{{$}} +; CHECK-DAG: %[[ONE:[0-9]+]] = OpConstant %[[INT]] 1{{$}} + +; CHECK-DAG: OpMemberDecorate %[[STRUCT]] 0 Offset 0 +; CHECK-DAG: OpMemberDecorate %[[STRUCT]] 1 Offset 16 +; CHECK-DAG: OpMemberDecorate %[[CBUFFER_TYPE]] 0 Offset 0 +; CHECK-DAG: OpDecorate %[[CBUFFER_TYPE]] Block + +; CHECK-DAG: %[[CBUFFER:[0-9]+]] = OpVariable %[[PTR_CBUFFER]] Uniform + +%__cblayout_MyCBuffer = type <{ <4 x float>, float }> + +@MyCBuffer.cb = local_unnamed_addr global target("spirv.VulkanBuffer", %__cblayout_MyCBuffer, 2, 0) poison +@color = external hidden local_unnamed_addr addrspace(12) global <4 x float>, align 16 +@factor = external hidden local_unnamed_addr addrspace(12) global float, align 4 +@MyCBuffer.str = private unnamed_addr constant [10 x i8] c"MyCBuffer\00", align 1 +@.str = 
private unnamed_addr constant [7 x i8] c"output\00", align 1 + +declare target("spirv.VulkanBuffer", %__cblayout_MyCBuffer, 2, 0) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_s___cblayout_MyCBuffers_2_0t(i32, i32, i32, i32, ptr) + +define void @main() #1 { +entry: +; CHECK: %[[COPY:[0-9]+]] = OpCopyObject %[[PTR_CBUFFER]] %[[CBUFFER]] +; CHECK: %[[PTR_VEC4_ACCESS:[0-9]+]] = OpAccessChain %[[PTR_VEC4]] %[[COPY]] %[[ZERO]] %[[ZERO]] +; CHECK: %[[PTR_FLOAT_ACCESS:[0-9]+]] = OpAccessChain %[[PTR_FLOAT]] %[[COPY]] %[[ZERO]] %[[ONE]] + %MyCBuffer.cb_h.i.i = tail call target("spirv.VulkanBuffer", %__cblayout_MyCBuffer, 2, 0) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_s___cblayout_MyCBuffers_2_0t(i32 0, i32 0, i32 1, i32 0, ptr nonnull @MyCBuffer.str) + store target("spirv.VulkanBuffer", %__cblayout_MyCBuffer, 2, 0) %MyCBuffer.cb_h.i.i, ptr @MyCBuffer.cb, align 8 + + %0 = tail call target("spirv.VulkanBuffer", [0 x <4 x float>], 12, 1) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0v4f32_12_1t(i32 0, i32 0, i32 1, i32 0, ptr nonnull @.str) + %1 = tail call i32 @llvm.spv.thread.id.i32(i32 0) + %2 = tail call i32 @llvm.spv.thread.id.i32(i32 1) + %conv.i = uitofp i32 %1 to float + %conv2.i = uitofp i32 %2 to float + %3 = insertelement <4 x float> , float %conv.i, i64 0 + %vecinit5.i = insertelement <4 x float> %3, float %conv2.i, i64 1 + +; CHECK: %[[VAL_VEC4:[0-9]+]] = OpLoad %[[VEC4]] %[[PTR_VEC4_ACCESS]] Aligned 16 + %4 = load <4 x float>, ptr addrspace(12) @color, align 16 + %mul.i = fmul reassoc nnan ninf nsz arcp afn <4 x float> %vecinit5.i, %4 + +; CHECK: %[[VAL_FLOAT:[0-9]+]] = OpLoad %[[FLOAT]] %[[PTR_FLOAT_ACCESS]] Aligned 4 + %5 = load float, ptr addrspace(12) @factor, align 4 + + %splat.splatinsert.i = insertelement <4 x float> poison, float %5, i64 0 + %splat.splat.i = shufflevector <4 x float> %splat.splatinsert.i, <4 x float> poison, <4 x i32> zeroinitializer + %mul6.i = fmul reassoc nnan ninf nsz arcp afn <4 x float> 
%mul.i, %splat.splat.i + %6 = tail call noundef align 16 dereferenceable(16) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0v4f32_12_1t(target("spirv.VulkanBuffer", [0 x <4 x float>], 12, 1) %0, i32 0) + store <4 x float> %mul6.i, ptr addrspace(11) %6, align 16 + ret void +} + +declare i32 @llvm.spv.thread.id.i32(i32) + +declare target("spirv.VulkanBuffer", [0 x <4 x float>], 12, 1) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0v4f32_12_1t(i32, i32, i32, i32, ptr) + +declare ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0v4f32_12_1t(target("spirv.VulkanBuffer", [0 x <4 x float>], 12, 1), i32) + +attributes #1 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } + +!hlsl.cbs = !{!0} + +!0 = !{ptr @MyCBuffer.cb, ptr addrspace(12) @color, ptr addrspace(12) @factor} diff --git a/llvm/test/CodeGen/SPIRV/hlsl-resources/cbuffer-struct.ll b/llvm/test/CodeGen/SPIRV/hlsl-resources/cbuffer-struct.ll new file mode 100644 index 0000000000000..60512fe3ed718 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/hlsl-resources/cbuffer-struct.ll @@ -0,0 +1,158 @@ +; RUN: llc -O0 -mtriple=spirv-unknown-vulkan-compute %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-vulkan-compute %s -o - -filetype=obj | spirv-val %} + +; CHECK-DAG: %[[FLOAT:[0-9]+]] = OpTypeFloat 32 +; CHECK-DAG: %[[VEC4:[0-9]+]] = OpTypeVector %[[FLOAT]] 4 +; CHECK-DAG: %[[PTR_VEC4:[0-9]+]] = OpTypePointer Uniform %[[VEC4]] +; CHECK-DAG: %[[INT:[0-9]+]] = OpTypeInt 32 0 +; CHECK-DAG: %[[ZERO:[0-9]+]] = OpConstant %[[INT]] 0{{$}} + +; CHECK-DAG: %[[STRUCT_MATRIX:[0-9]+]] = OpTypeStruct %[[VEC4]] %[[VEC4]] %[[VEC4]] %[[VEC4]] +; CHECK-DAG: %[[PTR_MATRIX:[0-9]+]] = OpTypePointer Uniform %[[STRUCT_MATRIX]] +; CHECK-DAG: %[[PTR_FLOAT:[0-9]+]] = OpTypePointer Uniform %[[FLOAT]] + +; CHECK-DAG: %[[STRUCT_MYSTRUCT:[0-9]+]] = OpTypeStruct %[[STRUCT_MATRIX]] %[[STRUCT_MATRIX]] %[[STRUCT_MATRIX]] + +; CHECK-DAG: 
%[[PTR_MYSTRUCT:[0-9]+]] = OpTypePointer Uniform %[[STRUCT_MYSTRUCT]] +; CHECK-DAG: %[[STRUCT_INNER:[0-9]+]] = OpTypeStruct %[[STRUCT_MYSTRUCT]] %[[FLOAT]] + +; CHECK-DAG: %[[STRUCT_CBUFFER:[0-9]+]] = OpTypeStruct %[[STRUCT_INNER]] +; CHECK-DAG: %[[PTR_CBUFFER:[0-9]+]] = OpTypePointer Uniform %[[STRUCT_CBUFFER]] +; CHECK-DAG: %[[INT64:[0-9]+]] = OpTypeInt 64 0 + +; CHECK-DAG: OpMemberDecorate %[[STRUCT_CBUFFER]] 0 Offset 0 +; CHECK-DAG: OpDecorate %[[STRUCT_CBUFFER]] Block +; CHECK-DAG: OpMemberDecorate %[[STRUCT_INNER]] 0 Offset 0 +; CHECK-DAG: OpMemberDecorate %[[STRUCT_INNER]] 1 Offset 192 +; CHECK-DAG: OpMemberDecorate %[[STRUCT_MYSTRUCT]] 0 Offset 0 +; CHECK-DAG: OpMemberDecorate %[[STRUCT_MYSTRUCT]] 1 Offset 64 +; CHECK-DAG: OpMemberDecorate %[[STRUCT_MYSTRUCT]] 2 Offset 128 +; CHECK-DAG: OpMemberDecorate %[[STRUCT_MATRIX]] 0 Offset 0 +; CHECK-DAG: OpMemberDecorate %[[STRUCT_MATRIX]] 1 Offset 16 +; CHECK-DAG: OpMemberDecorate %[[STRUCT_MATRIX]] 2 Offset 32 +; CHECK-DAG: OpMemberDecorate %[[STRUCT_MATRIX]] 3 Offset 48 + +; CHECK-DAG: %[[ONE:[0-9]+]] = OpConstant %[[INT]] 1{{$}} +; CHECK-DAG: %[[ZERO_64:[0-9]+]] = OpConstant %[[INT64]] 0{{$}} +; CHECK-DAG: %[[ONE_64:[0-9]+]] = OpConstant %[[INT64]] 1{{$}} +; CHECK-DAG: %[[TWO_64:[0-9]+]] = OpConstant %[[INT64]] 2{{$}} +; CHECK-DAG: %[[THREE_64:[0-9]+]] = OpConstant %[[INT64]] 3{{$}} + +; CHECK: %[[CBUFFER:[0-9]+]] = OpVariable %[[PTR_CBUFFER]] Uniform + +%__cblayout_MyCBuffer = type <{ %MyStruct, float }> +%MyStruct = type <{ %MyMatrix, %MyMatrix, %MyMatrix }> +%MyMatrix = type <{ <4 x float>, <4 x float>, <4 x float>, <4 x float> }> + +@MyCBuffer.cb = local_unnamed_addr global target("spirv.VulkanBuffer", %__cblayout_MyCBuffer, 2, 0) poison +@transforms = external hidden local_unnamed_addr addrspace(12) global %MyStruct, align 1 +@blend = external hidden local_unnamed_addr addrspace(12) global float, align 4 +@MyCBuffer.str = private unnamed_addr constant [10 x i8] c"MyCBuffer\00", align 1 +@.str = private 
unnamed_addr constant [7 x i8] c"output\00", align 1 + +declare target("spirv.VulkanBuffer", %__cblayout_MyCBuffer, 2, 0) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_s___cblayout_MyCBuffers_2_0t(i32, i32, i32, i32, ptr) + +declare <4 x float> @llvm.fmuladd.v4f32(<4 x float>, <4 x float>, <4 x float>) + +define void @main() #3 { +entry: +; CHECK: %[[COPY:[0-9]+]] = OpCopyObject %[[PTR_CBUFFER]] %[[CBUFFER]] +; CHECK: %[[PTR_STRUCT:[0-9]+]] = OpAccessChain %[[PTR_MYSTRUCT]] %[[COPY]] %[[ZERO]] %[[ZERO]] +; CHECK: %[[PTR_FLOAT_VAL:[0-9]+]] = OpAccessChain %[[PTR_FLOAT]] %[[COPY]] %[[ZERO]] %[[ONE]] + %MyCBuffer.cb_h.i.i = tail call target("spirv.VulkanBuffer", %__cblayout_MyCBuffer, 2, 0) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_s___cblayout_MyCBuffers_2_0t(i32 0, i32 0, i32 1, i32 0, ptr nonnull @MyCBuffer.str) + store target("spirv.VulkanBuffer", %__cblayout_MyCBuffer, 2, 0) %MyCBuffer.cb_h.i.i, ptr @MyCBuffer.cb, align 8 + + %0 = tail call target("spirv.VulkanBuffer", [0 x <4 x float>], 12, 1) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0v4f32_12_1t(i32 0, i32 0, i32 1, i32 0, ptr nonnull @.str) + %1 = tail call i32 @llvm.spv.thread.id.i32(i32 0) + %2 = tail call i32 @llvm.spv.thread.id.i32(i32 1) + %conv.i = uitofp i32 %1 to float + %conv2.i = uitofp i32 %2 to float + %3 = insertelement <4 x float> poison, float %conv.i, i64 0 + +; CHECK: %[[PTR_M0_V0:[0-9]+]] = OpAccessChain %[[PTR_VEC4]] %[[PTR_STRUCT]] %[[ZERO]] %[[ZERO]] +; CHECK: %[[VAL_M0_V0:[0-9]+]] = OpLoad %[[VEC4]] %[[PTR_M0_V0]] Aligned 16 + %4 = load <4 x float>, ptr addrspace(12) @transforms, align 16 + +; CHECK: %[[PTR_M0_V1:[0-9]+]] = OpInBoundsAccessChain %[[PTR_VEC4]] %[[PTR_STRUCT]] %[[ZERO_64]] %[[ONE_64]] +; CHECK: %[[VAL_M0_V1:[0-9]+]] = OpLoad %[[VEC4]] %[[PTR_M0_V1]] Aligned 16 + %5 = load <4 x float>, ptr addrspace(12) getelementptr inbounds nuw (i8, ptr addrspace(12) @transforms, i64 16), align 16 + +; CHECK: %[[PTR_M0_V3:[0-9]+]] = 
OpInBoundsAccessChain %[[PTR_VEC4]] %[[PTR_STRUCT]] %[[ZERO_64]] %[[THREE_64]] +; CHECK: %[[VAL_M0_V3:[0-9]+]] = OpLoad %[[VEC4]] %[[PTR_M0_V3]] Aligned 16 + %6 = load <4 x float>, ptr addrspace(12) getelementptr inbounds nuw (i8, ptr addrspace(12) @transforms, i64 48), align 16 + + %splat.splat.i18.i = shufflevector <4 x float> %3, <4 x float> poison, <4 x i32> zeroinitializer + %7 = insertelement <4 x float> poison, float %conv2.i, i64 0 + %splat.splat2.i19.i = shufflevector <4 x float> %7, <4 x float> poison, <4 x i32> zeroinitializer + %mul3.i20.i = fmul reassoc nnan ninf nsz arcp afn <4 x float> %splat.splat2.i19.i, %5 + %8 = tail call reassoc nnan ninf nsz arcp afn <4 x float> @llvm.fmuladd.v4f32(<4 x float> %splat.splat.i18.i, <4 x float> nofpclass(nan inf) %4, <4 x float> %mul3.i20.i) + %9 = fadd reassoc nnan ninf nsz arcp afn <4 x float> %8, %6 +; CHECK: %[[PTR_M1:[0-9]+]] = OpInBoundsAccessChain %[[PTR_MATRIX]] %[[PTR_STRUCT]] %[[ONE_64]] +; CHECK: %[[PTR_M1_V0:[0-9]+]] = OpAccessChain %[[PTR_VEC4]] %[[PTR_M1]] %[[ZERO]] +; CHECK: %[[VAL_M1_V0:[0-9]+]] = OpLoad %[[VEC4]] %[[PTR_M1_V0]] Aligned 16 + %10 = load <4 x float>, ptr addrspace(12) getelementptr inbounds nuw (i8, ptr addrspace(12) @transforms, i64 64), align 16 +; CHECK: %[[PTR_M1_V1:[0-9]+]] = OpInBoundsAccessChain %[[PTR_VEC4]] %[[PTR_STRUCT]] %[[ONE_64]] %[[ONE_64]] +; CHECK: %[[VAL_M1_V1:[0-9]+]] = OpLoad %[[VEC4]] %[[PTR_M1_V1]] Aligned 16 + %11 = load <4 x float>, ptr addrspace(12) getelementptr inbounds nuw (i8, ptr addrspace(12) @transforms, i64 80), align 16 +; CHECK: %[[PTR_M1_V2:[0-9]+]] = OpInBoundsAccessChain %[[PTR_VEC4]] %[[PTR_STRUCT]] %[[ONE_64]] %[[TWO_64]] +; CHECK: %[[VAL_M1_V2:[0-9]+]] = OpLoad %[[VEC4]] %[[PTR_M1_V2]] Aligned 16 + %12 = load <4 x float>, ptr addrspace(12) getelementptr inbounds nuw (i8, ptr addrspace(12) @transforms, i64 96), align 16 +; CHECK: %[[PTR_M1_V3:[0-9]+]] = OpInBoundsAccessChain %[[PTR_VEC4]] %[[PTR_STRUCT]] %[[ONE_64]] %[[THREE_64]] +; CHECK: 
%[[VAL_M1_V3:[0-9]+]] = OpLoad %[[VEC4]] %[[PTR_M1_V3]] Aligned 16 + %13 = load <4 x float>, ptr addrspace(12) getelementptr inbounds nuw (i8, ptr addrspace(12) @transforms, i64 112), align 16 + %splat.splat.i13.i = shufflevector <4 x float> %9, <4 x float> poison, <4 x i32> zeroinitializer + %splat.splat2.i14.i = shufflevector <4 x float> %9, <4 x float> poison, <4 x i32> + %mul3.i15.i = fmul reassoc nnan ninf nsz arcp afn <4 x float> %splat.splat2.i14.i, %11 + %14 = tail call reassoc nnan ninf nsz arcp afn <4 x float> @llvm.fmuladd.v4f32(<4 x float> %splat.splat.i13.i, <4 x float> nofpclass(nan inf) %10, <4 x float> %mul3.i15.i) + %splat.splat5.i16.i = shufflevector <4 x float> %9, <4 x float> poison, <4 x i32> + %15 = tail call reassoc nnan ninf nsz arcp afn <4 x float> @llvm.fmuladd.v4f32(<4 x float> %splat.splat5.i16.i, <4 x float> nofpclass(nan inf) %12, <4 x float> %14) + %splat.splat7.i17.i = shufflevector <4 x float> %9, <4 x float> poison, <4 x i32> + %16 = tail call reassoc nnan ninf nsz arcp afn noundef <4 x float> @llvm.fmuladd.v4f32(<4 x float> %splat.splat7.i17.i, <4 x float> nofpclass(nan inf) %13, <4 x float> %15) +; CHECK: %[[PTR_M2:[0-9]+]] = OpInBoundsAccessChain %[[PTR_MATRIX]] %[[PTR_STRUCT]] %[[TWO_64]] +; CHECK: %[[PTR_M2_V0:[0-9]+]] = OpAccessChain %[[PTR_VEC4]] %[[PTR_M2]] %[[ZERO]] +; CHECK: %[[VAL_M2_V0:[0-9]+]] = OpLoad %[[VEC4]] %[[PTR_M2_V0]] Aligned 16 + %17 = load <4 x float>, ptr addrspace(12) getelementptr inbounds nuw (i8, ptr addrspace(12) @transforms, i64 128), align 16 +; CHECK: %[[PTR_M2_V1:[0-9]+]] = OpInBoundsAccessChain %[[PTR_VEC4]] %[[PTR_STRUCT]] %[[TWO_64]] %[[ONE_64]] +; CHECK: %[[VAL_M2_V1:[0-9]+]] = OpLoad %[[VEC4]] %[[PTR_M2_V1]] Aligned 16 + %18 = load <4 x float>, ptr addrspace(12) getelementptr inbounds nuw (i8, ptr addrspace(12) @transforms, i64 144), align 16 +; CHECK: %[[PTR_M2_V2:[0-9]+]] = OpInBoundsAccessChain %[[PTR_VEC4]] %[[PTR_STRUCT]] %[[TWO_64]] %[[TWO_64]] +; CHECK: %[[VAL_M2_V2:[0-9]+]] = OpLoad 
%[[VEC4]] %[[PTR_M2_V2]] Aligned 16 + %19 = load <4 x float>, ptr addrspace(12) getelementptr inbounds nuw (i8, ptr addrspace(12) @transforms, i64 160), align 16 +; CHECK: %[[PTR_M2_V3:[0-9]+]] = OpInBoundsAccessChain %[[PTR_VEC4]] %[[PTR_STRUCT]] %[[TWO_64]] %[[THREE_64]] +; CHECK: %[[VAL_M2_V3:[0-9]+]] = OpLoad %[[VEC4]] %[[PTR_M2_V3]] Aligned 16 + %20 = load <4 x float>, ptr addrspace(12) getelementptr inbounds nuw (i8, ptr addrspace(12) @transforms, i64 176), align 16 + %splat.splat.i.i = shufflevector <4 x float> %16, <4 x float> poison, <4 x i32> zeroinitializer + %splat.splat2.i.i = shufflevector <4 x float> %16, <4 x float> poison, <4 x i32> + %mul3.i.i = fmul reassoc nnan ninf nsz arcp afn <4 x float> %splat.splat2.i.i, %18 + %21 = tail call reassoc nnan ninf nsz arcp afn <4 x float> @llvm.fmuladd.v4f32(<4 x float> %splat.splat.i.i, <4 x float> nofpclass(nan inf) %17, <4 x float> %mul3.i.i) + %splat.splat5.i.i = shufflevector <4 x float> %16, <4 x float> poison, <4 x i32> + %22 = tail call reassoc nnan ninf nsz arcp afn <4 x float> @llvm.fmuladd.v4f32(<4 x float> %splat.splat5.i.i, <4 x float> nofpclass(nan inf) %19, <4 x float> %21) + %splat.splat7.i.i = shufflevector <4 x float> %16, <4 x float> poison, <4 x i32> + %23 = tail call reassoc nnan ninf nsz arcp afn noundef <4 x float> @llvm.fmuladd.v4f32(<4 x float> %splat.splat7.i.i, <4 x float> nofpclass(nan inf) %20, <4 x float> %22) + %24 = load float, ptr addrspace(12) @blend, align 4 +; CHECK: %[[VAL_FLOAT:[0-9]+]] = OpLoad %[[FLOAT]] %[[PTR_FLOAT_VAL]] Aligned 4 +; CHECK: %[[SPLAT_INS:[0-9]+]] = OpCompositeInsert %[[VEC4]] %[[VAL_FLOAT]] {{.*}} 0 +; CHECK: %[[SPLAT:[0-9]+]] = OpVectorShuffle %[[VEC4]] %[[SPLAT_INS]] {{.*}} 0 0 0 0 +; CHECK: %[[RES:[0-9]+]] = OpFMul %[[VEC4]] {{%[0-9]+}} %[[SPLAT]] + %splat.splatinsert.i = insertelement <4 x float> poison, float %24, i64 0 + %splat.splat.i = shufflevector <4 x float> %splat.splatinsert.i, <4 x float> poison, <4 x i32> zeroinitializer + %mul.i = fmul 
reassoc nnan ninf nsz arcp afn <4 x float> %23, %splat.splat.i + %25 = tail call noundef align 16 dereferenceable(16) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0v4f32_12_1t(target("spirv.VulkanBuffer", [0 x <4 x float>], 12, 1) %0, i32 0) + store <4 x float> %mul.i, ptr addrspace(11) %25, align 16 +; CHECK: OpStore {{%[0-9]+}} %[[RES]] Aligned 16 + ret void +} + +declare i32 @llvm.spv.thread.id.i32(i32) + +declare target("spirv.VulkanBuffer", [0 x <4 x float>], 12, 1) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0v4f32_12_1t(i32, i32, i32, i32, ptr) + +declare ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0v4f32_12_1t(target("spirv.VulkanBuffer", [0 x <4 x float>], 12, 1), i32) + +attributes #1 = { alwaysinline mustprogress nofree norecurse nosync nounwind willreturn memory(none) } +attributes #3 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } +attributes #4 = { mustprogress nofree nosync nounwind willreturn memory(none) } + +!hlsl.cbs = !{!0} + +!0 = !{ptr @MyCBuffer.cb, ptr addrspace(12) @transforms, ptr addrspace(12) @blend} \ No newline at end of file diff --git a/llvm/test/CodeGen/SPIRV/instructions/insertvalue-undef-ptr.ll b/llvm/test/CodeGen/SPIRV/instructions/insertvalue-undef-ptr.ll index b788f34bf7238..02825e3cbb599 100644 --- a/llvm/test/CodeGen/SPIRV/instructions/insertvalue-undef-ptr.ll +++ b/llvm/test/CodeGen/SPIRV/instructions/insertvalue-undef-ptr.ll @@ -4,25 +4,40 @@ ; CHECK-LABEL: Begin function original_testcase define fastcc void @original_testcase() { top: + %0 = alloca [1 x ptr], align 4 ; CHECK: OpCompositeInsert - %0 = insertvalue [1 x ptr] zeroinitializer, ptr poison, 0 + %1 = insertvalue [1 x ptr] zeroinitializer, ptr poison, 0 + store [1 x ptr] %1, ptr %0 ret void } ; CHECK-LABEL: Begin function additional_testcases define fastcc void @additional_testcases() { top: + %0 = alloca [2 x ptr], align 4 + + ; Test with different pointer types ; CHECK: 
OpCompositeInsert %1 = insertvalue [1 x ptr] zeroinitializer, ptr undef, 0 + ; CHECK: OpStore + store [1 x ptr] %1, ptr %0 + ; CHECK-NEXT: OpCompositeInsert %2 = insertvalue {ptr, i32} zeroinitializer, ptr poison, 0 + ; CHECK: OpStore + store {ptr, i32} %2, ptr %0 + ; CHECK-NEXT: OpCompositeInsert %3 = insertvalue {ptr, ptr} undef, ptr null, 0 + ; CHECK: OpStore + store {ptr, ptr} %3, ptr %0 ; Test with undef aggregate ; CHECK-NEXT: OpCompositeInsert %4 = insertvalue [1 x ptr] undef, ptr undef, 0 + ; CHECK: OpStore + store [1 x ptr] %4, ptr %0 ret void } diff --git a/llvm/test/CodeGen/SPIRV/instructions/select-ptr-load.ll b/llvm/test/CodeGen/SPIRV/instructions/select-ptr-load.ll index 6e6cd2f68a971..510c7954c78f8 100644 --- a/llvm/test/CodeGen/SPIRV/instructions/select-ptr-load.ll +++ b/llvm/test/CodeGen/SPIRV/instructions/select-ptr-load.ll @@ -13,13 +13,18 @@ %struct = type { [3 x float] } +@G = global float 0.0 + define spir_kernel void @bar(i1 %sw) { entry: %var1 = alloca %struct + store %struct zeroinitializer, ptr %var1 %var2 = alloca %struct + store %struct zeroinitializer, ptr %var2 %elem1 = getelementptr inbounds [3 x float], ptr %var1, i64 0, i64 0 %elem2 = getelementptr inbounds [3 x float], ptr %var2, i64 0, i64 1 %elem = select i1 %sw, ptr %elem1, ptr %elem2 %res = load float, ptr %elem + store float %res, ptr @G ret void } diff --git a/llvm/test/CodeGen/SPIRV/keep-tracked-const.ll b/llvm/test/CodeGen/SPIRV/keep-tracked-const.ll deleted file mode 100644 index efde6a2c082fc..0000000000000 --- a/llvm/test/CodeGen/SPIRV/keep-tracked-const.ll +++ /dev/null @@ -1,23 +0,0 @@ -; This test case ensures that cleaning of temporary constants doesn't purge tracked ones. 
- -; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV -; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %} - -; CHECK-SPIRV-DAG: %[[#Int:]] = OpTypeInt 8 0 -; CHECK-SPIRV-DAG: %[[#C0:]] = OpConstantNull %[[#Int]] -; CHECK-SPIRV-DAG: %[[#C1:]] = OpConstant %[[#Int]] 1{{$}} - -define spir_kernel void @foo() { -entry: - %addr = alloca i32 - %r1 = call i8 @_Z20__spirv_SpecConstantia(i32 0, i8 1) - ; The name '%conv17.i' is important for the test case, - ; because it includes i32 0 when encoded for SPIR-V usage. - %conv17.i = sext i8 %r1 to i64 - %tobool = trunc i8 %r1 to i1 - %r2 = zext i1 %tobool to i32 - store i32 %r2, ptr %addr - ret void -} - -declare i8 @_Z20__spirv_SpecConstantia(i32, i8) diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/assume.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/assume.ll index 3d2080e0050b7..691325251f11d 100644 --- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/assume.ll +++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/assume.ll @@ -8,14 +8,15 @@ %class.anon = type { i8 } -define spir_func void @_Z3fooi(i32 %x) { +define spir_func i32 @_Z3fooi(i32 %x) { entry: %x.addr = alloca i32, align 4 store i32 %x, i32* %x.addr, align 4 - %0 = load i32, i32* %x.addr, align 4 + %0 = load i32, ptr %x.addr, align 4 %cmp = icmp ne i32 %0, 0 call void @llvm.assume(i1 %cmp) - ret void + %retval = select i1 %cmp, i32 100, i32 10 + ret i32 %retval } declare void @llvm.assume(i1) @@ -45,9 +46,9 @@ entry: call void @llvm.lifetime.start.p0i8(i64 4, i8* %0) store i32 1, i32* %a, align 4 %1 = load i32, i32* %a, align 4 - call spir_func void @_Z3fooi(i32 %1) - %2 = bitcast i32* %a to i8* - call void @llvm.lifetime.end.p0i8(i64 4, i8* %2) + %2 = call spir_func i32 @_Z3fooi(i32 %1) + %3 = bitcast i32* %a to i8* + call void @llvm.lifetime.end.p0i8(i64 4, i8* %3) ret void } diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/bitreverse_small_type.ll 
b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/bitreverse_small_type.ll index 438fff6e94f89..18856147896bb 100644 --- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/bitreverse_small_type.ll +++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/bitreverse_small_type.ll @@ -7,20 +7,20 @@ ; CHECK: OpCapability ArbitraryPrecisionIntegersINTEL ; CHECK: OpExtension "SPV_INTEL_arbitrary_precision_integers" -; CHECK: %[[#I4:]] = OpTypeInt 4 0 -; CHECK: %[[#I2:]] = OpTypeInt 2 0 -; CHECK: %[[#Z4:]] = OpConstantNull %[[#I4]] -; CHECK: %[[#Z2:]] = OpConstantNull %[[#I2]] -; CHECK: %[[#V2I2:]] = OpTypeVector %[[#I2]] 2 -; CHECK: %[[#V2I4:]] = OpTypeVector %[[#I4]] 2 -; CHECK: %[[#V3I2:]] = OpTypeVector %[[#I2]] 3 -; CHECK: %[[#V3I4:]] = OpTypeVector %[[#I4]] 3 -; CHECK: %[[#V4I2:]] = OpTypeVector %[[#I2]] 4 -; CHECK: %[[#V4I4:]] = OpTypeVector %[[#I4]] 4 -; CHECK: %[[#V8I2:]] = OpTypeVector %[[#I2]] 8 -; CHECK: %[[#V8I4:]] = OpTypeVector %[[#I4]] 8 -; CHECK: %[[#V16I2:]] = OpTypeVector %[[#I2]] 16 -; CHECK: %[[#V16I4:]] = OpTypeVector %[[#I4]] 16 +; CHECK-DAG: %[[#I4:]] = OpTypeInt 4 0 +; CHECK-DAG: %[[#I2:]] = OpTypeInt 2 0 +; CHECK-DAG: %[[#Z4:]] = OpConstantNull %[[#I4]] +; CHECK-DAG: %[[#Z2:]] = OpConstantNull %[[#I2]] +; CHECK-DAG: %[[#V2I2:]] = OpTypeVector %[[#I2]] 2 +; CHECK-DAG: %[[#V2I4:]] = OpTypeVector %[[#I4]] 2 +; CHECK-DAG: %[[#V3I2:]] = OpTypeVector %[[#I2]] 3 +; CHECK-DAG: %[[#V3I4:]] = OpTypeVector %[[#I4]] 3 +; CHECK-DAG: %[[#V4I2:]] = OpTypeVector %[[#I2]] 4 +; CHECK-DAG: %[[#V4I4:]] = OpTypeVector %[[#I4]] 4 +; CHECK-DAG: %[[#V8I2:]] = OpTypeVector %[[#I2]] 8 +; CHECK-DAG: %[[#V8I4:]] = OpTypeVector %[[#I4]] 8 +; CHECK-DAG: %[[#V16I2:]] = OpTypeVector %[[#I2]] 16 +; CHECK-DAG: %[[#V16I4:]] = OpTypeVector %[[#I4]] 16 ; CHECK: %[[#]] = OpBitReverse %[[#I2]] %[[#Z2]] @@ -36,45 +36,70 @@ ; CHECK: %[[#]] = OpBitReverse %[[#V16I2]] %[[#]] ; CHECK: %[[#]] = OpBitReverse %[[#V16I4]] %[[#]] +@G_i2_res = global i2 0 +@G_i4_res = global i4 0 +@G_v2i2_res = global <2 x i2> 
zeroinitializer +@G_v2i4_res = global <2 x i4> zeroinitializer +@G_v3i2_res = global <3 x i2> zeroinitializer +@G_v3i4_res = global <3 x i4> zeroinitializer +@G_v4i2_res = global <4 x i2> zeroinitializer +@G_v4i4_res = global <4 x i4> zeroinitializer +@G_v8i2_res = global <8 x i2> zeroinitializer +@G_v8i4_res = global <8 x i4> zeroinitializer +@G_v16i2_res = global <16 x i2> zeroinitializer +@G_v16i4_res = global <16 x i4> zeroinitializer + define spir_kernel void @testBitRev() { entry: %call2 = call i2 @llvm.bitreverse.i2(i2 0) + store i2 %call2, i2* @G_i2_res %call4 = call i4 @llvm.bitreverse.i4(i4 0) + store i4 %call4, i4* @G_i4_res ret void } define spir_kernel void @testBitRevV2(<2 x i2> %a, <2 x i4> %b) { entry: %call2 = call <2 x i2> @llvm.bitreverse.v2i2(<2 x i2> %a) + store <2 x i2> %call2, <2 x i2>* @G_v2i2_res %call4 = call <2 x i4> @llvm.bitreverse.v2i4(<2 x i4> %b) + store <2 x i4> %call4, <2 x i4>* @G_v2i4_res ret void } define spir_kernel void @testBitRevV3(<3 x i2> %a, <3 x i4> %b) { entry: %call2 = call <3 x i2> @llvm.bitreverse.v3i2(<3 x i2> %a) + store <3 x i2> %call2, <3 x i2>* @G_v3i2_res %call4 = call <3 x i4> @llvm.bitreverse.v3i4(<3 x i4> %b) + store <3 x i4> %call4, <3 x i4>* @G_v3i4_res ret void } define spir_kernel void @testBitRevV4(<4 x i2> %a, <4 x i4> %b) { entry: %call2 = call <4 x i2> @llvm.bitreverse.v4i2(<4 x i2> %a) + store <4 x i2> %call2, <4 x i2>* @G_v4i2_res %call4 = call <4 x i4> @llvm.bitreverse.v4i4(<4 x i4> %b) + store <4 x i4> %call4, <4 x i4>* @G_v4i4_res ret void } define spir_kernel void @testBitRevV8(<8 x i2> %a, <8 x i4> %b) { entry: %call2 = call <8 x i2> @llvm.bitreverse.v8i2(<8 x i2> %a) + store <8 x i2> %call2, <8 x i2>* @G_v8i2_res %call4 = call <8 x i4> @llvm.bitreverse.v8i4(<8 x i4> %b) + store <8 x i4> %call4, <8 x i4>* @G_v8i4_res ret void } define spir_kernel void @testBitRevV16(<16 x i2> %a, <16 x i4> %b) { entry: %call2 = call <16 x i2> @llvm.bitreverse.v16i2(<16 x i2> %a) + store <16 x i2> %call2, <16 x 
i2>* @G_v16i2_res %call4 = call <16 x i4> @llvm.bitreverse.v16i4(<16 x i4> %b) + store <16 x i4> %call4, <16 x i4>* @G_v16i4_res ret void } diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/constrained-arithmetic.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/constrained-arithmetic.ll index 11bedfa605f9b..8e8e4df8fabc6 100644 --- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/constrained-arithmetic.ll +++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/constrained-arithmetic.ll @@ -23,15 +23,28 @@ ; CHECK: OpExtInst %[[#]] %[[#]] fma %[[#]] %[[#]] %[[#]] ; CHECK: OpFRem +@G_r1 = global float 0.0 +@G_r2 = global float 0.0 +@G_r3 = global float 0.0 +@G_r4 = global float 0.0 +@G_r5 = global float 0.0 +@G_r6 = global float 0.0 + ; Function Attrs: norecurse nounwind strictfp define dso_local spir_kernel void @test(float %a, i32 %in, i32 %ui) { entry: %r1 = tail call float @llvm.experimental.constrained.fadd.f32(float %a, float %a, metadata !"round.tonearest", metadata !"fpexcept.strict") + store float %r1, ptr @G_r1 %r2 = tail call float @llvm.experimental.constrained.fdiv.f32(float %a, float %a, metadata !"round.towardzero", metadata !"fpexcept.strict") + store float %r2, ptr @G_r2 %r3 = tail call float @llvm.experimental.constrained.fsub.f32(float %a, float %a, metadata !"round.upward", metadata !"fpexcept.strict") + store float %r3, ptr @G_r3 %r4 = tail call float @llvm.experimental.constrained.fmul.f32(float %a, float %a, metadata !"round.downward", metadata !"fpexcept.strict") + store float %r4, ptr @G_r4 %r5 = tail call float @llvm.experimental.constrained.fma.f32(float %a, float %a, float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") + store float %r5, ptr @G_r5 %r6 = tail call float @llvm.experimental.constrained.frem.f32(float %a, float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") + store float %r6, ptr @G_r6 ret void } diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll index 
f83cd8ad1969c..375da5b32e232 100644 --- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll +++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll @@ -18,19 +18,20 @@ ; CL: %[[#FooVar:]] = OpVariable ; CL-NEXT: %[[#Casted1:]] = OpBitcast %[[#PtrChar]] %[[#FooVar]] ; CL-NEXT: OpLifetimeStart %[[#Casted1]] 16 -; CL-NEXT: OpBitcast -; CL-NEXT: OpInBoundsPtrAccessChain -; CL-NEXT: %[[#Casted2:]] = OpBitcast %[[#PtrChar]] %[[#FooVar]] +; CL: OpInBoundsPtrAccessChain +; CL: %[[#Casted2:]] = OpBitcast %[[#PtrChar]] %[[#FooVar]] ; CL-NEXT: OpLifetimeStop %[[#Casted2]] 16 ; VK: OpFunction ; VK: %[[#FooVar:]] = OpVariable ; VK-NEXT: OpInBoundsAccessChain +; VK-NEXT: OpStore ; VK-NEXT: OpReturn define spir_func void @foo(ptr noundef byval(%tprange) align 8 %_arg_UserRange) { %RoundedRangeKernel = alloca %tprange, align 8 call void @llvm.lifetime.start.p0(ptr nonnull %RoundedRangeKernel) %KernelFunc = getelementptr inbounds i8, ptr %RoundedRangeKernel, i64 8 + store i64 zeroinitializer, ptr %KernelFunc, align 8 call void @llvm.lifetime.end.p0(ptr nonnull %RoundedRangeKernel) ret void } @@ -39,37 +40,40 @@ define spir_func void @foo(ptr noundef byval(%tprange) align 8 %_arg_UserRange) ; CL: %[[#BarVar:]] = OpVariable ; CL-NEXT: %[[#Casted1:]] = OpBitcast %[[#PtrChar]] %[[#BarVar]] ; CL-NEXT: OpLifetimeStart %[[#Casted1]] 16 -; CL-NEXT: OpBitcast -; CL-NEXT: OpInBoundsPtrAccessChain -; CL-NEXT: %[[#Casted2:]] = OpBitcast %[[#PtrChar]] %[[#BarVar]] +; CL: OpInBoundsPtrAccessChain +; CL: %[[#Casted2:]] = OpBitcast %[[#PtrChar]] %[[#BarVar]] ; CL-NEXT: OpLifetimeStop %[[#Casted2]] 16 ; VK: OpFunction ; VK: %[[#BarVar:]] = OpVariable ; VK-NEXT: OpInBoundsAccessChain +; VK-NEXT: OpStore ; VK-NEXT: OpReturn define spir_func void @bar(ptr noundef byval(%tprange) align 8 %_arg_UserRange) { %RoundedRangeKernel = alloca %tprange, align 8 call void @llvm.lifetime.start.p0(ptr nonnull %RoundedRangeKernel) %KernelFunc = getelementptr inbounds i8, ptr %RoundedRangeKernel, i64 8 + store 
i64 zeroinitializer, ptr %KernelFunc, align 8 call void @llvm.lifetime.end.p0(ptr nonnull %RoundedRangeKernel) ret void } ; CL: OpFunction ; CL: %[[#TestVar:]] = OpVariable -; CL-NEXT: OpLifetimeStart %[[#TestVar]] 1 -; CL-NEXT: OpInBoundsPtrAccessChain -; CL-NEXT: OpLifetimeStop %[[#TestVar]] 1 +; CL: OpLifetimeStart %[[#TestVar]] 1 +; CL: OpInBoundsPtrAccessChain +; CL: OpLifetimeStop %[[#TestVar]] 1 ; VK: OpFunction ; VK: %[[#Test:]] = OpVariable ; VK-NEXT: OpInBoundsAccessChain +; VK-NEXT: OpStore ; VK-NEXT: OpReturn define spir_func void @test(ptr noundef align 8 %_arg) { %var = alloca i8, align 8 call void @llvm.lifetime.start.p0(ptr nonnull %var) %KernelFunc = getelementptr inbounds i8, ptr %var, i64 1 + store i8 0, ptr %KernelFunc, align 8 call void @llvm.lifetime.end.p0(ptr nonnull %var) ret void } diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/logical-memcpy.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/logical-memcpy.ll new file mode 100644 index 0000000000000..63eddd20bfc22 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/logical-memcpy.ll @@ -0,0 +1,32 @@ +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; CHECK: OpName %[[dst_var:[0-9]+]] "dst" +; CHECK: OpName %[[src_var:[0-9]+]] "src" + +; CHECK: %[[f32:[0-9]+]] = OpTypeFloat 32 +; CHECK: %[[structS:[0-9]+]] = OpTypeStruct %[[f32]] %[[f32]] %[[f32]] %[[f32]] %[[f32]] +; CHECK: %[[ptr_crosswkgrp_structS:[0-9]+]] = OpTypePointer CrossWorkgroup %[[structS]] +%struct.S = type <{ float, float, float, float, float }> + +; CHECK-DAG: %[[src_var]] = OpVariable %[[ptr_crosswkgrp_structS]] CrossWorkgroup +@src = external dso_local addrspace(1) global %struct.S, align 4 + +; CHECK-DAG: %[[dst_var]] = OpVariable %[[ptr_crosswkgrp_structS]] CrossWorkgroup +@dst = external dso_local addrspace(1) global %struct.S, align 4 + +; CHECK: 
%[[main_func:[0-9]+]] = OpFunction %{{[0-9]+}} None %{{[0-9]+}} +; CHECK: %[[entry:[0-9]+]] = OpLabel +; Function Attrs: mustprogress nofree noinline norecurse nosync nounwind willreturn memory(readwrite, inaccessiblemem: none, target_mem0: none, target_mem1: none) +define void @main() local_unnamed_addr #0 { +entry: +; CHECK: OpCopyMemory %[[dst_var]] %[[src_var]] Aligned 4 + call void @llvm.memcpy.p0.p0.i64(ptr addrspace(1) align 4 @dst, ptr addrspace(1) align 4 @src, i64 20, i1 false) + ret void +; CHECK: OpReturn +; CHECK: OpFunctionEnd +} + +attributes #0 = { "hlsl.numthreads"="8,1,1" "hlsl.shader"="compute" } + + diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/satur-arith.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/satur-arith.ll index 08f15c077fed9..db930d1b28ec3 100644 --- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/satur-arith.ll +++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/satur-arith.ll @@ -9,29 +9,55 @@ ; CHECK-DAG: OpName %[[#Bar:]] "bar" ; CHECK: %[[#Foo]] = OpFunction ; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] u_add_sat -; CHECK-NEXT: %[[#]] = OpExtInst %[[#]] %[[#]] u_sub_sat -; CHECK-NEXT: %[[#]] = OpExtInst %[[#]] %[[#]] s_add_sat -; CHECK-NEXT: %[[#]] = OpExtInst %[[#]] %[[#]] s_sub_sat +; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] u_sub_sat +; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] s_add_sat +; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] s_sub_sat ; CHECK: %[[#Bar]] = OpFunction ; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] u_add_sat -; CHECK-NEXT: %[[#]] = OpExtInst %[[#]] %[[#]] u_sub_sat -; CHECK-NEXT: %[[#]] = OpExtInst %[[#]] %[[#]] s_add_sat -; CHECK-NEXT: %[[#]] = OpExtInst %[[#]] %[[#]] s_sub_sat +; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] u_sub_sat +; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] s_add_sat +; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] s_sub_sat + +@G_r1_foo = global i16 0 +@G_r2_foo = global i16 0 +@G_r3_foo = global i16 0 +@G_r4_foo = global i16 0 +@G_r1_bar = global <4 x i32> zeroinitializer +@G_r2_bar = global <4 x i32> 
zeroinitializer +@G_r3_bar = global <4 x i32> zeroinitializer +@G_r4_bar = global <4 x i32> zeroinitializer define spir_func void @foo(i16 %x, i16 %y) { entry: %r1 = tail call i16 @llvm.uadd.sat.i16(i16 %x, i16 %y) + store i16 %r1, ptr @G_r1_foo %r2 = tail call i16 @llvm.usub.sat.i16(i16 %x, i16 %y) + store i16 %r2, ptr @G_r2_foo %r3 = tail call i16 @llvm.sadd.sat.i16(i16 %x, i16 %y) + store i16 %r3, ptr @G_r3_foo %r4 = tail call i16 @llvm.ssub.sat.i16(i16 %x, i16 %y) + store i16 %r4, ptr @G_r4_foo ret void } define spir_func void @bar(<4 x i32> %x, <4 x i32> %y) { entry: %r1 = tail call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %x, <4 x i32> %y) + store <4 x i32> %r1, ptr @G_r1_bar %r2 = tail call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %x, <4 x i32> %y) + store <4 x i32> %r2, ptr @G_r2_bar %r3 = tail call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %x, <4 x i32> %y) + store <4 x i32> %r3, ptr @G_r3_bar %r4 = tail call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %x, <4 x i32> %y) + store <4 x i32> %r4, ptr @G_r4_bar ret void } + +declare i16 @llvm.uadd.sat.i16(i16, i16) +declare i16 @llvm.usub.sat.i16(i16, i16) +declare i16 @llvm.sadd.sat.i16(i16, i16) +declare i16 @llvm.ssub.sat.i16(i16, i16) +declare <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32>, <4 x i32>) +declare <4 x i32> @llvm.usub.sat.v4i32(<4 x i32>, <4 x i32>) +declare <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32>, <4 x i32>) +declare <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32>, <4 x i32>) diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/uadd.with.overflow.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/uadd.with.overflow.ll index 08e429f36827c..54cb096da8d89 100644 --- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/uadd.with.overflow.ll +++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/uadd.with.overflow.ll @@ -90,12 +90,13 @@ define dso_local spir_func void @umulo_v2i64(<2 x i64> %a, <2 x i64> %b, ptr %p) ; CHECK: OpIAddCarry %[[StructLong]] ; CHECK: OpIAddCarry %[[StructLong]] ; CHECK: OpReturn -define void @foo(i64 %a, i64 %b) 
{ +define i64 @foo(i64 %a, i64 %b) { %r1 = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b) %r2 = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b) %d1 = extractvalue { i64, i1 } %r1, 0 %d2 = extractvalue { i64, i1 } %r2, 0 - ret void + %sum = add i64 %d1, %d2 + ret i64 %sum } declare {i8, i1} @llvm.uadd.with.overflow.i8(i8, i8) diff --git a/llvm/test/CodeGen/SPIRV/logical-access-chain.ll b/llvm/test/CodeGen/SPIRV/logical-access-chain.ll index d56678ecfc2c9..e96ebf777c28f 100644 --- a/llvm/test/CodeGen/SPIRV/logical-access-chain.ll +++ b/llvm/test/CodeGen/SPIRV/logical-access-chain.ll @@ -2,6 +2,7 @@ ; CHECK-DAG: [[uint:%[0-9]+]] = OpTypeInt 32 0 ; CHECK-DAG: [[uint2:%[0-9]+]] = OpTypeVector [[uint]] 2 +; CHECK-DAG: [[uint_0:%[0-9]+]] = OpConstant [[uint]] 0 ; CHECK-DAG: [[uint_1:%[0-9]+]] = OpConstant [[uint]] 1 ; CHECK-DAG: [[ptr_uint:%[0-9]+]] = OpTypePointer Function [[uint]] ; CHECK-DAG: [[ptr_uint2:%[0-9]+]] = OpTypePointer Function [[uint2]] @@ -12,7 +13,9 @@ entry: ; CHECK: [[var:%[0-9]+]] = OpVariable [[ptr_uint2]] Function %1 = getelementptr <2 x i32>, ptr %0, i32 0, i32 1 -; CHECK: {{%[0-9]+}} = OpAccessChain [[ptr_uint]] [[var]] [[uint_1]] +; CHECK: [[gep:%[0-9]+]] = OpAccessChain [[ptr_uint]] [[var]] [[uint_1]] + store i32 0, ptr %1 +; CHECK: OpStore [[gep]] [[uint_0]] ret void } diff --git a/llvm/test/CodeGen/SPIRV/logical-struct-access.ll b/llvm/test/CodeGen/SPIRV/logical-struct-access.ll index 66337b1ba2b37..518e011bf0be2 100644 --- a/llvm/test/CodeGen/SPIRV/logical-struct-access.ll +++ b/llvm/test/CodeGen/SPIRV/logical-struct-access.ll @@ -1,5 +1,4 @@ -; RUN: llc -O0 -mtriple=spirv-unknown-vulkan1.3-compute %s -o - -print-after-all | FileCheck %s -; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-vulkan1.3-compute %s -o - -filetype=obj | spirv-val %} +; RUN: llc -O0 -mtriple=spirv-unknown-vulkan1.3-compute %s -o - | FileCheck %s ; CHECK-DAG: [[uint:%[0-9]+]] = OpTypeInt 32 0 @@ -24,35 +23,85 @@ ; CHECK-DAG: 
[[ptr_A:%[0-9]+]] = OpTypePointer Function [[A]] ; CHECK-DAG: [[ptr_B:%[0-9]+]] = OpTypePointer Function [[B]] -define void @main() #1 { -entry: - %0 = alloca %B, align 4 -; CHECK: [[tmp:%[0-9]+]] = OpVariable [[ptr_B]] Function - - %1 = getelementptr %B, ptr %0, i32 0, i32 0 +define internal ptr @gep_B_0(ptr %base) { +; CHECK: [[tmp:%[0-9]+]] = OpFunctionParameter [[ptr_B]] ; CHECK: {{%[0-9]+}} = OpAccessChain [[ptr_A]] [[tmp]] [[uint_0]] - %2 = getelementptr inbounds %B, ptr %0, i32 0, i32 0 + %res = getelementptr %B, ptr %base, i32 0, i32 0 + ret ptr %res +} + +define internal ptr @gep_inbounds_B_0(ptr %base) { +; CHECK: [[tmp:%[0-9]+]] = OpFunctionParameter [[ptr_B]] ; CHECK: {{%[0-9]+}} = OpInBoundsAccessChain [[ptr_A]] [[tmp]] [[uint_0]] + %res = getelementptr inbounds %B, ptr %base, i32 0, i32 0 + ret ptr %res +} - %3 = getelementptr %B, ptr %0, i32 0, i32 1 +define internal ptr @gep_B_1(ptr %base) { +; CHECK: [[tmp:%[0-9]+]] = OpFunctionParameter [[ptr_B]] ; CHECK: {{%[0-9]+}} = OpAccessChain [[ptr_uint]] [[tmp]] [[uint_1]] - %4 = getelementptr inbounds %B, ptr %0, i32 0, i32 1 + %res = getelementptr %B, ptr %base, i32 0, i32 1 + ret ptr %res +} + +define internal ptr @gep_inbounds_B_1(ptr %base) { +; CHECK: [[tmp:%[0-9]+]] = OpFunctionParameter [[ptr_B]] ; CHECK: {{%[0-9]+}} = OpInBoundsAccessChain [[ptr_uint]] [[tmp]] [[uint_1]] + %res = getelementptr inbounds %B, ptr %base, i32 0, i32 1 + ret ptr %res +} - %5 = getelementptr %B, ptr %0, i32 0, i32 2 +define internal ptr @gep_B_2(ptr %base) { +; CHECK: [[tmp:%[0-9]+]] = OpFunctionParameter [[ptr_B]] ; CHECK: {{%[0-9]+}} = OpAccessChain [[ptr_A]] [[tmp]] [[uint_2]] - %6 = getelementptr inbounds %B, ptr %0, i32 0, i32 2 + %res = getelementptr %B, ptr %base, i32 0, i32 2 + ret ptr %res +} + +define internal ptr @gep_inbounds_B_2(ptr %base) { +; CHECK: [[tmp:%[0-9]+]] = OpFunctionParameter [[ptr_B]] ; CHECK: {{%[0-9]+}} = OpInBoundsAccessChain [[ptr_A]] [[tmp]] [[uint_2]] + %res = getelementptr inbounds %B, 
ptr %base, i32 0, i32 2 + ret ptr %res +} - %7 = getelementptr %B, ptr %0, i32 0, i32 2, i32 1 +define internal ptr @gep_B_2_1(ptr %base) { +; CHECK: [[tmp:%[0-9]+]] = OpFunctionParameter [[ptr_B]] ; CHECK: {{%[0-9]+}} = OpAccessChain [[ptr_uint]] [[tmp]] [[uint_2]] [[uint_1]] - %8 = getelementptr inbounds %B, ptr %0, i32 0, i32 2, i32 1 + %res = getelementptr %B, ptr %base, i32 0, i32 2, i32 1 + ret ptr %res +} + +define internal ptr @gep_inbounds_B_2_1(ptr %base) { +; CHECK: [[tmp:%[0-9]+]] = OpFunctionParameter [[ptr_B]] ; CHECK: {{%[0-9]+}} = OpInBoundsAccessChain [[ptr_uint]] [[tmp]] [[uint_2]] [[uint_1]] + %res = getelementptr inbounds %B, ptr %base, i32 0, i32 2, i32 1 + ret ptr %res +} - %9 = getelementptr %B, ptr %0, i32 0, i32 2 - %10 = getelementptr %A, ptr %9, i32 0, i32 1 +define internal ptr @gep_B_2_A_1(ptr %base) { +; CHECK: [[tmp:%[0-9]+]] = OpFunctionParameter [[ptr_B]] ; CHECK: [[x:%[0-9]+]] = OpAccessChain [[ptr_A]] [[tmp]] [[uint_2]] ; CHECK: {{%[0-9]+}} = OpAccessChain [[ptr_uint]] [[x]] [[uint_1]] + %x = getelementptr %B, ptr %base, i32 0, i32 2 + %res = getelementptr %A, ptr %x, i32 0, i32 1 + ret ptr %res +} + +define void @main() #1 { +entry: + %0 = alloca %B, align 4 +; CHECK: [[tmp:%[0-9]+]] = OpVariable [[ptr_B]] Function + + %1 = call ptr @gep_B_0(ptr %0) + %2 = call ptr @gep_inbounds_B_0(ptr %0) + %3 = call ptr @gep_B_1(ptr %0) + %4 = call ptr @gep_inbounds_B_1(ptr %0) + %5 = call ptr @gep_B_2(ptr %0) + %6 = call ptr @gep_inbounds_B_2(ptr %0) + %7 = call ptr @gep_B_2_1(ptr %0) + %8 = call ptr @gep_inbounds_B_2_1(ptr %0) + %10 = call ptr @gep_B_2_A_1(ptr %0) ret void } diff --git a/llvm/test/CodeGen/SPIRV/phi-insert-point.ll b/llvm/test/CodeGen/SPIRV/phi-insert-point.ll index 70d121cdf4b3a..a34186d491257 100644 --- a/llvm/test/CodeGen/SPIRV/phi-insert-point.ll +++ b/llvm/test/CodeGen/SPIRV/phi-insert-point.ll @@ -36,9 +36,18 @@ ok: br label %exit exit: + store i64 %r1, ptr @g1 + store i64 %r2, ptr @g2 + store ptr addrspace(4) %r3, ptr 
@g3 + store ptr addrspace(4) %r4, ptr @g4 ret void } +@g1 = internal global i64 0 +@g2 = internal global i64 0 +@g3 = internal global ptr addrspace(4) null +@g4 = internal global ptr addrspace(4) null + define spir_kernel void @bar(i64 %arg_val, i64 %arg_val_def, ptr addrspace(4) byval(%struct) %arg_ptr, ptr addrspace(4) %arg_ptr_def) { entry: %fl = icmp eq i64 %arg_val, 0 @@ -55,5 +64,9 @@ ok: br label %exit exit: + store i64 %r1, ptr @g1 + store i64 %r2, ptr @g2 + store ptr addrspace(4) %r3, ptr @g3 + store ptr addrspace(4) %r4, ptr @g4 ret void } diff --git a/llvm/test/CodeGen/SPIRV/phi-ptrcast-dominate.ll b/llvm/test/CodeGen/SPIRV/phi-ptrcast-dominate.ll index bc090ce55fbec..c250ebae12746 100644 --- a/llvm/test/CodeGen/SPIRV/phi-ptrcast-dominate.ll +++ b/llvm/test/CodeGen/SPIRV/phi-ptrcast-dominate.ll @@ -20,11 +20,14 @@ ; CHECK: %[[#Case1]] = OpFunction define spir_func void @case1(i1 %b1, i1 %b2, i1 %b3) { entry: + %tmp.1 = alloca i8, align 1 ; CHECK: OpBranchConditional %[[#]] %[[#l1:]] %[[#l2:]] br i1 %b1, label %l1, label %l2 l1: %str = phi ptr addrspace(1) [ @.str.1, %entry ], [ @.str.2, %l2 ], [ @.str.2, %l3 ] + %v1 = load i8, ptr addrspace(1) %str, align 1 + store i8 %v1, ptr %tmp.1, align 1 br label %exit ; CHECK: %[[#l2]] = OpLabel @@ -51,11 +54,14 @@ exit: ; CHECK: %[[#Case2]] = OpFunction define spir_func void @case2(i1 %b1, i1 %b2, i1 %b3, ptr addrspace(1) byval(%struct1) %str1, ptr addrspace(1) byval(%struct2) %str2) { entry: + %tmp.2 = alloca i8, align 1 ; CHECK: OpBranchConditional %[[#]] %[[#l1:]] %[[#l2:]] br i1 %b1, label %l1, label %l2 l1: %str = phi ptr addrspace(1) [ %str1, %entry ], [ %str2, %l2 ], [ %str2, %l3 ] + %v2 = load i8, ptr addrspace(1) %str, align 1 + store i8 %v2, ptr %tmp.2, align 1 br label %exit ; CHECK: %[[#l2]] = OpLabel @@ -83,10 +89,13 @@ define spir_func void @case3(i1 %b1, i1 %b2, i1 %b3, ptr addrspace(1) byval(%str ; CHECK: OpBranchConditional %[[#]] %[[#l1:]] %[[#l2:]] entry: + %tmp.3 = alloca i8, align 1 br i1 %b1, 
label %l1, label %l2 l1: %str = phi ptr addrspace(1) [ %_arg_str1, %entry ], [ %str2, %l2 ], [ %str3, %l3 ] + %v3 = load i8, ptr addrspace(1) %str, align 1 + store i8 %v3, ptr %tmp.3, align 1 br label %exit ; CHECK: %[[#l2]] = OpLabel diff --git a/llvm/test/CodeGen/SPIRV/pointers/bitcast-fix-accesschain.ll b/llvm/test/CodeGen/SPIRV/pointers/bitcast-fix-accesschain.ll index 7db1eed84bf7d..3382987bbd581 100644 --- a/llvm/test/CodeGen/SPIRV/pointers/bitcast-fix-accesschain.ll +++ b/llvm/test/CodeGen/SPIRV/pointers/bitcast-fix-accesschain.ll @@ -26,9 +26,13 @@ %struct.S = type { i32 } %struct.__wrapper_class = type { [7 x %struct.S] } +@G_elem = global ptr null +@G_data = global i64 0 + define spir_kernel void @foo1(ptr noundef byval(%struct.__wrapper_class) align 4 %_arg_Arr) { entry: %elem = getelementptr inbounds i8, ptr %_arg_Arr, i64 0 + store ptr %elem, ptr @G_elem ret void } @@ -36,5 +40,6 @@ define spir_kernel void @foo2(ptr noundef byval(%struct.__wrapper_class) align 4 entry: %elem = getelementptr inbounds %struct.__wrapper_class, ptr %_arg_Arr, i64 0 %data = load i64, ptr %elem + store i64 %data, ptr @G_data ret void } diff --git a/llvm/test/CodeGen/SPIRV/pointers/bitcast-fix-load.ll b/llvm/test/CodeGen/SPIRV/pointers/bitcast-fix-load.ll index d6a0071167cef..ed5652a750582 100644 --- a/llvm/test/CodeGen/SPIRV/pointers/bitcast-fix-load.ll +++ b/llvm/test/CodeGen/SPIRV/pointers/bitcast-fix-load.ll @@ -14,8 +14,11 @@ %struct.S = type { i32 } %struct.__wrapper_class = type { [7 x %struct.S] } +@G = global i32 0 + define spir_kernel void @foo(ptr noundef byval(%struct.__wrapper_class) align 4 %_arg_Arr) { entry: %val = load i32, ptr %_arg_Arr + store i32 %val, ptr @G ret void } diff --git a/llvm/test/CodeGen/SPIRV/pointers/gep-types-1.ll b/llvm/test/CodeGen/SPIRV/pointers/gep-types-1.ll index 0e2730e18bf38..e47aa61a8acd7 100644 --- a/llvm/test/CodeGen/SPIRV/pointers/gep-types-1.ll +++ b/llvm/test/CodeGen/SPIRV/pointers/gep-types-1.ll @@ -30,6 +30,8 @@ 
%"class.std::complex" = type { { double, double } } %class.anon = type { i32, ptr addrspace(4), [2 x [2 x %"class.std::complex"]] } +@G = global ptr addrspace(4) null + define weak_odr dso_local spir_kernel void @foo(i32 noundef %_arg_N, ptr addrspace(1) noundef align 8 %_arg_p) { entry: %Kernel = alloca %class.anon, align 8 @@ -38,5 +40,6 @@ entry: %r0 = addrspacecast ptr addrspace(1) %_arg_p to ptr addrspace(4) store ptr addrspace(4) %r0, ptr %p, align 8 %r3 = load ptr addrspace(4), ptr %p, align 8 + store ptr addrspace(4) %r3, ptr @G ret void } diff --git a/llvm/test/CodeGen/SPIRV/pointers/getelementptr-addressspace.ll b/llvm/test/CodeGen/SPIRV/pointers/getelementptr-addressspace.ll index 7a09ac973b590..0e397ec51caaa 100644 --- a/llvm/test/CodeGen/SPIRV/pointers/getelementptr-addressspace.ll +++ b/llvm/test/CodeGen/SPIRV/pointers/getelementptr-addressspace.ll @@ -7,9 +7,14 @@ ; CHECK: %[[#]] = OpInBoundsPtrAccessChain %[[#PTR1]] %[[#]] %[[#]] ; CHECK: %[[#]] = OpInBoundsPtrAccessChain %[[#PTR2]] %[[#]] %[[#]] +@G_c = global ptr addrspace(1) null +@G_d = global ptr addrspace(2) null + define spir_kernel void @foo(ptr addrspace(1) %a, ptr addrspace(2) %b) { entry: %c = getelementptr inbounds i8, ptr addrspace(1) %a, i32 1 + store ptr addrspace(1) %c, ptr @G_c %d = getelementptr inbounds i8, ptr addrspace(2) %b, i32 2 + store ptr addrspace(2) %d, ptr @G_d ret void } diff --git a/llvm/test/CodeGen/SPIRV/pointers/getelementptr-base-type.ll b/llvm/test/CodeGen/SPIRV/pointers/getelementptr-base-type.ll index c822dbc5d6c0e..e12a809125248 100644 --- a/llvm/test/CodeGen/SPIRV/pointers/getelementptr-base-type.ll +++ b/llvm/test/CodeGen/SPIRV/pointers/getelementptr-base-type.ll @@ -7,9 +7,12 @@ ; CHECK: %[[#GEP:]] = OpInBoundsPtrAccessChain %[[#PTR]] %[[#ARG]] %[[#]] ; CHECK: %[[#]] = OpLoad %[[#FLOAT32]] %[[#GEP]] Aligned 4 +@G = global float 0.0 + define spir_kernel void @test1(ptr addrspace(1) %arg1) !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type 
!3 !kernel_arg_type_qual !4 { %a = getelementptr inbounds float, ptr addrspace(1) %arg1, i64 1 %b = load float, ptr addrspace(1) %a, align 4 + store float %b, ptr @G ret void } diff --git a/llvm/test/CodeGen/SPIRV/pointers/getelementptr-bitcast-load.ll b/llvm/test/CodeGen/SPIRV/pointers/getelementptr-bitcast-load.ll index 1d846a35a65aa..859253e5b18d9 100644 --- a/llvm/test/CodeGen/SPIRV/pointers/getelementptr-bitcast-load.ll +++ b/llvm/test/CodeGen/SPIRV/pointers/getelementptr-bitcast-load.ll @@ -7,6 +7,9 @@ ; CHECK-DAG: %[[#PTR_VEC3:]] = OpTypePointer CrossWorkgroup %[[#VEC3]] ; CHECK-DAG: %[[#PTR_VEC4:]] = OpTypePointer CrossWorkgroup %[[#VEC4]] +@G_loadv1 = global <4 x i8> zeroinitializer +@G_loadv2 = global <4 x i8> zeroinitializer + ; CHECK: %[[#AC1:]] = OpInBoundsPtrAccessChain %[[#PTR_VEC3]] %[[#]] %[[#]] ; CHECK: %[[#BC1:]] = OpBitcast %[[#PTR_VEC4]] %[[#AC1]] ; CHECK: %[[#LD1:]] = OpLoad %[[#VEC4]] %[[#BC1]] Aligned 4 @@ -15,6 +18,7 @@ define spir_kernel void @foo(ptr addrspace(1) %a, i64 %b) { %index = getelementptr inbounds <3 x i8>, ptr addrspace(1) %a, i64 %b %loadv = load <4 x i8>, ptr addrspace(1) %index, align 4 + store <4 x i8> %loadv, ptr @G_loadv1 ret void } @@ -29,5 +33,6 @@ define spir_kernel void @bar(ptr addrspace(1) %a, i64 %b) { ; from older LLVM IR with typed pointers. 
%cast = bitcast ptr addrspace(1) %index to ptr addrspace(1) %loadv = load <4 x i8>, ptr addrspace(1) %cast, align 4 + store <4 x i8> %loadv, ptr @G_loadv2 ret void } diff --git a/llvm/test/CodeGen/SPIRV/pointers/getelementptr-kernel-arg-char.ll b/llvm/test/CodeGen/SPIRV/pointers/getelementptr-kernel-arg-char.ll index a5e891dae6f11..3ae03edf5200f 100644 --- a/llvm/test/CodeGen/SPIRV/pointers/getelementptr-kernel-arg-char.ll +++ b/llvm/test/CodeGen/SPIRV/pointers/getelementptr-kernel-arg-char.ll @@ -7,11 +7,15 @@ ; CHECK-DAG: %[[#PTRINT8:]] = OpTypePointer Workgroup %[[#INT8]] ; CHECK-DAG: %[[#CONST:]] = OpConstant %[[#INT64]] 1 +@G_gep1 = global ptr addrspace(3) null +@G_gep2 = global ptr addrspace(3) null + ; CHECK: %[[#PARAM1:]] = OpFunctionParameter %[[#PTRINT8]] define spir_kernel void @test1(ptr addrspace(3) %address) { ; CHECK: %[[#]] = OpInBoundsPtrAccessChain %[[#PTRINT8]] %[[#PARAM1]] %[[#CONST]] %cast = bitcast ptr addrspace(3) %address to ptr addrspace(3) %gep = getelementptr inbounds i8, ptr addrspace(3) %cast, i64 1 + store ptr addrspace(3) %gep, ptr @G_gep1 ret void } @@ -19,5 +23,6 @@ define spir_kernel void @test1(ptr addrspace(3) %address) { define spir_kernel void @test2(ptr addrspace(3) %address) { ; CHECK: %[[#]] = OpInBoundsPtrAccessChain %[[#PTRINT8]] %[[#PARAM2]] %[[#CONST]] %gep = getelementptr inbounds i8, ptr addrspace(3) %address, i64 1 + store ptr addrspace(3) %gep, ptr @G_gep2 ret void } diff --git a/llvm/test/CodeGen/SPIRV/pointers/global-addrspacecast.ll b/llvm/test/CodeGen/SPIRV/pointers/global-addrspacecast.ll index 19451d23c6830..39563aecafec4 100644 --- a/llvm/test/CodeGen/SPIRV/pointers/global-addrspacecast.ll +++ b/llvm/test/CodeGen/SPIRV/pointers/global-addrspacecast.ll @@ -7,13 +7,16 @@ ; CHECK-DAG: %[[#value:]] = OpConstant %[[#type]] 456 ; CHECK-DAG: %[[#var:]] = OpVariable %[[#ptrty]] Private %[[#value]] +@G = internal global i32 0 + define hidden spir_func void @Foo() { %p = addrspacecast ptr addrspace(10) @PrivInternal to 
ptr %v = load i32, ptr %p, align 4 + store i32 %v, ptr @G ret void ; CHECK: OpLabel -; CHECK-NEXT: OpLoad %[[#type]] %[[#var]] Aligned 4 -; CHECK-Next: OpReturn +; CHECK: OpLoad %[[#type]] %[[#var]] Aligned 4 +; CHECK: OpReturn } define void @main() #1 { diff --git a/llvm/test/CodeGen/SPIRV/pointers/load-addressspace.ll b/llvm/test/CodeGen/SPIRV/pointers/load-addressspace.ll index b3c68d22f9bdd..681fb70ad706d 100644 --- a/llvm/test/CodeGen/SPIRV/pointers/load-addressspace.ll +++ b/llvm/test/CodeGen/SPIRV/pointers/load-addressspace.ll @@ -9,9 +9,14 @@ ; CHECK: %[[#]] = OpLoad %[[#INT8]] %[[#FNP1]] Aligned 1 ; CHECK: %[[#]] = OpLoad %[[#INT8]] %[[#FNP2]] Aligned 1 +@G_c = global i8 0 +@G_d = global i8 0 + define spir_kernel void @foo(ptr addrspace(1) %a, ptr addrspace(2) %b) { entry: %c = load i8, ptr addrspace(1) %a + store i8 %c, ptr @G_c %d = load i8, ptr addrspace(2) %b + store i8 %d, ptr @G_d ret void } diff --git a/llvm/test/CodeGen/SPIRV/pointers/phi-chain-types.ll b/llvm/test/CodeGen/SPIRV/pointers/phi-chain-types.ll index a9e79df259c4f..44134f83cfec3 100644 --- a/llvm/test/CodeGen/SPIRV/pointers/phi-chain-types.ll +++ b/llvm/test/CodeGen/SPIRV/pointers/phi-chain-types.ll @@ -51,6 +51,7 @@ l1: l2: %val2 = phi ptr addrspace(4) [ %p, %l1 ], [ %val3, %l3 ] %val1 = phi ptr addrspace(4) [ addrspacecast (ptr addrspace(3) @G1 to ptr addrspace(4)), %l1 ], [ %val2, %l3 ] + store i16 0, ptr addrspace(4) %val1, align 2 br i1 %f2, label %l3, label %exit l3: @@ -75,6 +76,7 @@ l1: l2: %val1 = phi ptr addrspace(4) [ addrspacecast (ptr addrspace(3) @G1 to ptr addrspace(4)), %l1 ], [ %val2, %l3 ] %val2 = phi ptr addrspace(4) [ %p, %l1 ], [ %val3, %l3 ] + store i16 0, ptr addrspace(4) %val1, align 2 br i1 %f2, label %l3, label %exit exit: diff --git a/llvm/test/CodeGen/SPIRV/pointers/pointer-addrspacecast.ll b/llvm/test/CodeGen/SPIRV/pointers/pointer-addrspacecast.ll index 4d5549dfab8d9..123daa411810b 100644 --- a/llvm/test/CodeGen/SPIRV/pointers/pointer-addrspacecast.ll +++ 
b/llvm/test/CodeGen/SPIRV/pointers/pointer-addrspacecast.ll @@ -10,6 +10,7 @@ ; CHECK-DAG: OpName %[[#func_chain:]] "chain" @global = internal addrspace(10) global i32 zeroinitializer +@G = global i32 0 define void @simple() { ; CHECK: %[[#func_simple]] = OpFunction @@ -17,6 +18,7 @@ entry: %ptr = getelementptr i32, ptr addrspace(10) @global, i32 0 %casted = addrspacecast ptr addrspace(10) %ptr to ptr %val = load i32, ptr %casted + store i32 %val, ptr @G ; CHECK: %{{.*}} = OpLoad %[[#uint]] %[[#var]] Aligned 4 ret void } @@ -31,6 +33,7 @@ entry: %e = addrspacecast ptr addrspace(10) %d to ptr %val = load i32, ptr %e + store i32 %val, ptr @G ; CHECK: %{{.*}} = OpLoad %[[#uint]] %[[#var]] Aligned 4 ret void } diff --git a/llvm/test/CodeGen/SPIRV/pointers/ptr-eq-types.ll b/llvm/test/CodeGen/SPIRV/pointers/ptr-eq-types.ll index 876cd3c20cf35..80ee36cfe15d2 100644 --- a/llvm/test/CodeGen/SPIRV/pointers/ptr-eq-types.ll +++ b/llvm/test/CodeGen/SPIRV/pointers/ptr-eq-types.ll @@ -15,6 +15,9 @@ ; CHECK: OpGenericCastToPtr ; CHECK: OpPtrEqual +@G_b1 = global i1 0 +@G_b2 = global i1 0 + define spir_kernel void @foo(ptr addrspace(3) align 4 %_arg_local, ptr addrspace(1) align 4 %_arg_global) { entry: %p1 = getelementptr inbounds i32, ptr addrspace(1) %_arg_global, i64 0 @@ -24,9 +27,12 @@ entry: %p4 = addrspacecast ptr addrspace(1) %p3 to ptr addrspace(4) %p5 = tail call spir_func ptr addrspace(3) @_Z40__spirv_GenericCastToPtrExplicit_ToLocalPvi(ptr addrspace(4) %p4, i32 4) %b1 = icmp eq ptr addrspace(3) %p5, null + store i1 %b1, ptr @G_b1 %p6 = getelementptr inbounds i32, ptr addrspace(3) %p5, i64 0 %p7 = tail call spir_func ptr addrspace(3) @_Z40__spirv_GenericCastToPtrExplicit_ToLocalPvi(ptr addrspace(4) %p4, i32 4) %b2 = icmp eq ptr addrspace(3) %p7, null + store i1 %b2, ptr @G_b2 + store ptr addrspace(3) %p6, ptr addrspace(3) %p2 ret void } diff --git a/llvm/test/CodeGen/SPIRV/pointers/resource-vector-load-store.ll 
b/llvm/test/CodeGen/SPIRV/pointers/resource-vector-load-store.ll index 7548f4757dbe6..6fc03a386d14d 100644 --- a/llvm/test/CodeGen/SPIRV/pointers/resource-vector-load-store.ll +++ b/llvm/test/CodeGen/SPIRV/pointers/resource-vector-load-store.ll @@ -4,18 +4,23 @@ @.str = private unnamed_addr constant [7 x i8] c"buffer\00", align 1 +; The i64 values in the extracts will be turned +; into immediate values. There should be no 64-bit +; integers in the module. +; CHECK-NOT: OpTypeInt 64 0 + define void @main() "hlsl.shader"="pixel" { -; CHECK: %24 = OpFunction %2 None %3 ; -- Begin function main -; CHECK-NEXT: %1 = OpLabel -; CHECK-NEXT: %25 = OpVariable %13 Function %22 -; CHECK-NEXT: %26 = OpLoad %7 %23 -; CHECK-NEXT: %27 = OpImageRead %5 %26 %15 -; CHECK-NEXT: %28 = OpCompositeExtract %4 %27 0 -; CHECK-NEXT: %29 = OpCompositeExtract %4 %27 1 -; CHECK-NEXT: %30 = OpFAdd %4 %29 %28 -; CHECK-NEXT: %31 = OpCompositeInsert %5 %30 %27 0 -; CHECK-NEXT: %32 = OpLoad %7 %23 -; CHECK-NEXT: OpImageWrite %32 %15 %31 +; CHECK: %[[FUNC:[0-9]+]] = OpFunction %[[VOID:[0-9]+]] None %[[FNTYPE:[0-9]+]] ; -- Begin function main +; CHECK-NEXT: %[[LABEL:[0-9]+]] = OpLabel +; CHECK-NEXT: %[[VAR:[0-9]+]] = OpVariable %[[PTR_FN:[a-zA-Z0-9_]+]] Function %[[INIT:[a-zA-Z0-9_]+]] +; CHECK-NEXT: %[[LOAD1:[0-9]+]] = OpLoad %[[IMG_TYPE:[a-zA-Z0-9_]+]] %[[IMG_VAR:[a-zA-Z0-9_]+]] +; CHECK-NEXT: %[[READ:[0-9]+]] = OpImageRead %[[VEC4:[a-zA-Z0-9_]+]] %[[LOAD1]] %[[COORD:[a-zA-Z0-9_]+]] +; CHECK-NEXT: %[[EXTRACT1:[0-9]+]] = OpCompositeExtract %[[FLOAT:[a-zA-Z0-9_]+]] %[[READ]] 0 +; CHECK-NEXT: %[[EXTRACT2:[0-9]+]] = OpCompositeExtract %[[FLOAT]] %[[READ]] 1 +; CHECK-NEXT: %[[ADD:[0-9]+]] = OpFAdd %[[FLOAT]] %[[EXTRACT2]] %[[EXTRACT1]] +; CHECK-NEXT: %[[INSERT:[0-9]+]] = OpCompositeInsert %[[VEC4]] %[[ADD]] %[[READ]] 0 +; CHECK-NEXT: %[[LOAD2:[0-9]+]] = OpLoad %[[IMG_TYPE]] %[[IMG_VAR]] +; CHECK-NEXT: OpImageWrite %[[LOAD2]] %[[COORD]] %[[INSERT]] ; CHECK-NEXT: OpReturn ; CHECK-NEXT: OpFunctionEnd entry:
diff --git a/llvm/test/CodeGen/SPIRV/pointers/type-deduce-call-no-bitcast.ll b/llvm/test/CodeGen/SPIRV/pointers/type-deduce-call-no-bitcast.ll index 101116f437811..7409b3db51948 100644 --- a/llvm/test/CodeGen/SPIRV/pointers/type-deduce-call-no-bitcast.ll +++ b/llvm/test/CodeGen/SPIRV/pointers/type-deduce-call-no-bitcast.ll @@ -34,6 +34,8 @@ %class.CustomType = type { i64 } +@G = global ptr addrspace(4) null + define linkonce_odr dso_local spir_func void @bar(ptr addrspace(4) noundef %first) { entry: %first.addr = alloca ptr addrspace(4) @@ -44,6 +46,7 @@ entry: call spir_func void @foo(i64 noundef 100, ptr addrspace(4) noundef dereferenceable(8) %first.addr.ascast, ptr addrspace(4) noundef dereferenceable(8) %temp.ascast) call spir_func void @foo(i64 noundef 100, ptr addrspace(4) noundef dereferenceable(8) %temp.ascast, ptr addrspace(4) noundef dereferenceable(8) %first.addr.ascast) %var = alloca ptr addrspace(4), align 8 + store ptr addrspace(4) null, ptr %var ret void } diff --git a/llvm/test/CodeGen/SPIRV/remove-dead-type-intrinsics.ll b/llvm/test/CodeGen/SPIRV/remove-dead-type-intrinsics.ll new file mode 100644 index 0000000000000..6bd640f813142 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/remove-dead-type-intrinsics.ll @@ -0,0 +1,31 @@ +; RUN: llc -O0 -mtriple=spirv-unknown-vulkan1.3-compute %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-vulkan1.3-compute %s -o - -filetype=obj | spirv-val %} + +%A = type { + i32, + i32 +} + +%B = type { + %A, + i32, + %A +} + +; Make sure all struct types are removed. +; CHECK-NOT: OpTypeStruct + +; Make sure the GEPs and the function scope variable are removed. 
+; CHECK: OpFunction +; CHECK-NEXT: OpLabel +; CHECK-NEXT: OpReturn +; CHECK-NEXT: OpFunctionEnd +define void @main() #1 { +entry: + %0 = alloca %B, align 4 + %1 = getelementptr %B, ptr %0, i32 0, i32 2 + %2 = getelementptr %A, ptr %1, i32 0, i32 1 + ret void +} + +attributes #1 = { "hlsl.numthreads"="4,8,16" "hlsl.shader"="compute" } diff --git a/llvm/test/CodeGen/SPIRV/transcoding/OpBitReverse-subbyte.ll b/llvm/test/CodeGen/SPIRV/transcoding/OpBitReverse-subbyte.ll index 481bad9a26b7b..280f586891717 100644 --- a/llvm/test/CodeGen/SPIRV/transcoding/OpBitReverse-subbyte.ll +++ b/llvm/test/CodeGen/SPIRV/transcoding/OpBitReverse-subbyte.ll @@ -19,10 +19,15 @@ ; TODO: Add a check to ensure that there's no behavior change of bitreverse operation ; between the LLVM-IR and SPIR-V for i2 and i4 +@G_res2 = global i2 0 +@G_res4 = global i4 0 + define spir_func void @foo(i2 %a, i4 %b) { entry: %res2 = tail call i2 @llvm.bitreverse.i2(i2 %a) + store i2 %res2, ptr @G_res2 %res4 = tail call i4 @llvm.bitreverse.i4(i4 %b) + store i4 %res4, ptr @G_res4 ret void } diff --git a/llvm/test/CodeGen/SPIRV/transcoding/OpGenericCastToPtr.ll b/llvm/test/CodeGen/SPIRV/transcoding/OpGenericCastToPtr.ll index 119dbe14446c1..68f33510b6a8d 100644 --- a/llvm/test/CodeGen/SPIRV/transcoding/OpGenericCastToPtr.ll +++ b/llvm/test/CodeGen/SPIRV/transcoding/OpGenericCastToPtr.ll @@ -45,6 +45,12 @@ entry: %GE = call spir_func ptr addrspace(1) @_Z41__spirv_GenericCastToPtrExplicit_ToGlobalPvi(ptr addrspace(4) %var1, i32 5) %LE = call spir_func ptr addrspace(3) @_Z40__spirv_GenericCastToPtrExplicit_ToLocalPvi(ptr addrspace(4) %var2, i32 4) %PE = call spir_func ptr @_Z42__spirv_GenericCastToPtrExplicit_ToPrivatePvi(ptr addrspace(4) %var3, i32 7) + store i32 0, ptr addrspace(1) %G, align 4 + store i8 0, ptr addrspace(3) %L, align 1 + store i32 0, ptr %P, align 4 + store i32 0, ptr addrspace(1) %GE, align 4 + store i8 0, ptr addrspace(3) %LE, align 1 + store i32 0, ptr %PE, align 4 ret void } @@ -70,6 +76,9 
@@ entry: %G = call spir_func ptr addrspace(1) @_Z9to_globalPv(ptr addrspace(4) %var1) %L = call spir_func ptr addrspace(3) @_Z8to_localPv(ptr addrspace(4) %var2) %P = call spir_func ptr @_Z10to_privatePv(ptr addrspace(4) %var3) + store i32 0, ptr addrspace(1) %G, align 4 + store i8 0, ptr addrspace(3) %L, align 1 + store i32 0, ptr %P, align 4 ret void } @@ -114,6 +123,12 @@ entry: %GE = call spir_func ptr addrspace(1) @__spirv_GenericCastToPtrExplicit_ToGlobal(ptr addrspace(4) %var1, i32 5) %LE = call spir_func ptr addrspace(3) @__spirv_GenericCastToPtrExplicit_ToLocal(ptr addrspace(4) %var2, i32 4) %PE = call spir_func ptr @__spirv_GenericCastToPtrExplicit_ToPrivate(ptr addrspace(4) %var3, i32 7) + store i32 0, ptr addrspace(1) %G, align 4 + store i8 0, ptr addrspace(3) %L, align 1 + store i32 0, ptr %P, align 4 + store i32 0, ptr addrspace(1) %GE, align 4 + store i8 0, ptr addrspace(3) %LE, align 1 + store i32 0, ptr %PE, align 4 ret void } @@ -139,6 +154,9 @@ entry: %G = call spir_func ptr addrspace(1) @to_global(ptr addrspace(4) %var1) %L = call spir_func ptr addrspace(3) @to_local(ptr addrspace(4) %var2) %P = call spir_func ptr @to_private(ptr addrspace(4) %var3) + store i32 0, ptr addrspace(1) %G, align 4 + store i8 0, ptr addrspace(3) %L, align 1 + store i32 0, ptr %P, align 4 ret void } diff --git a/llvm/test/CodeGen/SPIRV/transcoding/OpPtrCastToGeneric.ll b/llvm/test/CodeGen/SPIRV/transcoding/OpPtrCastToGeneric.ll index 818243ab19e41..9f08a65c16866 100644 --- a/llvm/test/CodeGen/SPIRV/transcoding/OpPtrCastToGeneric.ll +++ b/llvm/test/CodeGen/SPIRV/transcoding/OpPtrCastToGeneric.ll @@ -16,9 +16,13 @@ ; CHECK-SPIRV: OpGenericCastToPtr %[[#LocalCharPtr]] %[[#Ptr2]] ; CHECK-SPIRV: OpFunctionEnd +@G_p = global ptr addrspace(3) null +@G_p2 = global ptr addrspace(3) null + define spir_kernel void @foo(ptr addrspace(1) %arg) { entry: %p = addrspacecast ptr addrspace(1) %arg to ptr addrspace(3) + store ptr addrspace(3) %p, ptr @G_p ret void } @@ -26,5 +30,6 @@ 
define spir_kernel void @bar(ptr addrspace(1) %arg) { entry: %p1 = addrspacecast ptr addrspace(1) %arg to ptr addrspace(4) %p2 = addrspacecast ptr addrspace(4) %p1 to ptr addrspace(3) + store ptr addrspace(3) %p2, ptr @G_p2 ret void } diff --git a/llvm/test/CodeGen/SPIRV/transcoding/fcmp.ll b/llvm/test/CodeGen/SPIRV/transcoding/fcmp.ll index 46eaba9d5ceb1..c752e278927a9 100644 --- a/llvm/test/CodeGen/SPIRV/transcoding/fcmp.ll +++ b/llvm/test/CodeGen/SPIRV/transcoding/fcmp.ll @@ -184,6 +184,8 @@ ; CHECK-SPIRV: %[[#r89]] = OpUnordered %[[#bool]] ; CHECK-SPIRV: %[[#r90]] = OpUnordered %[[#bool]] +@G = global [90 x i1] zeroinitializer + define spir_kernel void @testFCmp(float %a, float %b) local_unnamed_addr { entry: %r1 = fcmp oeq float %a, %b @@ -276,5 +278,185 @@ entry: %r88 = fcmp uno float %a, %b %r89 = fcmp ninf uno float %a, %b %r90 = fcmp nsz uno float %a, %b + %p1 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 0 + store i1 %r1, ptr %p1 + %p2 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 1 + store i1 %r2, ptr %p2 + %p3 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 2 + store i1 %r3, ptr %p3 + %p4 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 3 + store i1 %r4, ptr %p4 + %p5 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 4 + store i1 %r5, ptr %p5 + %p6 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 5 + store i1 %r6, ptr %p6 + %p7 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 6 + store i1 %r7, ptr %p7 + %p8 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 7 + store i1 %r8, ptr %p8 + %p9 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 8 + store i1 %r9, ptr %p9 + %p10 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 9 + store i1 %r10, ptr %p10 + %p11 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 10 + store i1 %r11, ptr %p11 + %p12 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 11 + store i1 %r12, ptr %p12 + %p13 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 12 + 
store i1 %r13, ptr %p13 + %p14 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 13 + store i1 %r14, ptr %p14 + %p15 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 14 + store i1 %r15, ptr %p15 + %p16 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 15 + store i1 %r16, ptr %p16 + %p17 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 16 + store i1 %r17, ptr %p17 + %p18 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 17 + store i1 %r18, ptr %p18 + %p19 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 18 + store i1 %r19, ptr %p19 + %p20 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 19 + store i1 %r20, ptr %p20 + %p21 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 20 + store i1 %r21, ptr %p21 + %p22 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 21 + store i1 %r22, ptr %p22 + %p23 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 22 + store i1 %r23, ptr %p23 + %p24 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 23 + store i1 %r24, ptr %p24 + %p25 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 24 + store i1 %r25, ptr %p25 + %p26 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 25 + store i1 %r26, ptr %p26 + %p27 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 26 + store i1 %r27, ptr %p27 + %p28 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 27 + store i1 %r28, ptr %p28 + %p29 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 28 + store i1 %r29, ptr %p29 + %p30 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 29 + store i1 %r30, ptr %p30 + %p31 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 30 + store i1 %r31, ptr %p31 + %p32 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 31 + store i1 %r32, ptr %p32 + %p33 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 32 + store i1 %r33, ptr %p33 + %p34 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 33 + store i1 %r34, ptr %p34 + %p35 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 34 
+ store i1 %r35, ptr %p35 + %p36 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 35 + store i1 %r36, ptr %p36 + %p37 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 36 + store i1 %r37, ptr %p37 + %p38 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 37 + store i1 %r38, ptr %p38 + %p39 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 38 + store i1 %r39, ptr %p39 + %p40 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 39 + store i1 %r40, ptr %p40 + %p41 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 40 + store i1 %r41, ptr %p41 + %p42 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 41 + store i1 %r42, ptr %p42 + %p43 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 42 + store i1 %r43, ptr %p43 + %p44 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 43 + store i1 %r44, ptr %p44 + %p45 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 44 + store i1 %r45, ptr %p45 + %p46 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 45 + store i1 %r46, ptr %p46 + %p47 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 46 + store i1 %r47, ptr %p47 + %p48 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 47 + store i1 %r48, ptr %p48 + %p49 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 48 + store i1 %r49, ptr %p49 + %p50 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 49 + store i1 %r50, ptr %p50 + %p51 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 50 + store i1 %r51, ptr %p51 + %p52 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 51 + store i1 %r52, ptr %p52 + %p53 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 52 + store i1 %r53, ptr %p53 + %p54 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 53 + store i1 %r54, ptr %p54 + %p55 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 54 + store i1 %r55, ptr %p55 + %p56 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 55 + store i1 %r56, ptr %p56 + %p57 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 
56 + store i1 %r57, ptr %p57 + %p58 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 57 + store i1 %r58, ptr %p58 + %p59 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 58 + store i1 %r59, ptr %p59 + %p60 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 59 + store i1 %r60, ptr %p60 + %p61 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 60 + store i1 %r61, ptr %p61 + %p62 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 61 + store i1 %r62, ptr %p62 + %p63 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 62 + store i1 %r63, ptr %p63 + %p64 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 63 + store i1 %r64, ptr %p64 + %p65 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 64 + store i1 %r65, ptr %p65 + %p66 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 65 + store i1 %r66, ptr %p66 + %p67 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 66 + store i1 %r67, ptr %p67 + %p68 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 67 + store i1 %r68, ptr %p68 + %p69 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 68 + store i1 %r69, ptr %p69 + %p70 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 69 + store i1 %r70, ptr %p70 + %p71 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 70 + store i1 %r71, ptr %p71 + %p72 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 71 + store i1 %r72, ptr %p72 + %p73 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 72 + store i1 %r73, ptr %p73 + %p74 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 73 + store i1 %r74, ptr %p74 + %p75 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 74 + store i1 %r75, ptr %p75 + %p76 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 75 + store i1 %r76, ptr %p76 + %p77 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 76 + store i1 %r77, ptr %p77 + %p78 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 77 + store i1 %r78, ptr %p78 + %p79 = getelementptr inbounds [90 x i1], ptr @G, i32 0, 
i32 78 + store i1 %r79, ptr %p79 + %p80 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 79 + store i1 %r80, ptr %p80 + %p81 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 80 + store i1 %r81, ptr %p81 + %p82 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 81 + store i1 %r82, ptr %p82 + %p83 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 82 + store i1 %r83, ptr %p83 + %p84 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 83 + store i1 %r84, ptr %p84 + %p85 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 84 + store i1 %r85, ptr %p85 + %p86 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 85 + store i1 %r86, ptr %p86 + %p87 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 86 + store i1 %r87, ptr %p87 + %p88 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 87 + store i1 %r88, ptr %p88 + %p89 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 88 + store i1 %r89, ptr %p89 + %p90 = getelementptr inbounds [90 x i1], ptr @G, i32 0, i32 89 + store i1 %r90, ptr %p90 ret void } diff --git a/llvm/test/CodeGen/SPIRV/transcoding/spirv-event-null.ll b/llvm/test/CodeGen/SPIRV/transcoding/spirv-event-null.ll index c8691c32710ad..7658362773218 100644 --- a/llvm/test/CodeGen/SPIRV/transcoding/spirv-event-null.ll +++ b/llvm/test/CodeGen/SPIRV/transcoding/spirv-event-null.ll @@ -31,9 +31,12 @@ %StructEvent = type { target("spirv.Event") } +@G_r = global target("spirv.Event") poison + define spir_kernel void @test_half(ptr addrspace(3) %_arg1, ptr addrspace(1) %_arg2) { entry: %r = tail call spir_func target("spirv.Event") @_Z22__spirv_GroupAsyncCopyjPU3AS3Dv2_DF16_PU3AS1KS_mm9ocl_event(i32 2, ptr addrspace(3) %_arg1, ptr addrspace(1) %_arg2, i64 16, i64 10, target("spirv.Event") zeroinitializer) + store target("spirv.Event") %r, ptr @G_r ret void } @@ -42,7 +45,6 @@ declare dso_local spir_func target("spirv.Event") @_Z22__spirv_GroupAsyncCopyjPU ; CHECK: OpFunction ; CHECK: OpFunctionParameter ; CHECK: %[[#Src:]] = 
OpFunctionParameter -; CHECK: OpVariable %[[#TyStructPtr]] Function ; CHECK: %[[#EventVar:]] = OpVariable %[[#TyEventPtr]] Function ; CHECK: %[[#Dest:]] = OpInBoundsPtrAccessChain ; CHECK: %[[#CopyRes:]] = OpGroupAsyncCopy %[[#TyEvent]] %[[#]] %[[#Dest]] %[[#Src]] %[[#]] %[[#]] %[[#ConstEvent]] diff --git a/llvm/test/CodeGen/SPIRV/uitofp-with-bool.ll b/llvm/test/CodeGen/SPIRV/uitofp-with-bool.ll index 46668645f418b..9c8b4070d834d 100644 --- a/llvm/test/CodeGen/SPIRV/uitofp-with-bool.ll +++ b/llvm/test/CodeGen/SPIRV/uitofp-with-bool.ll @@ -68,6 +68,27 @@ ; SPV-DAG: %[[#ones_64:]] = OpConstantComposite %[[#vec_64]] %[[#one_64]] %[[#one_64]] ; SPV-DAG: %[[#pointer:]] = OpTypePointer CrossWorkgroup %[[#float]] +@G_s1 = global i8 0 +@G_s2 = global i16 0 +@G_s3 = global i32 0 +@G_s4 = global i64 0 +@G_s5 = global <2 x i8> zeroinitializer +@G_s6 = global <2 x i16> zeroinitializer +@G_s7 = global <2 x i32> zeroinitializer +@G_s8 = global <2 x i64> zeroinitializer +@G_z1 = global i8 0 +@G_z2 = global i16 0 +@G_z3 = global i32 0 +@G_z4 = global i64 0 +@G_z5 = global <2 x i8> zeroinitializer +@G_z6 = global <2 x i16> zeroinitializer +@G_z7 = global <2 x i32> zeroinitializer +@G_z8 = global <2 x i64> zeroinitializer +@G_ufp1 = global float 0.0 +@G_ufp2 = global <2 x float> zeroinitializer +@G_sfp1 = global float 0.0 +@G_sfp2 = global <2 x float> zeroinitializer + ; SPV-DAG: OpFunction ; SPV-DAG: %[[#A:]] = OpFunctionParameter %[[#pointer]] ; SPV-DAG: %[[#B:]] = OpFunctionParameter %[[#]] @@ -87,47 +108,67 @@ entry: ; SPV-DAG: %[[#s1]] = OpSelect %[[#int_8]] %[[#i1s]] %[[#mone_8]] %[[#zero_8]] %s1 = sext i1 %i1s to i8 + store i8 %s1, ptr @G_s1 ; SPV-DAG: %[[#s2]] = OpSelect %[[#int_16]] %[[#i1s]] %[[#mone_16]] %[[#zero_16]] %s2 = sext i1 %i1s to i16 + store i16 %s2, ptr @G_s2 ; SPV-DAG: %[[#s3]] = OpSelect %[[#int_32]] %[[#i1s]] %[[#mone_32]] %[[#zero_32]] %s3 = sext i1 %i1s to i32 + store i32 %s3, ptr @G_s3 ; SPV-DAG: %[[#s4]] = OpSelect %[[#int_64]] %[[#i1s]] %[[#mone_64]] 
%[[#zero_64]] %s4 = sext i1 %i1s to i64 + store i64 %s4, ptr @G_s4 ; SPV-DAG: %[[#s5]] = OpSelect %[[#vec_8]] %[[#i1v]] %[[#mones_8]] %[[#zeros_8]] %s5 = sext <2 x i1> %i1v to <2 x i8> + store <2 x i8> %s5, ptr @G_s5 ; SPV-DAG: %[[#s6]] = OpSelect %[[#vec_16]] %[[#i1v]] %[[#mones_16]] %[[#zeros_16]] %s6 = sext <2 x i1> %i1v to <2 x i16> + store <2 x i16> %s6, ptr @G_s6 ; SPV-DAG: %[[#s7]] = OpSelect %[[#vec_32]] %[[#i1v]] %[[#mones_32]] %[[#zeros_32]] %s7 = sext <2 x i1> %i1v to <2 x i32> + store <2 x i32> %s7, ptr @G_s7 ; SPV-DAG: %[[#s8]] = OpSelect %[[#vec_64]] %[[#i1v]] %[[#mones_64]] %[[#zeros_64]] %s8 = sext <2 x i1> %i1v to <2 x i64> + store <2 x i64> %s8, ptr @G_s8 ; SPV-DAG: %[[#z1]] = OpSelect %[[#int_8]] %[[#i1s]] %[[#one_8]] %[[#zero_8]] %z1 = zext i1 %i1s to i8 + store i8 %z1, ptr @G_z1 ; SPV-DAG: %[[#z2]] = OpSelect %[[#int_16]] %[[#i1s]] %[[#one_16]] %[[#zero_16]] %z2 = zext i1 %i1s to i16 + store i16 %z2, ptr @G_z2 ; SPV-DAG: %[[#z3]] = OpSelect %[[#int_32]] %[[#i1s]] %[[#one_32]] %[[#zero_32]] %z3 = zext i1 %i1s to i32 + store i32 %z3, ptr @G_z3 ; SPV-DAG: %[[#z4]] = OpSelect %[[#int_64]] %[[#i1s]] %[[#one_64]] %[[#zero_64]] %z4 = zext i1 %i1s to i64 + store i64 %z4, ptr @G_z4 ; SPV-DAG: %[[#z5]] = OpSelect %[[#vec_8]] %[[#i1v]] %[[#ones_8]] %[[#zeros_8]] %z5 = zext <2 x i1> %i1v to <2 x i8> + store <2 x i8> %z5, ptr @G_z5 ; SPV-DAG: %[[#z6]] = OpSelect %[[#vec_16]] %[[#i1v]] %[[#ones_16]] %[[#zeros_16]] %z6 = zext <2 x i1> %i1v to <2 x i16> + store <2 x i16> %z6, ptr @G_z6 ; SPV-DAG: %[[#z7]] = OpSelect %[[#vec_32]] %[[#i1v]] %[[#ones_32]] %[[#zeros_32]] %z7 = zext <2 x i1> %i1v to <2 x i32> + store <2 x i32> %z7, ptr @G_z7 ; SPV-DAG: %[[#z8]] = OpSelect %[[#vec_64]] %[[#i1v]] %[[#ones_64]] %[[#zeros_64]] %z8 = zext <2 x i1> %i1v to <2 x i64> + store <2 x i64> %z8, ptr @G_z8 ; SPV-DAG: %[[#ufp1_res:]] = OpSelect %[[#int_32]] %[[#i1s]] %[[#one_32]] %[[#zero_32]] ; SPV-DAG: %[[#ufp1]] = OpConvertUToF %[[#float]] %[[#ufp1_res]] %ufp1 = uitofp i1 %i1s 
to float + store float %ufp1, ptr @G_ufp1 ; SPV-DAG: %[[#ufp2_res:]] = OpSelect %[[#vec_32]] %[[#i1v]] %[[#ones_32]] %[[#zeros_32]] ; SPV-DAG: %[[#ufp2]] = OpConvertUToF %[[#vec_float]] %[[#ufp2_res]] %ufp2 = uitofp <2 x i1> %i1v to <2 x float> + store <2 x float> %ufp2, ptr @G_ufp2 ; SPV-DAG: %[[#sfp1_res:]] = OpSelect %[[#int_32]] %[[#i1s]] %[[#one_32]] %[[#zero_32]] ; SPV-DAG: %[[#sfp1]] = OpConvertSToF %[[#float]] %[[#sfp1_res]] %sfp1 = sitofp i1 %i1s to float + store float %sfp1, ptr @G_sfp1 ; SPV-DAG: %[[#sfp2_res:]] = OpSelect %[[#vec_32]] %[[#i1v]] %[[#ones_32]] %[[#zeros_32]] ; SPV-DAG: %[[#sfp2]] = OpConvertSToF %[[#vec_float]] %[[#sfp2_res]] %sfp2 = sitofp <2 x i1> %i1v to <2 x float> + store <2 x float> %sfp2, ptr @G_sfp2 ret void } diff --git a/llvm/test/CodeGen/SystemZ/zos-ppa1-argarea.ll b/llvm/test/CodeGen/SystemZ/zos-ppa1-argarea.ll new file mode 100644 index 0000000000000..511bc46567607 --- /dev/null +++ b/llvm/test/CodeGen/SystemZ/zos-ppa1-argarea.ll @@ -0,0 +1,66 @@ +; RUN: llc < %s -mtriple=s390x-ibm-zos -emit-gnuas-syntax-on-zos=0 | FileCheck %s +%struct.LargeStruct_t = type { [33 x i32] } + +@GlobLargeS = hidden global %struct.LargeStruct_t zeroinitializer, align 4 +@GlobInt = hidden global i32 0, align 4 + +; === Check that function with small frame does not emit PPA1 Argument Area Length. +define void @fSmallOutArgArea() { +; CHECK-LABEL: L#EPM_fSmallOutArgArea_0 DS 0H +; CHECK: * Bit 1: 1 = Leaf function +; CHECK: * Bit 2: 0 = Does not use alloca +; CHECK: DC XL4'00000008' +; CHECK: fSmallOutArgArea DS 0H +; CHECK: L#PPA1_fSmallOutArgArea_0 DS 0H +; CHECK: * PPA1 Flags 3 +; CHECK: DC XL1'00' + ret void +} + +; === Check that function with large frame does emit PPA1 Argument Area Length. 
+define void @fLargeOutArgArea() { +; CHECK-LABEL: L#EPM_fLargeOutArgArea_0 DS 0H +; CHECK: * Bit 1: 0 = Non-leaf function +; CHECK: * Bit 2: 0 = Does not use alloca +; CHECK: DC XL4'00000220' +; CHECK: fLargeOutArgArea DS 0H +; CHECK: L#PPA1_fLargeOutArgArea_0 DS 0H +; CHECK: * PPA1 Flags 3 +; CHECK: * Bit 1: 1 = Argument Area Length is in optional area +; CHECK: DC XL1'40' +; CHECK: * Argument Area Length +; CHECK: DC XL4'00000140' + %1 = load [33 x i32], ptr @GlobLargeS, align 4 + call void @fLargeParm([33 x i32] inreg %1) + ret void +} + +; === Check that function with parameter does emit PPA1 Length/4 of parms +define void @fLargeParm([33 x i64] inreg %arr) { +; CHECK-LABEL: L#EPM_fLargeParm_0 DS 0H +; CHECK: * Length/4 of Parms +; CHECK: DC XL2'0042' + %1 = extractvalue [33 x i64] %arr, 1 + call void @foo(i64 %1) + ret void +} + +; === Check that function with alloca call does emit PPA1 Argument Area Length. +define hidden void @fHasAlloca() { +; CHECK-LABEL: L#EPM_fHasAlloca_0 DS 0H +; CHECK: * Bit 2: 1 = Uses alloca +; CHECK: fHasAlloca DS 0H +; CHECK: L#PPA1_fHasAlloca_0 DS 0H +; CHECK: * PPA1 Flags 3 +; CHECK: * Bit 1: 1 = Argument Area Length is in optional area +; CHECK: DC XL1'40' +; CHECK: * Argument Area Length +; CHECK: DC XL4'00000040' + %p = alloca ptr, align 4 + %1 = load i32, ptr @GlobInt, align 4 + %2 = alloca i8, i32 %1, align 8 + store ptr %2, ptr %p, align 4 + ret void +} + +declare void @foo(i64) diff --git a/llvm/test/CodeGen/X86/addcarry.ll b/llvm/test/CodeGen/X86/addcarry.ll index 97894db1188e2..f8a04f8514988 100644 --- a/llvm/test/CodeGen/X86/addcarry.ll +++ b/llvm/test/CodeGen/X86/addcarry.ll @@ -1513,3 +1513,41 @@ define i1 @pr84831(i64 %arg) { %trunc = trunc i63 %or to i1 ret i1 %trunc } + +define void @pr169691(ptr %p0, i64 %implicit, i1 zeroext %carry) { +; CHECK-LABEL: pr169691: +; CHECK: # %bb.0: +; CHECK-NEXT: movq (%rdi), %rax +; CHECK-NEXT: addq %rsi, %rax +; CHECK-NEXT: setb %cl +; CHECK-NEXT: movl %edx, %edx +; CHECK-NEXT: 
addq %rax, %rdx +; CHECK-NEXT: setb %al +; CHECK-NEXT: orb %cl, %al +; CHECK-NEXT: movq %rdx, (%rdi) +; CHECK-NEXT: addq 8(%rdi), %rsi +; CHECK-NEXT: movzbl %al, %eax +; CHECK-NEXT: addq %rsi, %rax +; CHECK-NEXT: movq %rax, 8(%rdi) +; CHECK-NEXT: retq + %a0 = load i64, ptr %p0, align 8 + %uaddo0 = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a0, i64 %implicit) + %uaddo0.1 = extractvalue { i64, i1 } %uaddo0, 1 + %uaddo0.0 = extractvalue { i64, i1 } %uaddo0, 0 + %zextc = zext i1 %carry to i64 + %uaddo0b = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %uaddo0.0, i64 %zextc) + %uaddo0b.1 = extractvalue { i64, i1 } %uaddo0b, 1 + %uaddo0b.0 = extractvalue { i64, i1 } %uaddo0b, 0 + %carry0 = or i1 %uaddo0.1, %uaddo0b.1 + store i64 %uaddo0b.0, ptr %p0, align 8 + + %p1 = getelementptr inbounds nuw i8, ptr %p0, i64 8 + %a1 = load i64, ptr %p1, align 8 + %uaddo1 = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a1, i64 %implicit) + %uaddo1.0 = extractvalue { i64, i1 } %uaddo1, 0 + %zext0 = zext i1 %carry0 to i64 + %uaddo1b = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %uaddo1.0, i64 %zext0) + %uaddo1b.0 = extractvalue { i64, i1 } %uaddo1b, 0 + store i64 %uaddo1b.0, ptr %p1, align 8 + ret void +} diff --git a/llvm/test/DebugInfo/RISCV/relax_dwo_ranges.ll b/llvm/test/DebugInfo/RISCV/relax_dwo_ranges.ll index ab76ce04dcdb0..3916a205dd19c 100644 --- a/llvm/test/DebugInfo/RISCV/relax_dwo_ranges.ll +++ b/llvm/test/DebugInfo/RISCV/relax_dwo_ranges.ll @@ -20,68 +20,81 @@ ; RUN: llc -dwarf-version=5 -split-dwarf-file=foo.dwo -O0 -mtriple=riscv64-unknown-linux-gnu -filetype=obj relax_dwo_ranges.ll -o %t.o ; RUN: llvm-dwarfdump -v %t.o | FileCheck --check-prefix=DWARF5 %s -; RUN: llvm-dwarfdump --debug-info %t.o 2> %t.txt -; RUN: FileCheck --input-file=%t.txt %s --check-prefix=RELOCS --implicit-check-not=warning: +; RUN: llvm-dwarfdump --debug-info %t.o > /dev/null 2>&1 | count 0 +; RUN: llvm-objdump -h %t.o | FileCheck --check-prefix=HDR %s ; RUN: llc 
-dwarf-version=4 -split-dwarf-file=foo.dwo -O0 -mtriple=riscv64-unknown-linux-gnu -filetype=obj relax_dwo_ranges.ll -o %t.o ; RUN: llvm-dwarfdump -v %t.o | FileCheck --check-prefix=DWARF4 %s -; RUN: llvm-dwarfdump --debug-info %t.o 2> %t.txt -; RUN: FileCheck --input-file=%t.txt %s --check-prefix=RELOCS --implicit-check-not=warning: +; RUN: llvm-dwarfdump --debug-info %t.o > /dev/null 2>&1 | count 0 +; RUN: llvm-objdump -h %t.o | FileCheck --check-prefix=HDR %s -; Currently, square() still uses an offset to represent the function's end address, -; which requires a relocation here. -; RELOCS: warning: unexpected relocations for dwo section '.debug_info.dwo' +; Make sure we don't produce any relocations in any .dwo section +; HDR-NOT: .rela.{{.*}}.dwo +; Ensure that 'square()' function uses indexed start and end addresses ; DWARF5: .debug_info.dwo contents: ; DWARF5: DW_TAG_subprogram -; DWARF5-NEXT: DW_AT_low_pc [DW_FORM_addrx] (indexed (00000000) address = 0x0000000000000000 ".text") -; DWARF5-NEXT: DW_AT_high_pc [DW_FORM_data4] (0x00000000) -; DWARF5: DW_AT_name {{.*}} "square") +; DWARF5-NEXT: DW_AT_low_pc [DW_FORM_addrx] (indexed (00000000) address = 0x0000000000000000 ".text") +; DWARF5-NEXT: DW_AT_high_pc [DW_FORM_addrx] (indexed (00000001) address = 0x000000000000002c ".text") +; DWARF5: DW_AT_name {{.*}} "square") ; DWARF5: DW_TAG_formal_parameter +; HDR-NOT: .rela.{{.*}}.dwo + ; Ensure there is no unnecessary addresses in .o file ; DWARF5: .debug_addr contents: ; DWARF5: Addrs: [ ; DWARF5-NEXT: 0x0000000000000000 ; DWARF5-NEXT: 0x000000000000002c +; DWARF5-NEXT: 0x000000000000002c ; DWARF5-NEXT: 0x000000000000003e ; DWARF5-NEXT: 0x000000000000006e ; DWARF5-NEXT: ] +; HDR-NOT: .rela.{{.*}}.dwo + ; Ensure that 'boo()' and 'main()' use DW_RLE_startx_length and DW_RLE_startx_endx ; entries respectively ; DWARF5: .debug_rnglists.dwo contents: ; DWARF5: ranges: -; DWARF5-NEXT: 0x00000014: [DW_RLE_startx_length]: 0x0000000000000001, 0x0000000000000012 => 
[0x000000000000002c, 0x000000000000003e) +; DWARF5-NEXT: 0x00000014: [DW_RLE_startx_length]: 0x0000000000000002, 0x0000000000000012 => [0x000000000000002c, 0x000000000000003e) ; DWARF5-NEXT: 0x00000017: [DW_RLE_end_of_list ] -; DWARF5-NEXT: 0x00000018: [DW_RLE_startx_endx ]: 0x0000000000000002, 0x0000000000000003 => [0x000000000000003e, 0x000000000000006e) +; DWARF5-NEXT: 0x00000018: [DW_RLE_startx_endx ]: 0x0000000000000003, 0x0000000000000004 => [0x000000000000003e, 0x000000000000006e) ; DWARF5-NEXT: 0x0000001b: [DW_RLE_end_of_list ] ; DWARF5-EMPTY: +; HDR-NOT: .rela.{{.*}}.dwo + ; DWARF4: .debug_info.dwo contents: ; DWARF4: DW_TAG_subprogram -; DWARF4-NEXT: DW_AT_low_pc [DW_FORM_GNU_addr_index] (indexed (00000000) address = 0x0000000000000000 ".text") -; DWARF4-NEXT: DW_AT_high_pc [DW_FORM_data4] (0x00000000) -; DWARF4: DW_AT_name {{.*}} "square") +; DWARF4-NEXT: DW_AT_low_pc [DW_FORM_GNU_addr_index] (indexed (00000000) address = 0x0000000000000000 ".text") +; DWARF4-NEXT: DW_AT_high_pc [DW_FORM_GNU_addr_index] (indexed (00000001) address = 0x000000000000002c ".text") +; DWARF4: DW_AT_name {{.*}} "square") ; DWARF4: DW_TAG_subprogram -; DWARF4-NEXT: DW_AT_low_pc [DW_FORM_GNU_addr_index] (indexed (00000001) address = 0x000000000000002c ".text") +; DWARF4-NEXT: DW_AT_low_pc [DW_FORM_GNU_addr_index] (indexed (00000002) address = 0x000000000000002c ".text") ; DWARF4-NEXT: DW_AT_high_pc [DW_FORM_data4] (0x00000012) -; DWARF4: DW_AT_name {{.*}} "boo") +; DWARF4: DW_AT_name {{.*}} "boo") ; DWARF4: DW_TAG_subprogram -; DWARF4-NEXT: DW_AT_low_pc [DW_FORM_GNU_addr_index] (indexed (00000002) address = 0x000000000000003e ".text") -; DWARF4-NEXT: DW_AT_high_pc [DW_FORM_data4] (0x00000000) -; DWARF4: DW_AT_name {{.*}} "main") +; DWARF4-NEXT: DW_AT_low_pc [DW_FORM_GNU_addr_index] (indexed (00000003) address = 0x000000000000003e ".text") +; DWARF4-NEXT: DW_AT_high_pc [DW_FORM_GNU_addr_index] (indexed (00000004) address = 0x000000000000006e ".text") +; DWARF4: DW_AT_name {{.*}} 
"main") + +; HDR-NOT: .rela.{{.*}}.dwo ; Ensure there is no unnecessary addresses in .o file ; DWARF4: .debug_addr contents: ; DWARF4: Addrs: [ ; DWARF4-NEXT: 0x0000000000000000 ; DWARF4-NEXT: 0x000000000000002c +; DWARF4-NEXT: 0x000000000000002c ; DWARF4-NEXT: 0x000000000000003e +; DWARF4-NEXT: 0x000000000000006e ; DWARF4-NEXT: ] +; HDR-NOT: .rela.{{.*}}.dwo + #--- relax_dwo_ranges.cpp __attribute__((noinline)) int boo(); diff --git a/llvm/test/Instrumentation/BoundsChecking/runtimes.ll b/llvm/test/Instrumentation/BoundsChecking/runtimes.ll index 84dd51cd3fa28..74e1eef7ebe35 100644 --- a/llvm/test/Instrumentation/BoundsChecking/runtimes.ll +++ b/llvm/test/Instrumentation/BoundsChecking/runtimes.ll @@ -8,6 +8,9 @@ ; RUN: opt < %s -passes='bounds-checking' -S | FileCheck %s --check-prefixes=RTABORT-NOMERGE ; RUN: opt < %s -passes='bounds-checking' -S | FileCheck %s --check-prefixes=MINRT-NOMERGE ; RUN: opt < %s -passes='bounds-checking' -S | FileCheck %s --check-prefixes=MINRTABORT-NOMERGE + +; RUN: opt < %s -passes='bounds-checking' -S | FileCheck %s --check-prefixes=MINRT-PRESERVE-NOMERGE +; RUN: opt < %s -passes='bounds-checking' -S | FileCheck %s --check-prefixes=MINRTABORT-NOMERGE ; ; RUN: opt < %s -passes='bounds-checking' -S | FileCheck %s --check-prefixes=TR-GUARD-COMMON,TR-GUARD-THREE ; RUN: opt < %s -passes='bounds-checking' -S | FileCheck %s --check-prefixes=TR-GUARD-COMMON,TR-GUARD-THIRTEEN @@ -95,6 +98,22 @@ define void @f1(i64 %x) nounwind { ; RTABORT-NOMERGE-NEXT: call void @__ubsan_handle_local_out_of_bounds_abort() #[[ATTR2:[0-9]+]], !nosanitize [[META0]] ; RTABORT-NOMERGE-NEXT: unreachable, !nosanitize [[META0]] ; +; MINRT-PRESERVE-NOMERGE-LABEL: define void @f1( +; MINRT-PRESERVE-NOMERGE-SAME: i64 [[X:%.*]]) #[[ATTR0:[0-9]+]] { +; MINRT-PRESERVE-NOMERGE-NEXT: [[TMP1:%.*]] = mul i64 16, [[X]] +; MINRT-PRESERVE-NOMERGE-NEXT: [[TMP2:%.*]] = alloca i128, i64 [[X]], align 8 +; MINRT-PRESERVE-NOMERGE-NEXT: [[TMP3:%.*]] = sub i64 [[TMP1]], 0, !nosanitize 
[[META0:![0-9]+]] +; MINRT-PRESERVE-NOMERGE-NEXT: [[TMP4:%.*]] = icmp ult i64 [[TMP3]], 16, !nosanitize [[META0]] +; MINRT-PRESERVE-NOMERGE-NEXT: [[TMP5:%.*]] = or i1 false, [[TMP4]], !nosanitize [[META0]] +; MINRT-PRESERVE-NOMERGE-NEXT: [[TMP6:%.*]] = or i1 false, [[TMP5]], !nosanitize [[META0]] +; MINRT-PRESERVE-NOMERGE-NEXT: br i1 [[TMP6]], label %[[TRAP:.*]], label %[[BB7:.*]] +; MINRT-PRESERVE-NOMERGE: [[BB7]]: +; MINRT-PRESERVE-NOMERGE-NEXT: [[TMP8:%.*]] = load i128, ptr [[TMP2]], align 4 +; MINRT-PRESERVE-NOMERGE-NEXT: ret void +; MINRT-PRESERVE-NOMERGE: [[TRAP]]: +; MINRT-PRESERVE-NOMERGE-NEXT: call preserve_allcc void @__ubsan_handle_local_out_of_bounds_minimal_preserve() #[[ATTR1:[0-9]+]], !nosanitize [[META0]] +; MINRT-PRESERVE-NOMERGE-NEXT: br label %[[BB7]], !nosanitize [[META0]] +; ; MINRT-NOMERGE-LABEL: define void @f1( ; MINRT-NOMERGE-SAME: i64 [[X:%.*]]) #[[ATTR0:[0-9]+]] { ; MINRT-NOMERGE-NEXT: [[TMP1:%.*]] = mul i64 16, [[X]] diff --git a/llvm/test/MC/PowerPC/ppc64-encoding-ISA31-errors.s b/llvm/test/MC/PowerPC/ppc64-encoding-ISA31-errors.s deleted file mode 100644 index 69cdb5cb75ebb..0000000000000 --- a/llvm/test/MC/PowerPC/ppc64-encoding-ISA31-errors.s +++ /dev/null @@ -1,71 +0,0 @@ -# RUN: not llvm-mc -triple powerpc64-unknown-unknown < %s 2> %t -# RUN: FileCheck < %t %s -# RUN: not llvm-mc -triple powerpc64le-unknown-unknown < %s 2> %t -# RUN: FileCheck < %t %s - - # CHECK: error: invalid operand for instruction -paddi 1, 1, 32, 1 - -# CHECK: error: invalid operand for instruction -pld 1, 32(1), 1 - -# CHECK: error: invalid operand for instruction -paddi 1, 1, 32, 1 - -# CHECK: error: invalid operand for instruction -plbz 1, 32(1), 1 - -# CHECK: error: invalid operand for instruction -plfd 1, 32(1), 1 - -# CHECK: error: invalid operand for instruction -plfs 1, 32(1), 1 - -# CHECK: error: invalid operand for instruction -plha 1, 32(1), 1 - -# CHECK: error: invalid operand for instruction -plhz 1, 32(1), 1 - -# CHECK: error: invalid operand 
for instruction -plwa 1, 32(1), 1 - -# CHECK: error: invalid operand for instruction -plwz 1, 32(1), 1 - -# CHECK: error: invalid operand for instruction -plxsd 1, 32(1), 1 - -# CHECK: error: invalid operand for instruction -plxssp 1, 32(1), 1 - -# CHECK: error: invalid operand for instruction -plxv 1, 32(1), 1 - -# CHECK: error: invalid operand for instruction -pstb 1, 32(1), 1 - -# CHECK: error: invalid operand for instruction -pstd 1, 32(1), 1 - -# CHECK: error: invalid operand for instruction -pstfd 1, 32(1), 1 - -# CHECK: error: invalid operand for instruction -pstfs 1, 32(1), 1 - -# CHECK: error: invalid operand for instruction -psth 1, 32(1), 1 - -# CHECK: error: invalid operand for instruction -pstw 1, 32(1), 1 - -# CHECK: error: invalid operand for instruction -pstxsd 1, 32(1), 1 - -# CHECK: error: invalid operand for instruction -pstxssp 1, 32(1), 1 - -# CHECK: error: invalid operand for instruction -pstxv 1, 32(1), 1 - diff --git a/llvm/test/MC/PowerPC/ppc64-errors.s b/llvm/test/MC/PowerPC/ppc64-errors.s index 17905a396885a..8598174300e42 100644 --- a/llvm/test/MC/PowerPC/ppc64-errors.s +++ b/llvm/test/MC/PowerPC/ppc64-errors.s @@ -4,6 +4,71 @@ # RUN: not llvm-mc -triple powerpc64le-unknown-unknown < %s 2> %t # RUN: FileCheck < %t %s +# From ISA31 + +# CHECK: error: invalid operand for instruction +paddi 1, 1, 32, 1 + +# CHECK: error: invalid operand for instruction +pld 1, 32(1), 1 + +# CHECK: error: invalid operand for instruction +plbz 1, 32(1), 1 + +# CHECK: error: invalid operand for instruction +plfd 1, 32(1), 1 + +# CHECK: error: invalid operand for instruction +plfs 1, 32(1), 1 + +# CHECK: error: invalid operand for instruction +plha 1, 32(1), 1 + +# CHECK: error: invalid operand for instruction +plhz 1, 32(1), 1 + +# CHECK: error: invalid operand for instruction +plwa 1, 32(1), 1 + +# CHECK: error: invalid operand for instruction +plwz 1, 32(1), 1 + +# CHECK: error: invalid operand for instruction +plxsd 1, 32(1), 1 + +# CHECK: error: invalid 
operand for instruction +plxssp 1, 32(1), 1 + +# CHECK: error: invalid operand for instruction +plxv 1, 32(1), 1 + +# CHECK: error: invalid operand for instruction +pstb 1, 32(1), 1 + +# CHECK: error: invalid operand for instruction +pstd 1, 32(1), 1 + +# CHECK: error: invalid operand for instruction +pstfd 1, 32(1), 1 + +# CHECK: error: invalid operand for instruction +pstfs 1, 32(1), 1 + +# CHECK: error: invalid operand for instruction +psth 1, 32(1), 1 + +# CHECK: error: invalid operand for instruction +pstw 1, 32(1), 1 + +# CHECK: error: invalid operand for instruction +pstxsd 1, 32(1), 1 + +# CHECK: error: invalid operand for instruction +pstxssp 1, 32(1), 1 + +# CHECK: error: invalid operand for instruction +pstxv 1, 32(1), 1 + # Register operands # CHECK: error: invalid operand for instruction diff --git a/llvm/test/TableGen/DuplicateFieldValues.td b/llvm/test/TableGen/DuplicateFieldValues.td index 50c77fa88ccec..85cb5bbfb6c56 100644 --- a/llvm/test/TableGen/DuplicateFieldValues.td +++ b/llvm/test/TableGen/DuplicateFieldValues.td @@ -82,3 +82,4 @@ let BaseName = "0" in { def E0 : I, ABCRel, isEForm; } +defm : RemapAllTargetPseudoPointerOperands; diff --git a/llvm/test/TableGen/RegClassByHwMode.td b/llvm/test/TableGen/RegClassByHwMode.td index a21a396f7fd52..ec723f8b70478 100644 --- a/llvm/test/TableGen/RegClassByHwMode.td +++ b/llvm/test/TableGen/RegClassByHwMode.td @@ -13,6 +13,7 @@ include "llvm/Target/Target.td" // INSTRINFO-EMPTY: // INSTRINFO-NEXT: enum { // INSTRINFO-NEXT: PHI +// INSTRINFO: LOAD_STACK_GUARD = [[LOAD_STACK_GUARD_OPCODE:[0-9]+]] // INSTRINFO: }; // INSTRINFO: enum RegClassByHwModeUses : uint16_t { // INSTRINFO-NEXT: MyPtrRC, @@ -22,10 +23,20 @@ include "llvm/Target/Target.td" // INSTRINFO-EMPTY: // INSTRINFO-NEXT: } // namespace llvm::MyTarget + +// INSTRINFO: { [[LOAD_STACK_GUARD_OPCODE]], 1, 1, 0, 0, 0, 0, [[LOAD_STACK_GUARD_OP_INDEX:[0-9]+]], MyTargetImpOpBase + 0, 0|(1ULL<; +defm : RemapAllTargetPseudoPointerOperands; + def 
MyTargetISA : InstrInfo; def MyTarget : Target { let InstructionSet = MyTargetISA; } diff --git a/llvm/test/TableGen/def-multiple-operands.td b/llvm/test/TableGen/def-multiple-operands.td index 5d215056920e8..dc5ea09eff9ba 100644 --- a/llvm/test/TableGen/def-multiple-operands.td +++ b/llvm/test/TableGen/def-multiple-operands.td @@ -35,3 +35,5 @@ def InstA : Instruction { field bits<8> SoftFail = 0; let hasSideEffects = false; } + +defm : RemapAllTargetPseudoPointerOperands; diff --git a/llvm/test/TableGen/get-named-operand-idx.td b/llvm/test/TableGen/get-named-operand-idx.td index b3569510dd6fc..7982822c0a895 100644 --- a/llvm/test/TableGen/get-named-operand-idx.td +++ b/llvm/test/TableGen/get-named-operand-idx.td @@ -48,6 +48,8 @@ def InstD : InstBase { let UseNamedOperandTable = 0; } +defm : RemapAllTargetPseudoPointerOperands; + // CHECK-LABEL: #ifdef GET_INSTRINFO_OPERAND_ENUM // CHECK-NEXT: #undef GET_INSTRINFO_OPERAND_ENUM // CHECK-EMPTY: diff --git a/llvm/test/TableGen/get-operand-type-no-expand.td b/llvm/test/TableGen/get-operand-type-no-expand.td index a0a8fa957f9b6..fcaf3684528b2 100644 --- a/llvm/test/TableGen/get-operand-type-no-expand.td +++ b/llvm/test/TableGen/get-operand-type-no-expand.td @@ -46,3 +46,5 @@ def InstA : Instruction { // CHECK-NOEXPAND: /* InstA */ // CHECK-NOEXPAND-NEXT: i512complex, i8complex, i32imm, // CHECK-NOEXPAND: #endif // GET_INSTRINFO_OPERAND_TYPE + +defm : RemapAllTargetPseudoPointerOperands; diff --git a/llvm/test/TableGen/get-operand-type.td b/llvm/test/TableGen/get-operand-type.td index b2f63cafd6a89..49fbb63ac5974 100644 --- a/llvm/test/TableGen/get-operand-type.td +++ b/llvm/test/TableGen/get-operand-type.td @@ -18,6 +18,8 @@ def OpB : Operand; def RegOp : RegisterOperand; +defm : RemapAllTargetPseudoPointerOperands; + def InstA : Instruction { let Size = 1; let OutOperandList = (outs OpA:$a); diff --git a/llvm/test/TableGen/target-specialized-pseudos.td b/llvm/test/TableGen/target-specialized-pseudos.td index 
99c63f3ec29d9..3953a36101fe0 100644 --- a/llvm/test/TableGen/target-specialized-pseudos.td +++ b/llvm/test/TableGen/target-specialized-pseudos.td @@ -1,6 +1,11 @@ -// RUN: llvm-tblgen -gen-instr-info -I %p/../../include %s -DONECASE -o - | FileCheck -check-prefixes=CHECK,ONECASE %s // RUN: llvm-tblgen -gen-instr-info -I %p/../../include %s -DALLCASES -o - | FileCheck -check-prefixes=CHECK,ALLCASES %s -// RUN: not llvm-tblgen -gen-instr-info -I %p/../../include %s -DERROR -o /dev/null 2>&1 | FileCheck -check-prefix=ERROR %s +// RUN: not llvm-tblgen -gen-instr-info -I %p/../../include %s -DONECASE -o /dev/null 2>&1 | FileCheck -check-prefixes=ERROR-MISSING %s +// RUN: not llvm-tblgen -gen-instr-info -I %p/../../include %s -DMULTIPLE_OVERRIDE_ERROR -o /dev/null 2>&1 | FileCheck -implicit-check-not=error: -check-prefix=MULTIPLE-OVERRIDE-ERROR %s +// RUN: not llvm-tblgen -gen-instr-info -I %p/../../include %s -DALLCASES -DERROR_NONPSEUDO -o /dev/null 2>&1 | FileCheck -implicit-check-not=error: -check-prefix=ERROR-NONPSEUDO %s + + +// def PREALLOCATED_ARG : StandardPseudoInstruction { + // CHECK: namespace llvm::MyTarget { // CHECK: enum { @@ -20,8 +25,6 @@ // CHECK-NEXT: { [[MY_MOV_OPCODE]], 2, 1, 2, 0, 0, 0, {{[0-9]+}}, MyTargetImpOpBase + 0, 0|(1ULL<; #endif -#ifdef ERROR +#ifdef MULTIPLE_OVERRIDE_ERROR def MY_LOAD_STACK_GUARD_0 : TargetSpecializedStandardPseudoInstruction; -// ERROR: :[[@LINE+1]]:5: error: multiple overrides of 'LOAD_STACK_GUARD' defined +// MULTIPLE-OVERRIDE-ERROR: :[[@LINE+1]]:5: error: multiple overrides of 'LOAD_STACK_GUARD' defined def MY_LOAD_STACK_GUARD_1 : TargetSpecializedStandardPseudoInstruction; #endif +#ifdef ERROR_NONPSEUDO + +// FIXME: Double error +// ERROR-NONPSEUDO: [[@LINE+2]]:5: error: non-pseudoinstruction user of PointerLikeRegClass +// ERROR-NONPSEUDO: [[@LINE+1]]:5: error: non-pseudoinstruction user of PointerLikeRegClass +def NON_PSEUDO : TestInstruction { + let OutOperandList = (outs XRegs:$dst); + let InOperandList = (ins 
ptr_rc:$src); + let AsmString = "non_pseudo $dst, $src"; +} + +#endif + def MY_MOV : TestInstruction { let OutOperandList = (outs XRegs:$dst); let InOperandList = (ins XRegs:$src); diff --git a/llvm/test/Transforms/IndVarSimplify/skip-predication-convergence.ll b/llvm/test/Transforms/IndVarSimplify/skip-predication-convergence.ll new file mode 100644 index 0000000000000..59b84a3c082c2 --- /dev/null +++ b/llvm/test/Transforms/IndVarSimplify/skip-predication-convergence.ll @@ -0,0 +1,64 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt < %s -passes=indvars -indvars-predicate-loops=1 -S | FileCheck %s + +; Loop with body using loop convergence token should be skipped by IndVarSimplify. + +declare token @llvm.experimental.convergence.entry() #0 + +define void @loop(i32 %tid, ptr %array) #0 { +; CHECK-LABEL: @loop( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = tail call token @llvm.experimental.convergence.entry() +; CHECK-NEXT: br label [[FOR_COND_I:%.*]] +; CHECK: for.cond.i: +; CHECK-NEXT: [[I_0_I:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC_I:%.*]], [[FOR_BODY_I:%.*]] ] +; CHECK-NEXT: [[TMP1:%.*]] = call token @llvm.experimental.convergence.loop() [ "convergencectrl"(token [[TMP0]]) ] +; CHECK-NEXT: [[CMP_I:%.*]] = icmp ult i32 [[I_0_I]], 8 +; CHECK-NEXT: br i1 [[CMP_I]], label [[FOR_BODY_I]], label [[EXIT_LOOPEXIT:%.*]] +; CHECK: for.body.i: +; CHECK-NEXT: [[CMP1_I:%.*]] = icmp eq i32 [[I_0_I]], [[TID:%.*]] +; CHECK-NEXT: [[INC_I]] = add nuw nsw i32 [[I_0_I]], 1 +; CHECK-NEXT: br i1 [[CMP1_I]], label [[IF_THEN_I:%.*]], label [[FOR_COND_I]] +; CHECK: exit.loopexit: +; CHECK-NEXT: br label [[EXIT:%.*]] +; CHECK: if.then.i: +; CHECK-NEXT: [[HLSL_WAVE_ACTIVE_MAX2_I:%.*]] = call spir_func i32 @llvm.spv.wave.reduce.umax.i32(i32 [[TID]]) [ "convergencectrl"(token [[TMP1]]) ] +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i32 [[TID]] +; CHECK-NEXT: store i32 [[HLSL_WAVE_ACTIVE_MAX2_I]], ptr 
[[TMP2]], align 4 +; CHECK-NEXT: br label [[EXIT]] +; CHECK: exit: +; CHECK-NEXT: ret void +; +entry: + %0 = tail call token @llvm.experimental.convergence.entry() + br label %for.cond.i + +for.cond.i: + %i.0.i = phi i32 [ 0, %entry ], [ %inc.i, %for.body.i ] + %2 = call token @llvm.experimental.convergence.loop() [ "convergencectrl"(token %0) ] + %cmp.i = icmp ult i32 %i.0.i, 8 + br i1 %cmp.i, label %for.body.i, label %exit.loopexit + +for.body.i: + %cmp1.i = icmp eq i32 %i.0.i, %tid + %inc.i = add nuw nsw i32 %i.0.i, 1 + br i1 %cmp1.i, label %if.then.i, label %for.cond.i + +exit.loopexit: + br label %exit + +if.then.i: + %hlsl.wave.active.max2.i = call spir_func i32 @llvm.spv.wave.reduce.umax.i32(i32 %tid) [ "convergencectrl"(token %2) ] + %3 = getelementptr inbounds i32, ptr %array, i32 %tid + store i32 %hlsl.wave.active.max2.i, ptr %3, align 4 + br label %exit + +exit: + ret void +} + +declare token @llvm.experimental.convergence.loop() #0 + +declare i32 @llvm.spv.wave.reduce.umax.i32(i32) #0 + +attributes #0 = { convergent } diff --git a/llvm/test/Transforms/IndVarSimplify/skip-predication-nested-convergence.ll b/llvm/test/Transforms/IndVarSimplify/skip-predication-nested-convergence.ll new file mode 100644 index 0000000000000..0944205839aca --- /dev/null +++ b/llvm/test/Transforms/IndVarSimplify/skip-predication-nested-convergence.ll @@ -0,0 +1,95 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt < %s -passes=indvars -indvars-predicate-loops=1 -S | FileCheck %s + +; Nested loops with body using loop convergence token should be skipped by IndVarSimplify. 
+ +declare token @llvm.experimental.convergence.entry() #0 + +define void @nested(i32 %tidx, i32 %tidy, ptr %array) #0 { +; CHECK-LABEL: @nested( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = tail call token @llvm.experimental.convergence.entry() +; CHECK-NEXT: [[MUL_I:%.*]] = shl nsw i32 [[TIDX:%.*]], 3 +; CHECK-NEXT: [[ADD_I:%.*]] = add nsw i32 [[MUL_I]], [[TIDY:%.*]] +; CHECK-NEXT: br label [[FOR_COND_I:%.*]] +; CHECK: for.cond.i: +; CHECK-NEXT: [[I_0_I:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC10_I:%.*]], [[CLEANUP_I:%.*]] ] +; CHECK-NEXT: [[TMP1:%.*]] = call token @llvm.experimental.convergence.loop() [ "convergencectrl"(token [[TMP0]]) ] +; CHECK-NEXT: [[CMP_I:%.*]] = icmp ult i32 [[I_0_I]], 8 +; CHECK-NEXT: br i1 [[CMP_I]], label [[FOR_COND1_I_PREHEADER:%.*]], label [[EXIT:%.*]] +; CHECK: for.cond1.i.preheader: +; CHECK-NEXT: [[CMP5_I:%.*]] = icmp eq i32 [[I_0_I]], [[TIDX]] +; CHECK-NEXT: br label [[FOR_COND1_I:%.*]] +; CHECK: for.cond1.i: +; CHECK-NEXT: [[J_0_I:%.*]] = phi i32 [ [[INC_I:%.*]], [[FOR_BODY4_I:%.*]] ], [ 0, [[FOR_COND1_I_PREHEADER]] ] +; CHECK-NEXT: [[TMP2:%.*]] = call token @llvm.experimental.convergence.loop() [ "convergencectrl"(token [[TMP1]]) ] +; CHECK-NEXT: [[CMP2_I:%.*]] = icmp ult i32 [[J_0_I]], 8 +; CHECK-NEXT: br i1 [[CMP2_I]], label [[FOR_BODY4_I]], label [[CLEANUP_I_LOOPEXIT:%.*]] +; CHECK: for.body4.i: +; CHECK-NEXT: [[CMP6_I:%.*]] = icmp eq i32 [[J_0_I]], [[TIDY]] +; CHECK-NEXT: [[OR_COND:%.*]] = select i1 [[CMP5_I]], i1 [[CMP6_I]], i1 false +; CHECK-NEXT: [[INC_I]] = add nuw nsw i32 [[J_0_I]], 1 +; CHECK-NEXT: br i1 [[OR_COND]], label [[IF_THEN_I:%.*]], label [[FOR_COND1_I]] +; CHECK: cleanup.i.loopexit: +; CHECK-NEXT: br label [[CLEANUP_I]] +; CHECK: if.then.i: +; CHECK-NEXT: [[HLSL_WAVE_ACTIVE_MAX7_I:%.*]] = call spir_func i32 @llvm.spv.wave.reduce.umax.i32(i32 [[ADD_I]]) [ "convergencectrl"(token [[TMP2]]) ] +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i32 [[ADD_I]] +; CHECK-NEXT: 
store i32 [[HLSL_WAVE_ACTIVE_MAX7_I]], ptr [[TMP3]], align 4 +; CHECK-NEXT: br label [[CLEANUP_I]] +; CHECK: cleanup.i: +; CHECK-NEXT: [[INC10_I]] = add nuw nsw i32 [[I_0_I]], 1 +; CHECK-NEXT: br label [[FOR_COND_I]] +; CHECK: exit: +; CHECK-NEXT: ret void +; +entry: + %0 = tail call token @llvm.experimental.convergence.entry() + %mul.i = shl nsw i32 %tidx, 3 + %add.i = add nsw i32 %mul.i, %tidy + br label %for.cond.i + +for.cond.i: + %i.0.i = phi i32 [ 0, %entry ], [ %inc10.i, %cleanup.i ] + %2 = call token @llvm.experimental.convergence.loop() [ "convergencectrl"(token %0) ] + %cmp.i = icmp ult i32 %i.0.i, 8 + br i1 %cmp.i, label %for.cond1.i.preheader, label %exit + +for.cond1.i.preheader: + %cmp5.i = icmp eq i32 %i.0.i, %tidx + br label %for.cond1.i + +for.cond1.i: + %j.0.i = phi i32 [ %inc.i, %for.body4.i ], [ 0, %for.cond1.i.preheader ] + %3 = call token @llvm.experimental.convergence.loop() [ "convergencectrl"(token %2) ] + %cmp2.i = icmp ult i32 %j.0.i, 8 + br i1 %cmp2.i, label %for.body4.i, label %cleanup.i.loopexit + +for.body4.i: + %cmp6.i = icmp eq i32 %j.0.i, %tidy + %or.cond = select i1 %cmp5.i, i1 %cmp6.i, i1 false + %inc.i = add nsw i32 %j.0.i, 1 + br i1 %or.cond, label %if.then.i, label %for.cond1.i + +cleanup.i.loopexit: + br label %cleanup.i + +if.then.i: + %hlsl.wave.active.max7.i = call spir_func i32 @llvm.spv.wave.reduce.umax.i32(i32 %add.i) [ "convergencectrl"(token %3) ] + %4 = getelementptr inbounds i32, ptr %array, i32 %add.i + store i32 %hlsl.wave.active.max7.i, ptr %4, align 4 + br label %cleanup.i + +cleanup.i: + %inc10.i = add nsw i32 %i.0.i, 1 + br label %for.cond.i + +exit: + ret void +} + +declare token @llvm.experimental.convergence.loop() #0 + +declare i32 @llvm.spv.wave.reduce.umax.i32(i32) #0 + +attributes #0 = { convergent } diff --git a/llvm/test/Transforms/InstCombine/ptrauth-intrinsics.ll b/llvm/test/Transforms/InstCombine/ptrauth-intrinsics.ll index 208e162ac9416..22c330fe7ae61 100644 --- 
a/llvm/test/Transforms/InstCombine/ptrauth-intrinsics.ll +++ b/llvm/test/Transforms/InstCombine/ptrauth-intrinsics.ll @@ -160,6 +160,43 @@ define i64 @test_ptrauth_resign_ptrauth_constant(ptr %p) { ret i64 %authed } +@ds = external global i8 + +define i64 @test_ptrauth_nop_ds1(ptr %p) { +; CHECK-LABEL: @test_ptrauth_nop_ds1( +; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[P:%.*]] to i64 +; CHECK-NEXT: [[SIGNED:%.*]] = call i64 @llvm.ptrauth.sign(i64 [[TMP0]], i32 1, i64 1234) [ "deactivation-symbol"(ptr @ds) ] +; CHECK-NEXT: [[AUTHED:%.*]] = call i64 @llvm.ptrauth.auth(i64 [[SIGNED]], i32 1, i64 1234) +; CHECK-NEXT: ret i64 [[AUTHED]] +; + %tmp0 = ptrtoint ptr %p to i64 + %signed = call i64 @llvm.ptrauth.sign(i64 %tmp0, i32 1, i64 1234) [ "deactivation-symbol"(ptr @ds) ] + %authed = call i64 @llvm.ptrauth.auth(i64 %signed, i32 1, i64 1234) + ret i64 %authed +} + +define i64 @test_ptrauth_nop_ds2(ptr %p) { +; CHECK-LABEL: @test_ptrauth_nop_ds2( +; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[P:%.*]] to i64 +; CHECK-NEXT: [[SIGNED:%.*]] = call i64 @llvm.ptrauth.sign(i64 [[TMP0]], i32 1, i64 1234) +; CHECK-NEXT: [[AUTHED:%.*]] = call i64 @llvm.ptrauth.auth(i64 [[SIGNED]], i32 1, i64 1234) [ "deactivation-symbol"(ptr @ds) ] +; CHECK-NEXT: ret i64 [[AUTHED]] +; + %tmp0 = ptrtoint ptr %p to i64 + %signed = call i64 @llvm.ptrauth.sign(i64 %tmp0, i32 1, i64 1234) + %authed = call i64 @llvm.ptrauth.auth(i64 %signed, i32 1, i64 1234) [ "deactivation-symbol"(ptr @ds) ] + ret i64 %authed +} + +define i64 @test_ptrauth_nop_ds_constant() { +; CHECK-LABEL: @test_ptrauth_nop_ds_constant( +; CHECK-NEXT: [[AUTHED:%.*]] = call i64 @llvm.ptrauth.auth(i64 ptrtoint (ptr ptrauth (ptr @foo, i32 1, i64 1234, ptr null, ptr @ds) to i64), i32 1, i64 1234) +; CHECK-NEXT: ret i64 [[AUTHED]] +; + %authed = call i64 @llvm.ptrauth.auth(i64 ptrtoint(ptr ptrauth(ptr @foo, i32 1, i64 1234, ptr null, ptr @ds) to i64), i32 1, i64 1234) + ret i64 %authed +} + declare i64 @llvm.ptrauth.auth(i64, i32, i64) 
declare i64 @llvm.ptrauth.sign(i64, i32, i64) declare i64 @llvm.ptrauth.resign(i64, i32, i64, i32, i64) diff --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/vectorize-redund-loads.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/vectorize-redund-loads.ll index 55b511fd51a2b..802795da47894 100644 --- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/vectorize-redund-loads.ll +++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/vectorize-redund-loads.ll @@ -1,6 +1,33 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt -mtriple=amdgcn-amd-amdhsa -passes=load-store-vectorizer -S -o - %s | FileCheck %s +define void @onevec(ptr %ptr) { +; CHECK-LABEL: define void @onevec( +; CHECK-SAME: ptr [[PTR:%.*]]) { +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[PTR]], align 4 +; CHECK-NEXT: [[TMP2:%.*]] = bitcast i32 [[TMP1]] to <1 x i32> +; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i32 16 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[GEP1]], align 4 +; CHECK-NEXT: [[TMP4:%.*]] = bitcast i32 [[TMP3]] to <1 x i32> +; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i32 32 +; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[GEP2]], align 4 +; CHECK-NEXT: [[TMP6:%.*]] = bitcast i32 [[TMP5]] to <1 x i32> +; CHECK-NEXT: [[TMP7:%.*]] = bitcast i32 [[TMP5]] to <1 x i32> +; CHECK-NEXT: ret void +; + %ld0 = load <1 x i32>, ptr %ptr, align 4 + %ld1 = load i32, ptr %ptr, align 4 + + %gep1 = getelementptr inbounds i8, ptr %ptr, i32 16 + %ld2 = load i32, ptr %gep1, align 4 + %ld3 = load <1 x i32>, ptr %gep1, align 4 + + %gep2 = getelementptr inbounds i8, ptr %ptr, i32 32 + %ld4 = load <1 x i32>, ptr %gep2, align 4 + %ld5 = load <1 x i32>, ptr %gep2, align 4 + ret void +} + define void @test(ptr %ptr) { ; CHECK-LABEL: define void @test( ; CHECK-SAME: ptr [[PTR:%.*]]) { diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/call-costs.ll 
b/llvm/test/Transforms/LoopVectorize/AArch64/call-costs.ll index 23918427e7003..95b4dcb23dd47 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/call-costs.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/call-costs.ll @@ -14,7 +14,7 @@ define void @fshl_operand_first_order_recurrence(ptr %dst, ptr noalias %src) { ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[VECTOR_RECUR:%.*]] = phi <2 x i64> [ , %[[VECTOR_PH]] ], [ [[WIDE_LOAD1:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i32 2 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i64 2 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i64>, ptr [[TMP2]], align 8 ; CHECK-NEXT: [[WIDE_LOAD1]] = load <2 x i64>, ptr [[TMP5]], align 8 ; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <2 x i64> [[VECTOR_RECUR]], <2 x i64> [[WIDE_LOAD]], <2 x i32> @@ -22,7 +22,7 @@ define void @fshl_operand_first_order_recurrence(ptr %dst, ptr noalias %src) { ; CHECK-NEXT: [[TMP8:%.*]] = call <2 x i64> @llvm.fshl.v2i64(<2 x i64> splat (i64 1), <2 x i64> [[TMP6]], <2 x i64> splat (i64 1)) ; CHECK-NEXT: [[TMP9:%.*]] = call <2 x i64> @llvm.fshl.v2i64(<2 x i64> splat (i64 1), <2 x i64> [[TMP7]], <2 x i64> splat (i64 1)) ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i64, ptr [[TMP10]], i32 2 +; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i64, ptr [[TMP10]], i64 2 ; CHECK-NEXT: store <2 x i64> [[TMP8]], ptr [[TMP10]], align 8 ; CHECK-NEXT: store <2 x i64> [[TMP9]], ptr [[TMP13]], align 8 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll index 
9609982b2c68f..118f165050602 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll @@ -63,7 +63,7 @@ define void @loop_dependent_cond(ptr %src, ptr noalias %dst, i64 %N) { ; DEFAULT: [[VECTOR_BODY]]: ; DEFAULT-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE7:.*]] ] ; DEFAULT-NEXT: [[TMP3:%.*]] = getelementptr double, ptr [[SRC]], i64 [[INDEX]] -; DEFAULT-NEXT: [[TMP6:%.*]] = getelementptr double, ptr [[TMP3]], i32 2 +; DEFAULT-NEXT: [[TMP6:%.*]] = getelementptr double, ptr [[TMP3]], i64 2 ; DEFAULT-NEXT: [[WIDE_LOAD:%.*]] = load <2 x double>, ptr [[TMP3]], align 8 ; DEFAULT-NEXT: [[WIDE_LOAD1:%.*]] = load <2 x double>, ptr [[TMP6]], align 8 ; DEFAULT-NEXT: [[TMP7:%.*]] = call <2 x double> @llvm.fabs.v2f64(<2 x double> [[WIDE_LOAD]]) @@ -259,7 +259,7 @@ define void @latch_branch_cost(ptr %dst) { ; DEFAULT: [[VECTOR_BODY]]: ; DEFAULT-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; DEFAULT-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[DST]], i64 [[INDEX]] -; DEFAULT-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP2]], i32 16 +; DEFAULT-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP2]], i64 16 ; DEFAULT-NEXT: store <16 x i8> zeroinitializer, ptr [[TMP2]], align 1 ; DEFAULT-NEXT: store <16 x i8> zeroinitializer, ptr [[TMP5]], align 1 ; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 @@ -522,25 +522,47 @@ define void @multiple_exit_conditions(ptr %src, ptr noalias %dst) #1 { ; DEFAULT-LABEL: define void @multiple_exit_conditions( ; DEFAULT-SAME: ptr [[SRC:%.*]], ptr noalias [[DST:%.*]]) #[[ATTR2:[0-9]+]] { ; DEFAULT-NEXT: [[ENTRY:.*:]] -; DEFAULT-NEXT: br label %[[VECTOR_PH:.*]] +; DEFAULT-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; DEFAULT-NEXT: [[TMP3:%.*]] = shl nuw i64 [[TMP2]], 4 +; DEFAULT-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ult i64 257, 
[[TMP3]] +; DEFAULT-NEXT: br i1 [[MIN_ITERS_CHECK1]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; DEFAULT: [[VECTOR_PH]]: -; DEFAULT-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[DST]], i64 2048 +; DEFAULT-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() +; DEFAULT-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 16 +; DEFAULT-NEXT: [[N_MOD_VF:%.*]] = urem i64 257, [[TMP5]] +; DEFAULT-NEXT: [[N_VEC:%.*]] = sub i64 257, [[N_MOD_VF]] +; DEFAULT-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[N_VEC]], 8 +; DEFAULT-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[DST]], i64 [[OFFSET_IDX]] +; DEFAULT-NEXT: [[TMP6:%.*]] = mul i64 [[N_VEC]], 2 ; DEFAULT-NEXT: br label %[[VECTOR_BODY:.*]] ; DEFAULT: [[VECTOR_BODY]]: ; DEFAULT-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; DEFAULT-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 8 -; DEFAULT-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[DST]], i64 [[OFFSET_IDX]] +; DEFAULT-NEXT: [[OFFSET_IDX1:%.*]] = mul i64 [[INDEX]], 8 +; DEFAULT-NEXT: [[NEXT_GEP1:%.*]] = getelementptr i8, ptr [[DST]], i64 [[OFFSET_IDX1]] ; DEFAULT-NEXT: [[TMP1:%.*]] = load i16, ptr [[SRC]], align 2 -; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[TMP1]], i64 0 -; DEFAULT-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <8 x i16> [[BROADCAST_SPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer -; DEFAULT-NEXT: [[TMP2:%.*]] = or <8 x i16> [[BROADCAST_SPLAT]], splat (i16 1) -; DEFAULT-NEXT: [[TMP3:%.*]] = uitofp <8 x i16> [[TMP2]] to <8 x double> -; DEFAULT-NEXT: store <8 x double> [[TMP3]], ptr [[NEXT_GEP]], align 8 -; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 -; DEFAULT-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256 -; DEFAULT-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] +; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i16 [[TMP1]], i64 0 +; DEFAULT-NEXT: 
[[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer +; DEFAULT-NEXT: [[TMP8:%.*]] = or [[BROADCAST_SPLAT]], splat (i16 1) +; DEFAULT-NEXT: [[TMP9:%.*]] = uitofp [[TMP8]] to +; DEFAULT-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64() +; DEFAULT-NEXT: [[TMP11:%.*]] = shl nuw i64 [[TMP10]], 2 +; DEFAULT-NEXT: [[TMP12:%.*]] = getelementptr double, ptr [[NEXT_GEP1]], i64 [[TMP11]] +; DEFAULT-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64() +; DEFAULT-NEXT: [[TMP14:%.*]] = shl nuw i64 [[TMP13]], 3 +; DEFAULT-NEXT: [[TMP15:%.*]] = getelementptr double, ptr [[NEXT_GEP1]], i64 [[TMP14]] +; DEFAULT-NEXT: [[TMP16:%.*]] = call i64 @llvm.vscale.i64() +; DEFAULT-NEXT: [[TMP17:%.*]] = mul nuw i64 [[TMP16]], 12 +; DEFAULT-NEXT: [[TMP18:%.*]] = getelementptr double, ptr [[NEXT_GEP1]], i64 [[TMP17]] +; DEFAULT-NEXT: store [[TMP9]], ptr [[NEXT_GEP1]], align 8 +; DEFAULT-NEXT: store [[TMP9]], ptr [[TMP12]], align 8 +; DEFAULT-NEXT: store [[TMP9]], ptr [[TMP15]], align 8 +; DEFAULT-NEXT: store [[TMP9]], ptr [[TMP18]], align 8 +; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; DEFAULT-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; DEFAULT-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] ; DEFAULT: [[MIDDLE_BLOCK]]: -; DEFAULT-NEXT: br label %[[SCALAR_PH:.*]] +; DEFAULT-NEXT: [[CMP_N:%.*]] = icmp eq i64 257, [[N_VEC]] +; DEFAULT-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]] ; DEFAULT: [[SCALAR_PH]]: ; ; PRED-LABEL: define void @multiple_exit_conditions( @@ -549,28 +571,28 @@ define void @multiple_exit_conditions(ptr %src, ptr noalias %dst) #1 { ; PRED-NEXT: br label %[[VECTOR_PH:.*]] ; PRED: [[VECTOR_PH]]: ; PRED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; PRED-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 +; PRED-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 ; PRED-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() -; PRED-NEXT: 
[[TMP7:%.*]] = shl nuw i64 [[TMP6]], 1 +; PRED-NEXT: [[TMP7:%.*]] = shl nuw i64 [[TMP6]], 2 ; PRED-NEXT: [[TMP8:%.*]] = sub i64 257, [[TMP7]] ; PRED-NEXT: [[TMP9:%.*]] = icmp ugt i64 257, [[TMP7]] ; PRED-NEXT: [[TMP10:%.*]] = select i1 [[TMP9]], i64 [[TMP8]], i64 0 -; PRED-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 257) +; PRED-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 257) ; PRED-NEXT: br label %[[VECTOR_BODY:.*]] ; PRED: [[VECTOR_BODY]]: ; PRED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; PRED-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], %[[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ] +; PRED-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], %[[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ] ; PRED-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 8 ; PRED-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[DST]], i64 [[OFFSET_IDX]] ; PRED-NEXT: [[TMP12:%.*]] = load i16, ptr [[SRC]], align 2 -; PRED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i16 [[TMP12]], i64 0 -; PRED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer -; PRED-NEXT: [[TMP13:%.*]] = or [[BROADCAST_SPLAT]], splat (i16 1) -; PRED-NEXT: [[TMP14:%.*]] = uitofp [[TMP13]] to -; PRED-NEXT: call void @llvm.masked.store.nxv2f64.p0( [[TMP14]], ptr align 8 [[NEXT_GEP]], [[ACTIVE_LANE_MASK]]) +; PRED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i16 [[TMP12]], i64 0 +; PRED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer +; PRED-NEXT: [[TMP11:%.*]] = or [[BROADCAST_SPLAT]], splat (i16 1) +; PRED-NEXT: [[TMP13:%.*]] = uitofp [[TMP11]] to +; PRED-NEXT: call void @llvm.masked.store.nxv4f64.p0( [[TMP13]], ptr align 8 [[NEXT_GEP]], [[ACTIVE_LANE_MASK]]) ; PRED-NEXT: 
[[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]] -; PRED-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX]], i64 [[TMP10]]) -; PRED-NEXT: [[TMP15:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 +; PRED-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP10]]) +; PRED-NEXT: [[TMP15:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; PRED-NEXT: [[TMP16:%.*]] = xor i1 [[TMP15]], true ; PRED-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; PRED: [[MIDDLE_BLOCK]]: diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/deterministic-type-shrinkage.ll b/llvm/test/Transforms/LoopVectorize/AArch64/deterministic-type-shrinkage.ll index 0a433ec76acf4..f0664197dcb94 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/deterministic-type-shrinkage.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/deterministic-type-shrinkage.ll @@ -137,7 +137,7 @@ define void @test_shrink_zext_in_preheader(ptr noalias %src, ptr noalias %dst, i ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[DST]], i32 [[INDEX]] -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i32 16 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i64 16 ; CHECK-NEXT: store <16 x i8> [[TMP3]], ptr [[TMP4]], align 1 ; CHECK-NEXT: store <16 x i8> [[TMP3]], ptr [[TMP5]], align 1 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 32 @@ -210,7 +210,7 @@ define void @test_shrink_select(ptr noalias %src, ptr noalias %dst, i32 %A, i1 % ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[DST]], i32 [[INDEX]] -; CHECK-NEXT: [[TMP6:%.*]] = getelementptr 
inbounds i8, ptr [[TMP5]], i32 16 +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[TMP5]], i64 16 ; CHECK-NEXT: store <16 x i8> [[TMP4]], ptr [[TMP5]], align 1 ; CHECK-NEXT: store <16 x i8> [[TMP4]], ptr [[TMP6]], align 1 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 32 @@ -279,7 +279,7 @@ define void @trunc_invariant_sdiv_result(i32 %a, i32 %b, ptr noalias %src, ptr % ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 16 +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 16 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 ; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1 ; CHECK-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i16> @@ -287,7 +287,7 @@ define void @trunc_invariant_sdiv_result(i32 %a, i32 %b, ptr noalias %src, ptr % ; CHECK-NEXT: [[TMP5:%.*]] = mul <16 x i16> [[TMP0]], [[TMP3]] ; CHECK-NEXT: [[TMP6:%.*]] = mul <16 x i16> [[TMP0]], [[TMP4]] ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i16, ptr [[DST]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i16, ptr [[TMP7]], i32 16 +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i16, ptr [[TMP7]], i64 16 ; CHECK-NEXT: store <16 x i16> [[TMP5]], ptr [[TMP7]], align 2 ; CHECK-NEXT: store <16 x i16> [[TMP6]], ptr [[TMP8]], align 2 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 @@ -413,7 +413,7 @@ define void @old_and_new_size_equalko(ptr noalias %src, ptr noalias %dst) { ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i32 [[INDEX]] -; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr 
[[TMP0]], i32 4 +; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[TMP0]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP0]], align 8 ; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8 ; CHECK-NEXT: [[TMP2:%.*]] = trunc <4 x i64> [[WIDE_LOAD]] to <4 x i1> @@ -427,15 +427,16 @@ define void @old_and_new_size_equalko(ptr noalias %src, ptr noalias %dst) { ; CHECK-NEXT: [[TMP10:%.*]] = trunc <4 x i64> [[TMP8]] to <4 x i32> ; CHECK-NEXT: [[TMP11:%.*]] = trunc <4 x i64> [[TMP9]] to <4 x i32> ; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[INDEX]] -; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[TMP12]], i32 4 +; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[TMP12]], i64 4 ; CHECK-NEXT: store <4 x i32> [[TMP10]], ptr [[TMP12]], align 4 ; CHECK-NEXT: store <4 x i32> [[TMP11]], ptr [[TMP13]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8 ; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br [[EXIT:label %.*]] -; CHECK: [[SCALAR_PH:.*:]] +; CHECK-NEXT: br label %[[EXIT:.*]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/epilog-iv-select-cmp.ll b/llvm/test/Transforms/LoopVectorize/AArch64/epilog-iv-select-cmp.ll index 2180f18750bf2..580c568c373f1 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/epilog-iv-select-cmp.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/epilog-iv-select-cmp.ll @@ -27,7 +27,7 @@ define i8 @select_icmp_var_start(ptr %a, i8 %n, i8 %start) { ; CHECK-NEXT: [[STEP_ADD:%.*]] = add <16 x i8> [[VEC_IND]], splat (i8 16) ; CHECK-NEXT: [[INDEX4:%.*]] = trunc i32 [[INDEX]] to i8 ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[A]], i8 [[INDEX4]] -; CHECK-NEXT: 
[[TMP7:%.*]] = getelementptr inbounds i8, ptr [[TMP8]], i32 16 +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[TMP8]], i64 16 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP8]], align 8 ; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP7]], align 8 ; CHECK-NEXT: [[TMP17:%.*]] = icmp eq <16 x i8> [[WIDE_LOAD]], splat (i8 3) diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/epilog-vectorization-factors.ll b/llvm/test/Transforms/LoopVectorize/AArch64/epilog-vectorization-factors.ll index a3b7392dd280f..549df337e6907 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/epilog-vectorization-factors.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/epilog-vectorization-factors.ll @@ -19,17 +19,17 @@ define void @add_i8(ptr noalias nocapture noundef writeonly %A, ptr nocapture no ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[B:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 16 -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 32 -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 48 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 16 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 32 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 48 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 ; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP4]], align 1 ; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1 ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[C:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i32 16 -; CHECK-NEXT: [[TMP9:%.*]] = 
getelementptr inbounds i8, ptr [[TMP6]], i32 32 -; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i32 48 +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i64 16 +; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i64 32 +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i64 48 ; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <16 x i8>, ptr [[TMP6]], align 1 ; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1 ; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <16 x i8>, ptr [[TMP9]], align 1 @@ -39,9 +39,9 @@ define void @add_i8(ptr noalias nocapture noundef writeonly %A, ptr nocapture no ; CHECK-NEXT: [[TMP13:%.*]] = add <16 x i8> [[WIDE_LOAD7]], [[WIDE_LOAD3]] ; CHECK-NEXT: [[TMP14:%.*]] = add <16 x i8> [[WIDE_LOAD8]], [[WIDE_LOAD4]] ; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i8, ptr [[A:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i8, ptr [[TMP15]], i32 16 -; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[TMP15]], i32 32 -; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i8, ptr [[TMP15]], i32 48 +; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i8, ptr [[TMP15]], i64 16 +; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[TMP15]], i64 32 +; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i8, ptr [[TMP15]], i64 48 ; CHECK-NEXT: store <16 x i8> [[TMP11]], ptr [[TMP15]], align 1 ; CHECK-NEXT: store <16 x i8> [[TMP12]], ptr [[TMP17]], align 1 ; CHECK-NEXT: store <16 x i8> [[TMP13]], ptr [[TMP18]], align 1 @@ -54,7 +54,7 @@ define void @add_i8(ptr noalias nocapture noundef writeonly %A, ptr nocapture no ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: ; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8 -; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] +; 
CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; CHECK-NEXT: [[N_MOD_VF9:%.*]] = urem i64 [[ITERATIONS]], 8 @@ -71,7 +71,7 @@ define void @add_i8(ptr noalias nocapture noundef writeonly %A, ptr nocapture no ; CHECK-NEXT: store <8 x i8> [[TMP26]], ptr [[TMP27]], align 1 ; CHECK-NEXT: [[INDEX_NEXT14]] = add nuw i64 [[INDEX11]], 8 ; CHECK-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT14]], [[N_VEC10]] -; CHECK-NEXT: br i1 [[TMP29]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP29]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: vec.epilog.middle.block: ; CHECK-NEXT: [[CMP_N15:%.*]] = icmp eq i64 [[ITERATIONS]], [[N_VEC10]] ; CHECK-NEXT: br i1 [[CMP_N15]], label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] @@ -89,7 +89,7 @@ define void @add_i8(ptr noalias nocapture noundef writeonly %A, ptr nocapture no ; CHECK-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX6]], align 1 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[ITERATIONS]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -128,17 +128,17 @@ define void @add_i16(ptr noalias nocapture noundef writeonly %A, ptr nocapture n ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i16, ptr [[B:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds 
i16, ptr [[TMP1]], i32 8 -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i16, ptr [[TMP1]], i32 16 -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i16, ptr [[TMP1]], i32 24 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i16, ptr [[TMP1]], i64 8 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i16, ptr [[TMP1]], i64 16 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i16, ptr [[TMP1]], i64 24 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[TMP1]], align 1 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i16>, ptr [[TMP3]], align 1 ; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <8 x i16>, ptr [[TMP4]], align 1 ; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <8 x i16>, ptr [[TMP5]], align 1 ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i16, ptr [[C:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i16, ptr [[TMP6]], i32 8 -; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i16, ptr [[TMP6]], i32 16 -; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i16, ptr [[TMP6]], i32 24 +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i16, ptr [[TMP6]], i64 8 +; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i16, ptr [[TMP6]], i64 16 +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i16, ptr [[TMP6]], i64 24 ; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <8 x i16>, ptr [[TMP6]], align 1 ; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <8 x i16>, ptr [[TMP8]], align 1 ; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <8 x i16>, ptr [[TMP9]], align 1 @@ -148,22 +148,22 @@ define void @add_i16(ptr noalias nocapture noundef writeonly %A, ptr nocapture n ; CHECK-NEXT: [[TMP13:%.*]] = add <8 x i16> [[WIDE_LOAD7]], [[WIDE_LOAD3]] ; CHECK-NEXT: [[TMP14:%.*]] = add <8 x i16> [[WIDE_LOAD8]], [[WIDE_LOAD4]] ; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i16, ptr [[A:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i16, ptr [[TMP15]], i32 8 -; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i16, ptr [[TMP15]], i32 16 -; 
CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i16, ptr [[TMP15]], i32 24 +; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i16, ptr [[TMP15]], i64 8 +; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i16, ptr [[TMP15]], i64 16 +; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i16, ptr [[TMP15]], i64 24 ; CHECK-NEXT: store <8 x i16> [[TMP11]], ptr [[TMP15]], align 1 ; CHECK-NEXT: store <8 x i16> [[TMP12]], ptr [[TMP17]], align 1 ; CHECK-NEXT: store <8 x i16> [[TMP13]], ptr [[TMP18]], align 1 ; CHECK-NEXT: store <8 x i16> [[TMP14]], ptr [[TMP19]], align 1 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 ; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[ITERATIONS]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: ; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 -; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] +; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF7:![0-9]+]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; CHECK-NEXT: [[N_MOD_VF9:%.*]] = urem i64 [[ITERATIONS]], 4 @@ -180,7 +180,7 @@ define void @add_i16(ptr noalias nocapture noundef writeonly %A, ptr nocapture n ; CHECK-NEXT: store <4 x i16> [[TMP26]], ptr [[TMP27]], align 1 ; CHECK-NEXT: [[INDEX_NEXT14]] = add nuw i64 [[INDEX11]], 4 ; CHECK-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT14]], [[N_VEC10]] -; CHECK-NEXT: br i1 [[TMP29]], 
label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP29]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: vec.epilog.middle.block: ; CHECK-NEXT: [[CMP_N15:%.*]] = icmp eq i64 [[ITERATIONS]], [[N_VEC10]] ; CHECK-NEXT: br i1 [[CMP_N15]], label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] @@ -198,7 +198,7 @@ define void @add_i16(ptr noalias nocapture noundef writeonly %A, ptr nocapture n ; CHECK-NEXT: store i16 [[ADD]], ptr [[ARRAYIDX6]], align 1 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[ITERATIONS]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -237,17 +237,17 @@ define void @add_i32(ptr noalias nocapture noundef writeonly %A, ptr nocapture n ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 4 -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 8 -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 12 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 4 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 8 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 12 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1]], align 1 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i32>, ptr [[TMP3]], align 1 ; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <4 x i32>, ptr [[TMP4]], align 1 ; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = 
load <4 x i32>, ptr [[TMP5]], align 1 ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[TMP6]], i32 4 -; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[TMP6]], i32 8 -; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[TMP6]], i32 12 +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[TMP6]], i64 4 +; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[TMP6]], i64 8 +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[TMP6]], i64 12 ; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x i32>, ptr [[TMP6]], align 1 ; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x i32>, ptr [[TMP8]], align 1 ; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x i32>, ptr [[TMP9]], align 1 @@ -257,22 +257,22 @@ define void @add_i32(ptr noalias nocapture noundef writeonly %A, ptr nocapture n ; CHECK-NEXT: [[TMP13:%.*]] = add <4 x i32> [[WIDE_LOAD7]], [[WIDE_LOAD3]] ; CHECK-NEXT: [[TMP14:%.*]] = add <4 x i32> [[WIDE_LOAD8]], [[WIDE_LOAD4]] ; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[TMP15]], i32 4 -; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[TMP15]], i32 8 -; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[TMP15]], i32 12 +; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[TMP15]], i64 4 +; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[TMP15]], i64 8 +; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[TMP15]], i64 12 ; CHECK-NEXT: store <4 x i32> [[TMP11]], ptr [[TMP15]], align 1 ; CHECK-NEXT: store <4 x i32> [[TMP12]], ptr [[TMP17]], align 1 ; CHECK-NEXT: store <4 x i32> [[TMP13]], ptr [[TMP18]], align 1 ; CHECK-NEXT: store <4 x i32> [[TMP14]], ptr [[TMP19]], align 1 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 
[[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[ITERATIONS]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: ; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 -; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] +; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF11:![0-9]+]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; CHECK-NEXT: [[N_MOD_VF9:%.*]] = urem i64 [[ITERATIONS]], 4 @@ -289,7 +289,7 @@ define void @add_i32(ptr noalias nocapture noundef writeonly %A, ptr nocapture n ; CHECK-NEXT: store <4 x i32> [[TMP26]], ptr [[TMP27]], align 1 ; CHECK-NEXT: [[INDEX_NEXT14]] = add nuw i64 [[INDEX11]], 4 ; CHECK-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT14]], [[N_VEC10]] -; CHECK-NEXT: br i1 [[TMP29]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP29]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK: vec.epilog.middle.block: ; CHECK-NEXT: [[CMP_N15:%.*]] = icmp eq i64 [[ITERATIONS]], [[N_VEC10]] ; CHECK-NEXT: br i1 [[CMP_N15]], label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] @@ -307,7 +307,7 @@ define void @add_i32(ptr noalias nocapture noundef writeonly %A, ptr nocapture n ; CHECK-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX6]], align 1 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = 
icmp eq i64 [[IV_NEXT]], [[ITERATIONS]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -347,9 +347,9 @@ define void @small_trip_count_loop(ptr %arg, ptr %arg2) { ; CHECK: vector.ph: ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: -; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[ARG]], i32 16 -; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[ARG]], i32 32 -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[ARG]], i32 48 +; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[ARG]], i64 16 +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[ARG]], i64 32 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[ARG]], i64 48 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[ARG]], align 1 ; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 ; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1 @@ -358,9 +358,9 @@ define void @small_trip_count_loop(ptr %arg, ptr %arg2) { ; CHECK-NEXT: [[TMP5:%.*]] = add <16 x i8> [[WIDE_LOAD4]], splat (i8 10) ; CHECK-NEXT: [[TMP6:%.*]] = add <16 x i8> [[WIDE_LOAD5]], splat (i8 10) ; CHECK-NEXT: [[TMP7:%.*]] = add <16 x i8> [[WIDE_LOAD6]], splat (i8 10) -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[ARG2]], i32 16 -; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[ARG2]], i32 32 -; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[ARG2]], i32 48 +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[ARG2]], i64 16 +; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[ARG2]], i64 32 +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[ARG2]], i64 48 ; CHECK-NEXT: store <16 x i8> [[TMP4]], ptr [[ARG2]], align 1 ; CHECK-NEXT: store <16 x i8> 
[[TMP5]], ptr [[TMP8]], align 1 ; CHECK-NEXT: store <16 x i8> [[TMP6]], ptr [[TMP9]], align 1 @@ -369,7 +369,7 @@ define void @small_trip_count_loop(ptr %arg, ptr %arg2) { ; CHECK: middle.block: ; CHECK-NEXT: br i1 false, label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: -; CHECK-NEXT: br i1 false, label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] +; CHECK-NEXT: br i1 false, label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF14:![0-9]+]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i32 [ 0, [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; CHECK-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]] @@ -382,7 +382,7 @@ define void @small_trip_count_loop(ptr %arg, ptr %arg2) { ; CHECK-NEXT: store <16 x i8> [[TMP12]], ptr [[TMP13]], align 1 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16 ; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i32 [[INDEX_NEXT]], 16 -; CHECK-NEXT: br i1 [[TMP14]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP14]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK: vec.epilog.middle.block: ; CHECK-NEXT: br i1 false, label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] ; CHECK: vec.epilog.scalar.ph: @@ -397,7 +397,7 @@ define void @small_trip_count_loop(ptr %arg, ptr %arg2) { ; CHECK-NEXT: store i8 [[SELECT]], ptr [[GEP_B]], align 1 ; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 ; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], 20 -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -447,16 +447,16 @@ define void @trip_count_based_on_ptrtoint(i64 %x) "target-cpu"="apple-m1" { ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ 
[[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP7:%.*]] = mul i64 [[INDEX]], 4 ; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PTR_START]], i64 [[TMP7]] -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i32, ptr [[NEXT_GEP]], i32 4 -; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i32, ptr [[NEXT_GEP]], i32 8 -; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i32, ptr [[NEXT_GEP]], i32 12 +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i32, ptr [[NEXT_GEP]], i64 4 +; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i32, ptr [[NEXT_GEP]], i64 8 +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i32, ptr [[NEXT_GEP]], i64 12 ; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr [[NEXT_GEP]], align 4 ; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr [[TMP8]], align 4 ; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr [[TMP9]], align 4 ; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr [[TMP10]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP2]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] @@ -464,7 +464,7 @@ define void @trip_count_based_on_ptrtoint(i64 %x) "target-cpu"="apple-m1" { ; CHECK-NEXT: [[TMP12:%.*]] = mul i64 [[N_VEC]], 4 ; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[PTR_START]], i64 [[TMP12]] ; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 -; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] +; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF11]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: 
[[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; CHECK-NEXT: [[N_MOD_VF1:%.*]] = urem i64 [[TMP2]], 4 @@ -479,7 +479,7 @@ define void @trip_count_based_on_ptrtoint(i64 %x) "target-cpu"="apple-m1" { ; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr [[NEXT_GEP4]], align 4 ; CHECK-NEXT: [[INDEX_NEXT5]] = add nuw i64 [[INDEX3]], 4 ; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT5]], [[N_VEC2]] -; CHECK-NEXT: br i1 [[TMP15]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP15]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; CHECK: vec.epilog.middle.block: ; CHECK-NEXT: [[CMP_N6:%.*]] = icmp eq i64 [[TMP2]], [[N_VEC2]] ; CHECK-NEXT: br i1 [[CMP_N6]], label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] @@ -491,7 +491,7 @@ define void @trip_count_based_on_ptrtoint(i64 %x) "target-cpu"="apple-m1" { ; CHECK-NEXT: store i32 0, ptr [[IV]], align 4 ; CHECK-NEXT: [[IV_NEXT]] = getelementptr i8, ptr [[IV]], i64 4 ; CHECK-NEXT: [[EC:%.*]] = icmp eq ptr [[IV]], [[PTR_END]] -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP13:![0-9]+]] +; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP19:![0-9]+]] ; CHECK: exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/epilog-vectorization-widen-inductions.ll b/llvm/test/Transforms/LoopVectorize/AArch64/epilog-vectorization-widen-inductions.ll index 5e92123891b31..85726c161cc54 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/epilog-vectorization-widen-inductions.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/epilog-vectorization-widen-inductions.ll @@ -29,7 +29,7 @@ define void @test_widen_ptr_induction(ptr %ptr.start.1) { ; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP12]]) ; CHECK-NEXT: tail call void @llvm.assume(i1 
[[TMP13]]) ; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP14]]) -; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 2 +; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 2 ; CHECK-NEXT: store <2 x i8> zeroinitializer, ptr [[NEXT_GEP]], align 1 ; CHECK-NEXT: store <2 x i8> zeroinitializer, ptr [[TMP15]], align 1 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 @@ -117,7 +117,7 @@ define void @test_widen_induction(ptr %A, i64 %N) { ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <2 x i64> [ , [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[STEP_ADD:%.*]] = add <2 x i64> [[VEC_IND]], splat (i64 2) ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i32 2 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i64 2 ; CHECK-NEXT: store <2 x i64> [[VEC_IND]], ptr [[TMP1]], align 4 ; CHECK-NEXT: store <2 x i64> [[STEP_ADD]], ptr [[TMP3]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 @@ -201,7 +201,7 @@ define void @test_widen_induction_variable_start(ptr %A, i64 %N, i64 %start) { ; CHECK-NEXT: [[STEP_ADD:%.*]] = add <2 x i64> [[VEC_IND]], splat (i64 2) ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 [[START]], [[INDEX]] ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[OFFSET_IDX]] -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i32 2 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i64 2 ; CHECK-NEXT: store <2 x i64> [[VEC_IND]], ptr [[TMP2]], align 4 ; CHECK-NEXT: store <2 x i64> [[STEP_ADD]], ptr [[TMP4]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 @@ -285,7 +285,7 @@ define void @test_widen_induction_step_2(ptr %A, i64 %N, i32 %step) { ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] ; CHECK-NEXT: [[TMP2:%.*]] = add <2 x i64> [[VEC_IND]], 
splat (i64 10) ; CHECK-NEXT: [[TMP3:%.*]] = add <2 x i64> [[STEP_ADD]], splat (i64 10) -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i32 2 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i64 2 ; CHECK-NEXT: store <2 x i64> [[TMP2]], ptr [[TMP1]], align 4 ; CHECK-NEXT: store <2 x i64> [[TMP3]], ptr [[TMP5]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 @@ -399,7 +399,7 @@ define void @test_widen_truncated_induction(ptr %A) { ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <2 x i8> [ , [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[STEP_ADD:%.*]] = add <2 x i8> [[VEC_IND]], splat (i8 2) ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[A:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 2 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 2 ; CHECK-NEXT: store <2 x i8> [[VEC_IND]], ptr [[TMP1]], align 1 ; CHECK-NEXT: store <2 x i8> [[STEP_ADD]], ptr [[TMP3]], align 1 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/epilogue-vectorization-fix-scalar-resume-values.ll b/llvm/test/Transforms/LoopVectorize/AArch64/epilogue-vectorization-fix-scalar-resume-values.ll index cb4e99332c04b..4eacc55a99f72 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/epilogue-vectorization-fix-scalar-resume-values.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/epilogue-vectorization-fix-scalar-resume-values.ll @@ -20,7 +20,7 @@ define void @epilogue_vectorization_fix_scalar_resume_values(ptr %dst, i64 %n) { ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[DST]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[TMP0]], i32 16 +; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[TMP0]], i64 16 ; CHECK-NEXT: 
store <16 x i8> zeroinitializer, ptr [[TMP0]], align 1 ; CHECK-NEXT: store <16 x i8> zeroinitializer, ptr [[TMP1]], align 1 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/f128-fmuladd-reduction.ll b/llvm/test/Transforms/LoopVectorize/AArch64/f128-fmuladd-reduction.ll index 35d7e2cc8c586..feb0175e75542 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/f128-fmuladd-reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/f128-fmuladd-reduction.ll @@ -21,16 +21,16 @@ define double @fp128_fmuladd_reduction(ptr %start0, ptr %start1, ptr %end0, ptr ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[START0]], i64 [[TMP0]] ; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[INDEX]], 8 ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[START1]], i64 [[TMP2]] -; CHECK-NEXT: [[TMP24:%.*]] = getelementptr fp128, ptr [[TMP1]], i32 2 -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr fp128, ptr [[TMP1]], i32 4 -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr fp128, ptr [[TMP1]], i32 6 +; CHECK-NEXT: [[TMP24:%.*]] = getelementptr fp128, ptr [[TMP1]], i64 2 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr fp128, ptr [[TMP1]], i64 4 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr fp128, ptr [[TMP1]], i64 6 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x fp128>, ptr [[TMP1]], align 16 ; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <2 x fp128>, ptr [[TMP24]], align 16 ; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <2 x fp128>, ptr [[TMP4]], align 16 ; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <2 x fp128>, ptr [[TMP5]], align 16 -; CHECK-NEXT: [[TMP28:%.*]] = getelementptr double, ptr [[TMP3]], i32 2 -; CHECK-NEXT: [[TMP35:%.*]] = getelementptr double, ptr [[TMP3]], i32 4 -; CHECK-NEXT: [[TMP36:%.*]] = getelementptr double, ptr [[TMP3]], i32 6 +; CHECK-NEXT: [[TMP28:%.*]] = getelementptr double, ptr [[TMP3]], i64 2 +; CHECK-NEXT: [[TMP35:%.*]] = getelementptr double, ptr [[TMP3]], i64 4 +; CHECK-NEXT: [[TMP36:%.*]] = getelementptr double, ptr [[TMP3]], i64 6 ; 
CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <2 x double>, ptr [[TMP3]], align 16 ; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <2 x double>, ptr [[TMP28]], align 16 ; CHECK-NEXT: [[WIDE_LOAD8:%.*]] = load <2 x double>, ptr [[TMP35]], align 16 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/fixed-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/AArch64/fixed-order-recurrence.ll index c94b3a4c49555..c692ba5b06690 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/fixed-order-recurrence.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/fixed-order-recurrence.ll @@ -26,7 +26,7 @@ define void @firstorderrec(ptr nocapture noundef readonly %x, ptr noalias nocapt ; CHECK-NEXT: [[VECTOR_RECUR:%.*]] = phi <16 x i8> [ [[VECTOR_RECUR_INIT]], [[VECTOR_PH]] ], [ [[WIDE_LOAD1:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 1, [[INDEX]] ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[X]], i64 [[OFFSET_IDX]] -; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i32 16 +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i64 16 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 ; CHECK-NEXT: [[WIDE_LOAD1]] = load <16 x i8>, ptr [[TMP6]], align 1 ; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <16 x i8> [[VECTOR_RECUR]], <16 x i8> [[WIDE_LOAD]], <16 x i32> @@ -34,7 +34,7 @@ define void @firstorderrec(ptr nocapture noundef readonly %x, ptr noalias nocapt ; CHECK-NEXT: [[TMP9:%.*]] = add <16 x i8> [[WIDE_LOAD]], [[TMP7]] ; CHECK-NEXT: [[TMP10:%.*]] = add <16 x i8> [[WIDE_LOAD1]], [[TMP8]] ; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[Y:%.*]], i64 [[OFFSET_IDX]] -; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i32 16 +; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i64 16 ; CHECK-NEXT: store <16 x i8> [[TMP9]], ptr [[TMP11]], align 1 ; CHECK-NEXT: store <16 x i8> [[TMP10]], ptr [[TMP14]], align 1 ; CHECK-NEXT: [[INDEX_NEXT]] = 
add nuw i64 [[INDEX]], 32 @@ -119,7 +119,7 @@ define void @thirdorderrec(ptr nocapture noundef readonly %x, ptr noalias nocapt ; CHECK-NEXT: [[VECTOR_RECUR4:%.*]] = phi <16 x i8> [ [[VECTOR_RECUR_INIT3]], [[VECTOR_PH]] ], [ [[TMP10:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 3, [[INDEX]] ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[X]], i64 [[OFFSET_IDX]] -; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i32 16 +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i64 16 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 ; CHECK-NEXT: [[WIDE_LOAD5]] = load <16 x i8>, ptr [[TMP6]], align 1 ; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <16 x i8> [[VECTOR_RECUR]], <16 x i8> [[WIDE_LOAD]], <16 x i32> @@ -135,7 +135,7 @@ define void @thirdorderrec(ptr nocapture noundef readonly %x, ptr noalias nocapt ; CHECK-NEXT: [[TMP17:%.*]] = add <16 x i8> [[TMP15]], [[WIDE_LOAD]] ; CHECK-NEXT: [[TMP18:%.*]] = add <16 x i8> [[TMP16]], [[WIDE_LOAD5]] ; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i8, ptr [[Y:%.*]], i64 [[OFFSET_IDX]] -; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[TMP19]], i32 16 +; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[TMP19]], i64 16 ; CHECK-NEXT: store <16 x i8> [[TMP17]], ptr [[TMP19]], align 1 ; CHECK-NEXT: store <16 x i8> [[TMP18]], ptr [[TMP22]], align 1 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/fixed-wide-lane-mask.ll b/llvm/test/Transforms/LoopVectorize/AArch64/fixed-wide-lane-mask.ll index faee4c1194018..591bdabca65e7 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/fixed-wide-lane-mask.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/fixed-wide-lane-mask.ll @@ -56,9 +56,9 @@ define void @fixed_wide_active_lane_mask(ptr noalias %dst, ptr noalias readonly ; CHECK-UF4-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 
[[TMP7]], i64 0 ; CHECK-UF4-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer ; CHECK-UF4-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 [[INDEX]] -; CHECK-UF4-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i32 4 -; CHECK-UF4-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i32 8 -; CHECK-UF4-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i32 12 +; CHECK-UF4-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i64 4 +; CHECK-UF4-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i64 8 +; CHECK-UF4-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i64 12 ; CHECK-UF4-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[BROADCAST_SPLAT]], ptr align 4 [[TMP8]], <4 x i1> [[ACTIVE_LANE_MASK]]) ; CHECK-UF4-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[BROADCAST_SPLAT]], ptr align 4 [[TMP17]], <4 x i1> [[ACTIVE_LANE_MASK4]]) ; CHECK-UF4-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[BROADCAST_SPLAT]], ptr align 4 [[TMP18]], <4 x i1> [[ACTIVE_LANE_MASK5]]) diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/fmax-without-fast-math-flags.ll b/llvm/test/Transforms/LoopVectorize/AArch64/fmax-without-fast-math-flags.ll index 6902dd990509e..a04367f32dd01 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/fmax-without-fast-math-flags.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/fmax-without-fast-math-flags.ll @@ -53,7 +53,7 @@ define float @fmaxnum(ptr %src, i64 %n) { ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x float> [ splat (float -1.000000e+07), %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x float> [ splat (float -1.000000e+07), %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds nuw float, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: [[TMP2:%.*]] = getelementptr 
inbounds nuw float, ptr [[GEP_SRC]], i32 4 +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[GEP_SRC]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[GEP_SRC]], align 4 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP2]], align 4 ; CHECK-NEXT: [[TMP7]] = call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[VEC_PHI]], <4 x float> [[WIDE_LOAD]]) @@ -128,10 +128,10 @@ define float @test_fmax_and_fmin(ptr %src.0, ptr %src.1, i64 %n) { ; CHECK-NEXT: [[VEC_PHI3:%.*]] = phi <4 x float> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP5:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[GEP_SRC_0:%.*]] = getelementptr inbounds nuw float, ptr [[SRC_0]], i64 [[IV]] ; CHECK-NEXT: [[GEP_SRC_1:%.*]] = getelementptr inbounds nuw float, ptr [[SRC_1]], i64 [[IV]] -; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[GEP_SRC_0]], i32 4 +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[GEP_SRC_0]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[GEP_SRC_0]], align 4 ; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x float>, ptr [[TMP2]], align 4 -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw float, ptr [[GEP_SRC_1]], i32 4 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw float, ptr [[GEP_SRC_1]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x float>, ptr [[GEP_SRC_1]], align 4 ; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x float>, ptr [[TMP3]], align 4 ; CHECK-NEXT: [[TMP4]] = call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[VEC_PHI2]], <4 x float> [[WIDE_LOAD]]) diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/fmin-without-fast-math-flags.ll b/llvm/test/Transforms/LoopVectorize/AArch64/fmin-without-fast-math-flags.ll index 193424d3eb70a..0bddc498f9e83 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/fmin-without-fast-math-flags.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/fmin-without-fast-math-flags.ll @@ -53,7 +53,7 @@ define float @fminnum(ptr %src, i64 %n) 
{ ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x float> [ splat (float -1.000000e+07), %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x float> [ splat (float -1.000000e+07), %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds nuw float, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[GEP_SRC]], i32 4 +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[GEP_SRC]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[GEP_SRC]], align 4 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP2]], align 4 ; CHECK-NEXT: [[TMP7]] = call <4 x float> @llvm.minnum.v4f32(<4 x float> [[VEC_PHI]], <4 x float> [[WIDE_LOAD]]) diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/fminimumnum.ll b/llvm/test/Transforms/LoopVectorize/AArch64/fminimumnum.ll index f15f04fe5f6f2..3a9d5c34bacab 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/fminimumnum.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/fminimumnum.ll @@ -20,17 +20,17 @@ define void @fmin32(ptr noundef readonly captures(none) %input1, ptr noundef rea ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[INPUT1]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw float, ptr [[TMP2]], i32 4 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw float, ptr [[TMP2]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP2]], align 4 ; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x float>, ptr [[TMP4]], align 4 ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[INPUT2]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw float, ptr [[TMP5]], i32 4 +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds 
nuw float, ptr [[TMP5]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x float>, ptr [[TMP5]], align 4 ; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x float>, ptr [[TMP7]], align 4 ; CHECK-NEXT: [[TMP8:%.*]] = call <4 x float> @llvm.minimumnum.v4f32(<4 x float> [[WIDE_LOAD]], <4 x float> [[WIDE_LOAD6]]) ; CHECK-NEXT: [[TMP9:%.*]] = call <4 x float> @llvm.minimumnum.v4f32(<4 x float> [[WIDE_LOAD5]], <4 x float> [[WIDE_LOAD7]]) ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[OUTPUT]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw float, ptr [[TMP10]], i32 4 +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw float, ptr [[TMP10]], i64 4 ; CHECK-NEXT: store <4 x float> [[TMP8]], ptr [[TMP10]], align 4 ; CHECK-NEXT: store <4 x float> [[TMP9]], ptr [[TMP12]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 @@ -97,17 +97,17 @@ define void @fmax32(ptr noundef readonly captures(none) %input1, ptr noundef rea ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[INPUT1]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw float, ptr [[TMP2]], i32 4 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw float, ptr [[TMP2]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP2]], align 4 ; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x float>, ptr [[TMP4]], align 4 ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[INPUT2]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw float, ptr [[TMP5]], i32 4 +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw float, ptr [[TMP5]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x float>, ptr [[TMP5]], align 4 ; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x float>, ptr [[TMP7]], align 4 ; CHECK-NEXT: 
[[TMP8:%.*]] = call <4 x float> @llvm.maximumnum.v4f32(<4 x float> [[WIDE_LOAD]], <4 x float> [[WIDE_LOAD6]]) ; CHECK-NEXT: [[TMP9:%.*]] = call <4 x float> @llvm.maximumnum.v4f32(<4 x float> [[WIDE_LOAD5]], <4 x float> [[WIDE_LOAD7]]) ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[OUTPUT]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw float, ptr [[TMP10]], i32 4 +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw float, ptr [[TMP10]], i64 4 ; CHECK-NEXT: store <4 x float> [[TMP8]], ptr [[TMP10]], align 4 ; CHECK-NEXT: store <4 x float> [[TMP9]], ptr [[TMP12]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 @@ -174,17 +174,17 @@ define void @fmin64(ptr noundef readonly captures(none) %input1, ptr noundef rea ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[INPUT1]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw double, ptr [[TMP2]], i32 2 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw double, ptr [[TMP2]], i64 2 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x double>, ptr [[TMP2]], align 8 ; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <2 x double>, ptr [[TMP4]], align 8 ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[INPUT2]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw double, ptr [[TMP5]], i32 2 +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw double, ptr [[TMP5]], i64 2 ; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <2 x double>, ptr [[TMP5]], align 8 ; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <2 x double>, ptr [[TMP7]], align 8 ; CHECK-NEXT: [[TMP8:%.*]] = call <2 x double> @llvm.minimumnum.v2f64(<2 x double> [[WIDE_LOAD]], <2 x double> [[WIDE_LOAD6]]) ; CHECK-NEXT: [[TMP9:%.*]] = call <2 x double> @llvm.minimumnum.v2f64(<2 x 
double> [[WIDE_LOAD5]], <2 x double> [[WIDE_LOAD7]]) ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[OUTPUT]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw double, ptr [[TMP10]], i32 2 +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw double, ptr [[TMP10]], i64 2 ; CHECK-NEXT: store <2 x double> [[TMP8]], ptr [[TMP10]], align 8 ; CHECK-NEXT: store <2 x double> [[TMP9]], ptr [[TMP12]], align 8 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 @@ -251,17 +251,17 @@ define void @fmax64(ptr noundef readonly captures(none) %input1, ptr noundef rea ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[INPUT1]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw double, ptr [[TMP2]], i32 2 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw double, ptr [[TMP2]], i64 2 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x double>, ptr [[TMP2]], align 8 ; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <2 x double>, ptr [[TMP4]], align 8 ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[INPUT2]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw double, ptr [[TMP5]], i32 2 +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw double, ptr [[TMP5]], i64 2 ; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <2 x double>, ptr [[TMP5]], align 8 ; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <2 x double>, ptr [[TMP7]], align 8 ; CHECK-NEXT: [[TMP8:%.*]] = call <2 x double> @llvm.maximumnum.v2f64(<2 x double> [[WIDE_LOAD]], <2 x double> [[WIDE_LOAD6]]) ; CHECK-NEXT: [[TMP9:%.*]] = call <2 x double> @llvm.maximumnum.v2f64(<2 x double> [[WIDE_LOAD5]], <2 x double> [[WIDE_LOAD7]]) ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[OUTPUT]], i64 0, i64 [[INDEX]] -; 
CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw double, ptr [[TMP10]], i32 2 +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw double, ptr [[TMP10]], i64 2 ; CHECK-NEXT: store <2 x double> [[TMP8]], ptr [[TMP10]], align 8 ; CHECK-NEXT: store <2 x double> [[TMP9]], ptr [[TMP12]], align 8 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 @@ -328,17 +328,17 @@ define void @fmin16(ptr noundef readonly captures(none) %input1, ptr noundef rea ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT1]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw half, ptr [[TMP2]], i32 8 +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw half, ptr [[TMP2]], i64 8 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x half>, ptr [[TMP2]], align 2 ; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <8 x half>, ptr [[TMP6]], align 2 ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT2]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw half, ptr [[TMP4]], i32 8 +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw half, ptr [[TMP4]], i64 8 ; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <8 x half>, ptr [[TMP4]], align 2 ; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <8 x half>, ptr [[TMP10]], align 2 ; CHECK-NEXT: [[TMP11:%.*]] = call <8 x half> @llvm.minimumnum.v8f16(<8 x half> [[WIDE_LOAD]], <8 x half> [[WIDE_LOAD6]]) ; CHECK-NEXT: [[TMP13:%.*]] = call <8 x half> @llvm.minimumnum.v8f16(<8 x half> [[WIDE_LOAD5]], <8 x half> [[WIDE_LOAD7]]) ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[OUTPUT]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw half, ptr [[TMP7]], i32 8 +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw half, ptr [[TMP7]], i64 8 ; CHECK-NEXT: store <8 x half> [[TMP11]], 
ptr [[TMP7]], align 2 ; CHECK-NEXT: store <8 x half> [[TMP13]], ptr [[TMP12]], align 2 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 @@ -405,17 +405,17 @@ define void @fmax16(ptr noundef readonly captures(none) %input1, ptr noundef rea ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT1]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw half, ptr [[TMP2]], i32 8 +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw half, ptr [[TMP2]], i64 8 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x half>, ptr [[TMP2]], align 2 ; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <8 x half>, ptr [[TMP6]], align 2 ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT2]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw half, ptr [[TMP4]], i32 8 +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw half, ptr [[TMP4]], i64 8 ; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <8 x half>, ptr [[TMP4]], align 2 ; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <8 x half>, ptr [[TMP10]], align 2 ; CHECK-NEXT: [[TMP11:%.*]] = call <8 x half> @llvm.maximumnum.v8f16(<8 x half> [[WIDE_LOAD]], <8 x half> [[WIDE_LOAD6]]) ; CHECK-NEXT: [[TMP13:%.*]] = call <8 x half> @llvm.maximumnum.v8f16(<8 x half> [[WIDE_LOAD5]], <8 x half> [[WIDE_LOAD7]]) ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[OUTPUT]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw half, ptr [[TMP7]], i32 8 +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw half, ptr [[TMP7]], i64 8 ; CHECK-NEXT: store <8 x half> [[TMP11]], ptr [[TMP7]], align 2 ; CHECK-NEXT: store <8 x half> [[TMP13]], ptr [[TMP12]], align 2 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 diff --git 
a/llvm/test/Transforms/LoopVectorize/AArch64/force-target-instruction-cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/force-target-instruction-cost.ll index 56edee44fe3b1..21b21774d18cf 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/force-target-instruction-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/force-target-instruction-cost.ll @@ -62,7 +62,7 @@ define void @test_iv_cost(ptr %ptr.start, i8 %a, i64 %b) { ; COST1: [[VECTOR_BODY]]: ; COST1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; COST1-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PTR_START]], i64 [[INDEX]] -; COST1-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 16 +; COST1-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 16 ; COST1-NEXT: store <16 x i8> zeroinitializer, ptr [[NEXT_GEP]], align 1 ; COST1-NEXT: store <16 x i8> zeroinitializer, ptr [[TMP0]], align 1 ; COST1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 @@ -328,7 +328,7 @@ define void @invalid_legacy_cost(i64 %N, ptr %x) #0 { ; COST1-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x ptr> poison, ptr [[TMP1]], i64 0 ; COST1-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x ptr> [[BROADCAST_SPLATINSERT]], <2 x ptr> poison, <2 x i32> zeroinitializer ; COST1-NEXT: [[TMP2:%.*]] = getelementptr ptr, ptr [[X]], i64 [[INDEX]] -; COST1-NEXT: [[TMP3:%.*]] = getelementptr ptr, ptr [[TMP2]], i32 2 +; COST1-NEXT: [[TMP3:%.*]] = getelementptr ptr, ptr [[TMP2]], i64 2 ; COST1-NEXT: store <2 x ptr> [[BROADCAST_SPLAT]], ptr [[TMP2]], align 8 ; COST1-NEXT: store <2 x ptr> [[BROADCAST_SPLAT]], ptr [[TMP3]], align 8 ; COST1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/fully-unrolled-cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/fully-unrolled-cost.ll index 1164778c19070..f645db16ed3c6 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/fully-unrolled-cost.ll +++ 
b/llvm/test/Transforms/LoopVectorize/AArch64/fully-unrolled-cost.ll @@ -50,7 +50,7 @@ define i64 @test_external_iv_user(ptr %a, ptr %b) #0 { ; CHECK-NEXT: Cost of 0 for VF 16: induction instruction %i.iv = phi i64 [ 0, %entry ], [ %i.iv.next, %for.body ] ; CHECK-NEXT: Cost of 0 for VF 16: EMIT vp<{{.+}}> = CANONICAL-INDUCTION ir<0>, vp<%index.next> ; CHECK: Cost for VF 16: 57 -; CHECK: LV: Selecting VF: vscale x 2 +; CHECK: LV: Selecting VF: 16 entry: br label %for.body diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs.ll b/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs.ll index 42a1940925968..7b42e565e127d 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs.ll @@ -25,7 +25,7 @@ define i32 @multi_exit_iv_uniform(i32 %a, i64 %N, ptr %dst) { ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP10:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP11:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[DST]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[TMP5]], i32 4 +; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[TMP5]], i64 4 ; CHECK-NEXT: store <4 x i64> [[TMP7]], ptr [[TMP5]], align 8 ; CHECK-NEXT: store <4 x i64> [[TMP7]], ptr [[TMP9]], align 8 ; CHECK-NEXT: [[TMP10]] = add <4 x i32> [[VEC_PHI]], splat (i32 -1) @@ -106,7 +106,7 @@ define i64 @pointer_induction_only(ptr %start, ptr %end) { ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 4 ; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[OFFSET_IDX]] -; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr [[NEXT_GEP]], i32 2 +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr [[NEXT_GEP]], i64 2 ; CHECK-NEXT: 
[[WIDE_LOAD4:%.*]] = load <2 x i32>, ptr [[TMP7]], align 1 ; CHECK-NEXT: [[TMP9:%.*]] = zext <2 x i32> [[WIDE_LOAD4]] to <2 x i64> ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 @@ -161,7 +161,7 @@ define i64 @int_and_pointer_iv(ptr %start, i32 %N) { ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 4 ; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[OFFSET_IDX]] -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i32, ptr [[NEXT_GEP]], i32 4 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i32, ptr [[NEXT_GEP]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <4 x i32>, ptr [[TMP3]], align 4 ; CHECK-NEXT: [[TMP5:%.*]] = zext <4 x i32> [[WIDE_LOAD3]] to <4 x i64> ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 @@ -205,7 +205,7 @@ define void @wide_truncated_iv(ptr %dst) { ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <8 x i8> [ , [[VECTOR_PH1]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[STEP_ADD:%.*]] = add <8 x i8> [[VEC_IND]], splat (i8 8) ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[DST]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP2]], i32 8 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP2]], i64 8 ; CHECK-NEXT: store <8 x i8> [[VEC_IND]], ptr [[TMP2]], align 1 ; CHECK-NEXT: store <8 x i8> [[STEP_ADD]], ptr [[TMP5]], align 1 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 @@ -287,7 +287,7 @@ define i64 @test_ptr_ivs_and_widened_ivs(ptr %src, i32 %N) { ; CHECK-NEXT: [[STEP_ADD:%.*]] = add <4 x i32> [[VEC_IND]], splat (i32 4) ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 4 ; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[OFFSET_IDX]] -; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i32, ptr [[NEXT_GEP]], i32 4 +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i32, ptr [[NEXT_GEP]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP6]], align 4 
; CHECK-NEXT: [[TMP7:%.*]] = xor <4 x i32> [[WIDE_LOAD]], splat (i32 1) ; CHECK-NEXT: [[TMP8:%.*]] = zext <4 x i32> [[TMP7]] to <4 x i64> diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/interleave-with-gaps.ll b/llvm/test/Transforms/LoopVectorize/AArch64/interleave-with-gaps.ll index 0c91661d20ae7..5b4bb70e6a479 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/interleave-with-gaps.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/interleave-with-gaps.ll @@ -3,9 +3,9 @@ target triple = "aarch64-linux-gnu" -; Original loop has trip count 16, but contains interleave groups with gaps, so +; Original loop has trip count 17, but contains interleave groups with gaps, so ; the last iteration must execute in the scalar loop. Thus the vector loop can -; only execute up to 15 iterations. +; only execute up to 16 iterations. define i64 @vector_loop_with_remaining_iterations(ptr %src, ptr noalias %dst, i32 %x) #0 { ; CHECK-LABEL: define i64 @vector_loop_with_remaining_iterations( ; CHECK-SAME: ptr [[SRC:%.*]], ptr noalias [[DST:%.*]], i32 [[X:%.*]]) #[[ATTR0:[0-9]+]] { diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/interleaving-load-store.ll b/llvm/test/Transforms/LoopVectorize/AArch64/interleaving-load-store.ll index 9b4151f30d640..f7060ec3512ac 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/interleaving-load-store.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/interleaving-load-store.ll @@ -35,9 +35,9 @@ define void @interleave_single_load_store(ptr %src, ptr %dst, i64 %N, i8 %a, i8 ; INTERLEAVE-4: vector.body: ; INTERLEAVE-4-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; INTERLEAVE-4-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[INDEX]] -; INTERLEAVE-4-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[TMP5]], i32 16 -; INTERLEAVE-4-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[TMP5]], i32 32 -; INTERLEAVE-4-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, 
ptr [[TMP5]], i32 48 +; INTERLEAVE-4-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[TMP5]], i64 16 +; INTERLEAVE-4-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[TMP5]], i64 32 +; INTERLEAVE-4-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[TMP5]], i64 48 ; INTERLEAVE-4-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1 ; INTERLEAVE-4-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP10]], align 1 ; INTERLEAVE-4-NEXT: [[WIDE_LOAD5:%.*]] = load <16 x i8>, ptr [[TMP11]], align 1 @@ -55,9 +55,9 @@ define void @interleave_single_load_store(ptr %src, ptr %dst, i64 %N, i8 %a, i8 ; INTERLEAVE-4-NEXT: [[TMP23:%.*]] = select <16 x i1> [[TMP15]], <16 x i8> [[BROADCAST_SPLAT]], <16 x i8> [[TMP19]] ; INTERLEAVE-4-NEXT: [[TMP24:%.*]] = select <16 x i1> [[TMP16]], <16 x i8> [[BROADCAST_SPLAT]], <16 x i8> [[TMP20]] ; INTERLEAVE-4-NEXT: [[TMP25:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[INDEX]] -; INTERLEAVE-4-NEXT: [[TMP30:%.*]] = getelementptr inbounds i8, ptr [[TMP25]], i32 16 -; INTERLEAVE-4-NEXT: [[TMP31:%.*]] = getelementptr inbounds i8, ptr [[TMP25]], i32 32 -; INTERLEAVE-4-NEXT: [[TMP32:%.*]] = getelementptr inbounds i8, ptr [[TMP25]], i32 48 +; INTERLEAVE-4-NEXT: [[TMP30:%.*]] = getelementptr inbounds i8, ptr [[TMP25]], i64 16 +; INTERLEAVE-4-NEXT: [[TMP31:%.*]] = getelementptr inbounds i8, ptr [[TMP25]], i64 32 +; INTERLEAVE-4-NEXT: [[TMP32:%.*]] = getelementptr inbounds i8, ptr [[TMP25]], i64 48 ; INTERLEAVE-4-NEXT: store <16 x i8> [[TMP21]], ptr [[TMP25]], align 1 ; INTERLEAVE-4-NEXT: store <16 x i8> [[TMP22]], ptr [[TMP30]], align 1 ; INTERLEAVE-4-NEXT: store <16 x i8> [[TMP23]], ptr [[TMP31]], align 1 @@ -70,7 +70,7 @@ define void @interleave_single_load_store(ptr %src, ptr %dst, i64 %N, i8 %a, i8 ; INTERLEAVE-4-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; INTERLEAVE-4: vec.epilog.iter.check: ; INTERLEAVE-4-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8 -; 
INTERLEAVE-4-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] +; INTERLEAVE-4-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]] ; INTERLEAVE-4: vec.epilog.ph: ; INTERLEAVE-4-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; INTERLEAVE-4-NEXT: [[N_MOD_VF9:%.*]] = urem i64 [[N]], 8 @@ -91,7 +91,7 @@ define void @interleave_single_load_store(ptr %src, ptr %dst, i64 %N, i8 %a, i8 ; INTERLEAVE-4-NEXT: store <8 x i8> [[TMP39]], ptr [[TMP40]], align 1 ; INTERLEAVE-4-NEXT: [[INDEX_NEXT18]] = add nuw i64 [[INDEX12]], 8 ; INTERLEAVE-4-NEXT: [[TMP42:%.*]] = icmp eq i64 [[INDEX_NEXT18]], [[N_VEC10]] -; INTERLEAVE-4-NEXT: br i1 [[TMP42]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; INTERLEAVE-4-NEXT: br i1 [[TMP42]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; INTERLEAVE-4: vec.epilog.middle.block: ; INTERLEAVE-4-NEXT: [[CMP_N11:%.*]] = icmp eq i64 [[N]], [[N_VEC10]] ; INTERLEAVE-4-NEXT: br i1 [[CMP_N11]], label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] @@ -109,7 +109,7 @@ define void @interleave_single_load_store(ptr %src, ptr %dst, i64 %N, i8 %a, i8 ; INTERLEAVE-4-NEXT: store i8 [[SEL]], ptr [[GEP_DST]], align 1 ; INTERLEAVE-4-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; INTERLEAVE-4-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; INTERLEAVE-4-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP4:![0-9]+]] +; INTERLEAVE-4-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP5:![0-9]+]] ; INTERLEAVE-4: exit: ; INTERLEAVE-4-NEXT: ret void ; @@ -137,7 +137,7 @@ define void @interleave_single_load_store(ptr %src, ptr %dst, i64 %N, i8 %a, i8 ; INTERLEAVE-2: vector.body: ; INTERLEAVE-2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, 
[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; INTERLEAVE-2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[INDEX]] -; INTERLEAVE-2-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i32 16 +; INTERLEAVE-2-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i64 16 ; INTERLEAVE-2-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 ; INTERLEAVE-2-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP6]], align 1 ; INTERLEAVE-2-NEXT: [[TMP7:%.*]] = icmp sgt <16 x i8> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] @@ -147,7 +147,7 @@ define void @interleave_single_load_store(ptr %src, ptr %dst, i64 %N, i8 %a, i8 ; INTERLEAVE-2-NEXT: [[TMP11:%.*]] = select <16 x i1> [[TMP7]], <16 x i8> [[BROADCAST_SPLAT]], <16 x i8> [[TMP9]] ; INTERLEAVE-2-NEXT: [[TMP12:%.*]] = select <16 x i1> [[TMP8]], <16 x i8> [[BROADCAST_SPLAT]], <16 x i8> [[TMP10]] ; INTERLEAVE-2-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[INDEX]] -; INTERLEAVE-2-NEXT: [[TMP16:%.*]] = getelementptr inbounds i8, ptr [[TMP13]], i32 16 +; INTERLEAVE-2-NEXT: [[TMP16:%.*]] = getelementptr inbounds i8, ptr [[TMP13]], i64 16 ; INTERLEAVE-2-NEXT: store <16 x i8> [[TMP11]], ptr [[TMP13]], align 1 ; INTERLEAVE-2-NEXT: store <16 x i8> [[TMP12]], ptr [[TMP16]], align 1 ; INTERLEAVE-2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 @@ -158,7 +158,7 @@ define void @interleave_single_load_store(ptr %src, ptr %dst, i64 %N, i8 %a, i8 ; INTERLEAVE-2-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; INTERLEAVE-2: vec.epilog.iter.check: ; INTERLEAVE-2-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8 -; INTERLEAVE-2-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] +; INTERLEAVE-2-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]] ; INTERLEAVE-2: vec.epilog.ph: ; INTERLEAVE-2-NEXT: 
[[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; INTERLEAVE-2-NEXT: [[N_MOD_VF7:%.*]] = urem i64 [[N]], 8 @@ -179,7 +179,7 @@ define void @interleave_single_load_store(ptr %src, ptr %dst, i64 %N, i8 %a, i8 ; INTERLEAVE-2-NEXT: store <8 x i8> [[TMP23]], ptr [[TMP24]], align 1 ; INTERLEAVE-2-NEXT: [[INDEX_NEXT16]] = add nuw i64 [[INDEX10]], 8 ; INTERLEAVE-2-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT16]], [[N_VEC8]] -; INTERLEAVE-2-NEXT: br i1 [[TMP26]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; INTERLEAVE-2-NEXT: br i1 [[TMP26]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; INTERLEAVE-2: vec.epilog.middle.block: ; INTERLEAVE-2-NEXT: [[CMP_N9:%.*]] = icmp eq i64 [[N]], [[N_VEC8]] ; INTERLEAVE-2-NEXT: br i1 [[CMP_N9]], label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/interleaving-reduction.ll b/llvm/test/Transforms/LoopVectorize/AArch64/interleaving-reduction.ll index aa94763b44a30..53cb0653fd241 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/interleaving-reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/interleaving-reduction.ll @@ -29,9 +29,9 @@ define i32 @interleave_integer_reduction(ptr %src, i64 %N) { ; INTERLEAVE-4-NEXT: [[VEC_PHI3:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP6:%.*]], [[VECTOR_BODY]] ] ; INTERLEAVE-4-NEXT: [[VEC_PHI4:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ] ; INTERLEAVE-4-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[SRC:%.*]], i64 [[INDEX]] -; INTERLEAVE-4-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i32 4 -; INTERLEAVE-4-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i32 8 -; INTERLEAVE-4-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i32 12 +; 
INTERLEAVE-4-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i64 4 +; INTERLEAVE-4-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i64 8 +; INTERLEAVE-4-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i64 12 ; INTERLEAVE-4-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 1 ; INTERLEAVE-4-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x i32>, ptr [[TMP1]], align 1 ; INTERLEAVE-4-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x i32>, ptr [[TMP2]], align 1 @@ -103,7 +103,7 @@ define i32 @interleave_integer_reduction(ptr %src, i64 %N) { ; INTERLEAVE-2-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP2:%.*]], [[VECTOR_BODY]] ] ; INTERLEAVE-2-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP3:%.*]], [[VECTOR_BODY]] ] ; INTERLEAVE-2-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[SRC:%.*]], i64 [[INDEX]] -; INTERLEAVE-2-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i32 4 +; INTERLEAVE-2-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i64 4 ; INTERLEAVE-2-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 1 ; INTERLEAVE-2-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i32>, ptr [[TMP1]], align 1 ; INTERLEAVE-2-NEXT: [[TMP2]] = add <4 x i32> [[VEC_PHI]], [[WIDE_LOAD]] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/intrinsiccost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/intrinsiccost.ll index ee3a4a04566c9..3eb42845bec4a 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/intrinsiccost.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/intrinsiccost.ll @@ -36,12 +36,12 @@ define void @saddsat(ptr nocapture readonly %pSrc, i16 signext %offset, ptr noca ; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PSRC:%.*]], i64 [[OFFSET_IDX]] ; CHECK-NEXT: [[OFFSET_IDX2:%.*]] = mul i64 [[INDEX]], 2 ; CHECK-NEXT: [[NEXT_GEP3:%.*]] = getelementptr i8, ptr [[PDST:%.*]], i64 [[OFFSET_IDX2]] -; CHECK-NEXT: [[TMP1:%.*]] = 
getelementptr i16, ptr [[NEXT_GEP]], i32 8 +; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i16, ptr [[NEXT_GEP]], i64 8 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[NEXT_GEP]], align 2 ; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <8 x i16>, ptr [[TMP1]], align 2 ; CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> [[WIDE_LOAD]], <8 x i16> [[BROADCAST_SPLAT]]) ; CHECK-NEXT: [[TMP3:%.*]] = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> [[WIDE_LOAD4]], <8 x i16> [[BROADCAST_SPLAT]]) -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i16, ptr [[NEXT_GEP3]], i32 8 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i16, ptr [[NEXT_GEP3]], i64 8 ; CHECK-NEXT: store <8 x i16> [[TMP2]], ptr [[NEXT_GEP3]], align 2 ; CHECK-NEXT: store <8 x i16> [[TMP3]], ptr [[TMP4]], align 2 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 @@ -160,12 +160,12 @@ define void @umin(ptr nocapture readonly %pSrc, i8 signext %offset, ptr nocaptur ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PSRC:%.*]], i64 [[INDEX]] ; CHECK-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, ptr [[PDST:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 16 +; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 16 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[NEXT_GEP]], align 2 ; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP1]], align 2 ; CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.umin.v16i8(<16 x i8> [[WIDE_LOAD]], <16 x i8> [[BROADCAST_SPLAT]]) ; CHECK-NEXT: [[TMP3:%.*]] = call <16 x i8> @llvm.umin.v16i8(<16 x i8> [[WIDE_LOAD3]], <16 x i8> [[BROADCAST_SPLAT]]) -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[NEXT_GEP2]], i32 16 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[NEXT_GEP2]], i64 16 ; CHECK-NEXT: store <16 x i8> [[TMP2]], ptr [[NEXT_GEP2]], align 2 ; CHECK-NEXT: store <16 x i8> [[TMP3]], 
ptr [[TMP4]], align 2 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/licm-calls.ll b/llvm/test/Transforms/LoopVectorize/AArch64/licm-calls.ll index 0a9494e4c7ade..c43d62404006d 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/licm-calls.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/licm-calls.ll @@ -22,7 +22,7 @@ define void @licm_replicate_call(double %x, ptr %dst) { ; CHECK-NEXT: [[TMP6:%.*]] = fmul <2 x double> [[TMP3]], [[TMP4]] ; CHECK-NEXT: [[TMP7:%.*]] = fmul <2 x double> [[TMP3]], [[TMP5]] ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds double, ptr [[DST]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds double, ptr [[TMP8]], i32 2 +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds double, ptr [[TMP8]], i64 2 ; CHECK-NEXT: store <2 x double> [[TMP6]], ptr [[TMP8]], align 8 ; CHECK-NEXT: store <2 x double> [[TMP7]], ptr [[TMP10]], align 8 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/optsize_minsize.ll b/llvm/test/Transforms/LoopVectorize/AArch64/optsize_minsize.ll index c768fec31a497..bdbf08aecf6b3 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/optsize_minsize.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/optsize_minsize.ll @@ -103,7 +103,7 @@ define void @vectorize_without_optsize(ptr %p, i32 %x, i64 %n) { ; DEFAULT: [[VECTOR_BODY]]: ; DEFAULT-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; DEFAULT-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 [[INDEX]] -; DEFAULT-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 4 +; DEFAULT-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 4 ; DEFAULT-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4 ; DEFAULT-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP3]], align 4 ; DEFAULT-NEXT: [[TMP4:%.*]] = add nsw <4 x i32> 
[[WIDE_LOAD]], [[BROADCAST_SPLAT]] @@ -621,17 +621,17 @@ define void @dont_vectorize_with_minsize() { ; DEFAULT: [[VECTOR_BODY]]: ; DEFAULT-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; DEFAULT-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @B, i64 0, i64 [[INDEX]] -; DEFAULT-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP1]], i32 8 +; DEFAULT-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP1]], i64 8 ; DEFAULT-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i32>, ptr [[TMP1]], align 4 ; DEFAULT-NEXT: [[WIDE_LOAD1:%.*]] = load <8 x i32>, ptr [[TMP3]], align 4 ; DEFAULT-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @C, i64 0, i64 [[INDEX]] -; DEFAULT-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP4]], i32 8 +; DEFAULT-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP4]], i64 8 ; DEFAULT-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i32>, ptr [[TMP4]], align 4 ; DEFAULT-NEXT: [[WIDE_LOAD3:%.*]] = load <8 x i32>, ptr [[TMP6]], align 4 ; DEFAULT-NEXT: [[TMP7:%.*]] = mul nsw <8 x i32> [[WIDE_LOAD]], [[WIDE_LOAD2]] ; DEFAULT-NEXT: [[TMP8:%.*]] = mul nsw <8 x i32> [[WIDE_LOAD1]], [[WIDE_LOAD3]] ; DEFAULT-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [1000 x i16], ptr @A, i64 0, i64 [[INDEX]] -; DEFAULT-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i16, ptr [[TMP9]], i32 8 +; DEFAULT-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i16, ptr [[TMP9]], i64 8 ; DEFAULT-NEXT: [[WIDE_LOAD4:%.*]] = load <8 x i16>, ptr [[TMP9]], align 2 ; DEFAULT-NEXT: [[WIDE_LOAD5:%.*]] = load <8 x i16>, ptr [[TMP11]], align 2 ; DEFAULT-NEXT: [[TMP12:%.*]] = trunc <8 x i32> [[TMP7]] to <8 x i16> @@ -737,17 +737,17 @@ define void @vectorization_forced_minsize_reduce_width() { ; DEFAULT: [[VECTOR_BODY]]: ; DEFAULT-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; DEFAULT-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [1000 
x i32], ptr @B, i64 0, i64 [[INDEX]] -; DEFAULT-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP1]], i32 8 +; DEFAULT-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP1]], i64 8 ; DEFAULT-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i32>, ptr [[TMP1]], align 4 ; DEFAULT-NEXT: [[WIDE_LOAD1:%.*]] = load <8 x i32>, ptr [[TMP3]], align 4 ; DEFAULT-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @C, i64 0, i64 [[INDEX]] -; DEFAULT-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP4]], i32 8 +; DEFAULT-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP4]], i64 8 ; DEFAULT-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i32>, ptr [[TMP4]], align 4 ; DEFAULT-NEXT: [[WIDE_LOAD3:%.*]] = load <8 x i32>, ptr [[TMP6]], align 4 ; DEFAULT-NEXT: [[TMP7:%.*]] = mul nsw <8 x i32> [[WIDE_LOAD]], [[WIDE_LOAD2]] ; DEFAULT-NEXT: [[TMP8:%.*]] = mul nsw <8 x i32> [[WIDE_LOAD1]], [[WIDE_LOAD3]] ; DEFAULT-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [1000 x i16], ptr @A, i64 0, i64 [[INDEX]] -; DEFAULT-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i16, ptr [[TMP9]], i32 8 +; DEFAULT-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i16, ptr [[TMP9]], i64 8 ; DEFAULT-NEXT: [[WIDE_LOAD4:%.*]] = load <8 x i16>, ptr [[TMP9]], align 2 ; DEFAULT-NEXT: [[WIDE_LOAD5:%.*]] = load <8 x i16>, ptr [[TMP11]], align 2 ; DEFAULT-NEXT: [[TMP12:%.*]] = trunc <8 x i32> [[TMP7]] to <8 x i16> diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-chained.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-chained.ll index 287226f14b753..dd0107b8c4bff 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-chained.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-chained.ll @@ -52,34 +52,38 @@ define i32 @chained_partial_reduce_add_sub(ptr %a, ptr %b, ptr %c, i32 %N) #0 { ; CHECK-SVE-NEXT: [[CMP28_NOT:%.*]] = icmp ult i32 [[N]], 2 ; CHECK-SVE-NEXT: [[DIV27:%.*]] = lshr i32 [[N]], 1 ; CHECK-SVE-NEXT: 
[[WIDE_TRIP_COUNT:%.*]] = zext nneg i32 [[DIV27]] to i64 -; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 16 +; CHECK-SVE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-SVE-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4 +; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], [[TMP1]] ; CHECK-SVE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-SVE: vector.ph: -; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 16 +; CHECK-SVE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16 +; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]] ; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]] ; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-SVE: vector.body: ; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] +; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] ; CHECK-SVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]] ; CHECK-SVE-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDEX]] ; CHECK-SVE-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], i64 [[INDEX]] -; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 -; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1 -; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP9]], align 1 -; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-SVE-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32> -; CHECK-SVE-NEXT: [[TMP5:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP4]] -; CHECK-SVE-NEXT: 
[[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP5]]) -; CHECK-SVE-NEXT: [[TMP6:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> -; CHECK-SVE-NEXT: [[TMP11:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP6]] -; CHECK-SVE-NEXT: [[TMP12:%.*]] = sub <16 x i32> zeroinitializer, [[TMP11]] -; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP12]]) -; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP7]], align 1 +; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load , ptr [[TMP8]], align 1 +; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load , ptr [[TMP9]], align 1 +; CHECK-SVE-NEXT: [[TMP13:%.*]] = sext [[WIDE_LOAD]] to +; CHECK-SVE-NEXT: [[TMP15:%.*]] = sext [[WIDE_LOAD1]] to +; CHECK-SVE-NEXT: [[TMP16:%.*]] = mul nsw [[TMP13]], [[TMP15]] +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP16]]) +; CHECK-SVE-NEXT: [[TMP10:%.*]] = sext [[WIDE_LOAD2]] to +; CHECK-SVE-NEXT: [[TMP11:%.*]] = mul nsw [[TMP13]], [[TMP10]] +; CHECK-SVE-NEXT: [[TMP12:%.*]] = sub zeroinitializer, [[TMP11]] +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[PARTIAL_REDUCE]], [[TMP12]]) +; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-SVE-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-SVE-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK-SVE: middle.block: -; CHECK-SVE-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE3]]) +; CHECK-SVE-NEXT: [[TMP14:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[PARTIAL_REDUCE3]]) ; CHECK-SVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] ; CHECK-SVE-NEXT: br i1 [[CMP_N]], label 
[[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] ; CHECK-SVE: scalar.ph: @@ -201,33 +205,37 @@ define i32 @chained_partial_reduce_add_add(ptr %a, ptr %b, ptr %c, i32 %N) #0 { ; CHECK-SVE-NEXT: [[CMP28_NOT:%.*]] = icmp ult i32 [[N]], 2 ; CHECK-SVE-NEXT: [[DIV27:%.*]] = lshr i32 [[N]], 1 ; CHECK-SVE-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext nneg i32 [[DIV27]] to i64 -; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 16 +; CHECK-SVE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-SVE-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4 +; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], [[TMP1]] ; CHECK-SVE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-SVE: vector.ph: -; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 16 +; CHECK-SVE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16 +; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]] ; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]] ; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-SVE: vector.body: ; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] +; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] ; CHECK-SVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]] ; CHECK-SVE-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDEX]] ; CHECK-SVE-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], i64 [[INDEX]] -; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 -; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1 -; CHECK-SVE-NEXT: 
[[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP9]], align 1 -; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-SVE-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32> -; CHECK-SVE-NEXT: [[TMP6:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP4]] -; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP6]]) -; CHECK-SVE-NEXT: [[TMP5:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> -; CHECK-SVE-NEXT: [[TMP10:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP5]] -; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP10]]) -; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP7]], align 1 +; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load , ptr [[TMP8]], align 1 +; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load , ptr [[TMP9]], align 1 +; CHECK-SVE-NEXT: [[TMP12:%.*]] = sext [[WIDE_LOAD]] to +; CHECK-SVE-NEXT: [[TMP14:%.*]] = sext [[WIDE_LOAD1]] to +; CHECK-SVE-NEXT: [[TMP15:%.*]] = mul nsw [[TMP12]], [[TMP14]] +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP15]]) +; CHECK-SVE-NEXT: [[TMP10:%.*]] = sext [[WIDE_LOAD2]] to +; CHECK-SVE-NEXT: [[TMP11:%.*]] = mul nsw [[TMP12]], [[TMP10]] +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[PARTIAL_REDUCE]], [[TMP11]]) +; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-SVE-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-SVE-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK-SVE: middle.block: -; CHECK-SVE-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE3]]) +; CHECK-SVE-NEXT: [[TMP13:%.*]] = call i32 
@llvm.vector.reduce.add.nxv4i32( [[PARTIAL_REDUCE3]]) ; CHECK-SVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] ; CHECK-SVE-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] ; CHECK-SVE: scalar.ph: @@ -349,34 +357,38 @@ define i32 @chained_partial_reduce_sub_add(ptr %a, ptr %b, ptr %c, i32 %N) #0 { ; CHECK-SVE-NEXT: [[CMP28_NOT:%.*]] = icmp ult i32 [[N]], 2 ; CHECK-SVE-NEXT: [[DIV27:%.*]] = lshr i32 [[N]], 1 ; CHECK-SVE-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext nneg i32 [[DIV27]] to i64 -; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 16 +; CHECK-SVE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-SVE-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4 +; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], [[TMP1]] ; CHECK-SVE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-SVE: vector.ph: -; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 16 +; CHECK-SVE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16 +; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]] ; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]] ; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-SVE: vector.body: ; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] +; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] ; CHECK-SVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]] ; CHECK-SVE-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDEX]] ; CHECK-SVE-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], i64 [[INDEX]] -; 
CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 -; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1 -; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP9]], align 1 -; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-SVE-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32> -; CHECK-SVE-NEXT: [[TMP5:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP4]] -; CHECK-SVE-NEXT: [[TMP6:%.*]] = sub nsw <16 x i32> zeroinitializer, [[TMP5]] -; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP6]]) -; CHECK-SVE-NEXT: [[TMP11:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> -; CHECK-SVE-NEXT: [[TMP12:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP11]] -; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP12]]) -; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP7]], align 1 +; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load , ptr [[TMP8]], align 1 +; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load , ptr [[TMP9]], align 1 +; CHECK-SVE-NEXT: [[TMP13:%.*]] = sext [[WIDE_LOAD]] to +; CHECK-SVE-NEXT: [[TMP15:%.*]] = sext [[WIDE_LOAD1]] to +; CHECK-SVE-NEXT: [[TMP16:%.*]] = mul nsw [[TMP13]], [[TMP15]] +; CHECK-SVE-NEXT: [[TMP10:%.*]] = sub nsw zeroinitializer, [[TMP16]] +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP10]]) +; CHECK-SVE-NEXT: [[TMP11:%.*]] = sext [[WIDE_LOAD2]] to +; CHECK-SVE-NEXT: [[TMP12:%.*]] = mul nsw [[TMP13]], [[TMP11]] +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[PARTIAL_REDUCE]], [[TMP12]]) +; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-SVE-NEXT: [[TMP20:%.*]] = icmp eq i64 
[[INDEX_NEXT]], [[N_VEC]] ; CHECK-SVE-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK-SVE: middle.block: -; CHECK-SVE-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE3]]) +; CHECK-SVE-NEXT: [[TMP14:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[PARTIAL_REDUCE3]]) ; CHECK-SVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] ; CHECK-SVE-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] ; CHECK-SVE: scalar.ph: @@ -502,35 +514,39 @@ define i32 @chained_partial_reduce_sub_sub(ptr %a, ptr %b, ptr %c, i32 %N) #0 { ; CHECK-SVE-NEXT: [[CMP28_NOT:%.*]] = icmp ult i32 [[N]], 2 ; CHECK-SVE-NEXT: [[DIV27:%.*]] = lshr i32 [[N]], 1 ; CHECK-SVE-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext nneg i32 [[DIV27]] to i64 -; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 16 +; CHECK-SVE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-SVE-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4 +; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], [[TMP1]] ; CHECK-SVE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-SVE: vector.ph: -; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 16 +; CHECK-SVE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16 +; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]] ; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]] ; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-SVE: vector.body: ; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] +; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ 
[[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] ; CHECK-SVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]] ; CHECK-SVE-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDEX]] ; CHECK-SVE-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], i64 [[INDEX]] -; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 -; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1 -; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP9]], align 1 -; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-SVE-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32> -; CHECK-SVE-NEXT: [[TMP5:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP4]] -; CHECK-SVE-NEXT: [[TMP6:%.*]] = sub nsw <16 x i32> zeroinitializer, [[TMP5]] -; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP6]]) -; CHECK-SVE-NEXT: [[TMP10:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> -; CHECK-SVE-NEXT: [[TMP12:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP10]] -; CHECK-SVE-NEXT: [[TMP13:%.*]] = sub <16 x i32> zeroinitializer, [[TMP12]] -; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP13]]) -; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP7]], align 1 +; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load , ptr [[TMP8]], align 1 +; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load , ptr [[TMP9]], align 1 +; CHECK-SVE-NEXT: [[TMP14:%.*]] = sext [[WIDE_LOAD]] to +; CHECK-SVE-NEXT: [[TMP16:%.*]] = sext [[WIDE_LOAD1]] to +; CHECK-SVE-NEXT: [[TMP17:%.*]] = mul nsw [[TMP14]], [[TMP16]] +; CHECK-SVE-NEXT: [[TMP10:%.*]] = sub nsw zeroinitializer, [[TMP17]] +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( 
[[VEC_PHI]], [[TMP10]]) +; CHECK-SVE-NEXT: [[TMP11:%.*]] = sext [[WIDE_LOAD2]] to +; CHECK-SVE-NEXT: [[TMP12:%.*]] = mul nsw [[TMP14]], [[TMP11]] +; CHECK-SVE-NEXT: [[TMP13:%.*]] = sub zeroinitializer, [[TMP12]] +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[PARTIAL_REDUCE]], [[TMP13]]) +; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-SVE-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-SVE-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK-SVE: middle.block: -; CHECK-SVE-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE3]]) +; CHECK-SVE-NEXT: [[TMP15:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[PARTIAL_REDUCE3]]) ; CHECK-SVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] ; CHECK-SVE-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] ; CHECK-SVE: scalar.ph: @@ -658,35 +674,39 @@ define i32 @chained_partial_reduce_add_add_add(ptr %a, ptr %b, ptr %c, i32 %N) # ; CHECK-SVE-NEXT: [[CMP28_NOT:%.*]] = icmp ult i32 [[N]], 2 ; CHECK-SVE-NEXT: [[DIV27:%.*]] = lshr i32 [[N]], 1 ; CHECK-SVE-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext nneg i32 [[DIV27]] to i64 -; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 16 +; CHECK-SVE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-SVE-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4 +; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], [[TMP1]] ; CHECK-SVE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-SVE: vector.ph: -; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 16 +; CHECK-SVE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16 +; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]] ; 
CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]] ; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-SVE: vector.body: ; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE4:%.*]], [[VECTOR_BODY]] ] +; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE4:%.*]], [[VECTOR_BODY]] ] ; CHECK-SVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]] ; CHECK-SVE-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDEX]] ; CHECK-SVE-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], i64 [[INDEX]] -; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 -; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1 -; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP9]], align 1 -; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-SVE-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32> -; CHECK-SVE-NEXT: [[TMP6:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP4]] -; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP6]]) -; CHECK-SVE-NEXT: [[TMP5:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> -; CHECK-SVE-NEXT: [[TMP11:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP5]] -; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP11]]) -; CHECK-SVE-NEXT: [[TMP12:%.*]] = mul nsw <16 x i32> [[TMP4]], [[TMP5]] -; CHECK-SVE-NEXT: [[PARTIAL_REDUCE4]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE3]], <16 x i32> [[TMP12]]) -; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; CHECK-SVE-NEXT: 
[[WIDE_LOAD:%.*]] = load , ptr [[TMP7]], align 1 +; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load , ptr [[TMP8]], align 1 +; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load , ptr [[TMP9]], align 1 +; CHECK-SVE-NEXT: [[TMP13:%.*]] = sext [[WIDE_LOAD]] to +; CHECK-SVE-NEXT: [[TMP15:%.*]] = sext [[WIDE_LOAD1]] to +; CHECK-SVE-NEXT: [[TMP16:%.*]] = mul nsw [[TMP13]], [[TMP15]] +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP16]]) +; CHECK-SVE-NEXT: [[TMP10:%.*]] = sext [[WIDE_LOAD2]] to +; CHECK-SVE-NEXT: [[TMP11:%.*]] = mul nsw [[TMP13]], [[TMP10]] +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3:%.*]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[PARTIAL_REDUCE]], [[TMP11]]) +; CHECK-SVE-NEXT: [[TMP12:%.*]] = mul nsw [[TMP15]], [[TMP10]] +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE4]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[PARTIAL_REDUCE3]], [[TMP12]]) +; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-SVE-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-SVE-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK-SVE: middle.block: -; CHECK-SVE-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE4]]) +; CHECK-SVE-NEXT: [[TMP14:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[PARTIAL_REDUCE4]]) ; CHECK-SVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] ; CHECK-SVE-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] ; CHECK-SVE: scalar.ph: @@ -818,37 +838,41 @@ define i32 @chained_partial_reduce_sub_add_sub(ptr %a, ptr %b, ptr %c, i32 %N) # ; CHECK-SVE-NEXT: [[CMP28_NOT:%.*]] = icmp ult i32 [[N]], 2 ; CHECK-SVE-NEXT: [[DIV27:%.*]] = lshr i32 [[N]], 1 ; CHECK-SVE-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext nneg i32 [[DIV27]] to i64 -; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 16 +; CHECK-SVE-NEXT: 
[[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-SVE-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4 +; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], [[TMP1]] ; CHECK-SVE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-SVE: vector.ph: -; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 16 +; CHECK-SVE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16 +; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]] ; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]] ; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-SVE: vector.body: ; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE4:%.*]], [[VECTOR_BODY]] ] +; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE4:%.*]], [[VECTOR_BODY]] ] ; CHECK-SVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]] ; CHECK-SVE-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDEX]] ; CHECK-SVE-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], i64 [[INDEX]] -; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 -; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1 -; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP9]], align 1 -; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-SVE-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32> -; CHECK-SVE-NEXT: [[TMP5:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP4]] -; CHECK-SVE-NEXT: [[TMP6:%.*]] = sub nsw <16 x i32> zeroinitializer, [[TMP5]] -; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 
x i32> [[VEC_PHI]], <16 x i32> [[TMP6]]) -; CHECK-SVE-NEXT: [[TMP11:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> -; CHECK-SVE-NEXT: [[TMP13:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP11]] -; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP13]]) -; CHECK-SVE-NEXT: [[TMP14:%.*]] = mul nsw <16 x i32> [[TMP4]], [[TMP11]] -; CHECK-SVE-NEXT: [[TMP10:%.*]] = sub <16 x i32> zeroinitializer, [[TMP14]] -; CHECK-SVE-NEXT: [[PARTIAL_REDUCE4]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE3]], <16 x i32> [[TMP10]]) -; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP7]], align 1 +; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load , ptr [[TMP8]], align 1 +; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load , ptr [[TMP9]], align 1 +; CHECK-SVE-NEXT: [[TMP15:%.*]] = sext [[WIDE_LOAD]] to +; CHECK-SVE-NEXT: [[TMP17:%.*]] = sext [[WIDE_LOAD1]] to +; CHECK-SVE-NEXT: [[TMP18:%.*]] = mul nsw [[TMP15]], [[TMP17]] +; CHECK-SVE-NEXT: [[TMP10:%.*]] = sub nsw zeroinitializer, [[TMP18]] +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP10]]) +; CHECK-SVE-NEXT: [[TMP11:%.*]] = sext [[WIDE_LOAD2]] to +; CHECK-SVE-NEXT: [[TMP12:%.*]] = mul nsw [[TMP15]], [[TMP11]] +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3:%.*]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[PARTIAL_REDUCE]], [[TMP12]]) +; CHECK-SVE-NEXT: [[TMP13:%.*]] = mul nsw [[TMP17]], [[TMP11]] +; CHECK-SVE-NEXT: [[TMP14:%.*]] = sub zeroinitializer, [[TMP13]] +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE4]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[PARTIAL_REDUCE3]], [[TMP14]]) +; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-SVE-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-SVE-NEXT: br i1 [[TMP22]], label 
[[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; CHECK-SVE: middle.block: -; CHECK-SVE-NEXT: [[TMP12:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE4]]) +; CHECK-SVE-NEXT: [[TMP16:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[PARTIAL_REDUCE4]]) ; CHECK-SVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] ; CHECK-SVE-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] ; CHECK-SVE: scalar.ph: @@ -978,32 +1002,36 @@ define i32 @chained_partial_reduce_madd_extadd(ptr %a, ptr %b, ptr %c, i32 %N) # ; CHECK-SVE-NEXT: [[CMP28_NOT:%.*]] = icmp ult i32 [[N]], 2 ; CHECK-SVE-NEXT: [[DIV27:%.*]] = lshr i32 [[N]], 1 ; CHECK-SVE-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext nneg i32 [[DIV27]] to i64 -; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 16 +; CHECK-SVE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-SVE-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4 +; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], [[TMP1]] ; CHECK-SVE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-SVE: vector.ph: -; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 16 +; CHECK-SVE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16 +; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]] ; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]] ; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-SVE: vector.body: ; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] +; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] ; CHECK-SVE-NEXT: 
[[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]] ; CHECK-SVE-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDEX]] ; CHECK-SVE-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], i64 [[INDEX]] -; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 -; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1 -; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP9]], align 1 -; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-SVE-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32> -; CHECK-SVE-NEXT: [[TMP6:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP4]] -; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP6]]) -; CHECK-SVE-NEXT: [[TMP5:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> -; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP5]]) -; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP7]], align 1 +; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load , ptr [[TMP8]], align 1 +; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load , ptr [[TMP9]], align 1 +; CHECK-SVE-NEXT: [[TMP11:%.*]] = sext [[WIDE_LOAD]] to +; CHECK-SVE-NEXT: [[TMP13:%.*]] = sext [[WIDE_LOAD1]] to +; CHECK-SVE-NEXT: [[TMP14:%.*]] = mul nsw [[TMP11]], [[TMP13]] +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP14]]) +; CHECK-SVE-NEXT: [[TMP10:%.*]] = sext [[WIDE_LOAD2]] to +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[PARTIAL_REDUCE]], [[TMP10]]) +; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-SVE-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-SVE-NEXT: 
br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK-SVE: middle.block: -; CHECK-SVE-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE3]]) +; CHECK-SVE-NEXT: [[TMP12:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[PARTIAL_REDUCE3]]) ; CHECK-SVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] ; CHECK-SVE-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] ; CHECK-SVE: scalar.ph: @@ -1118,28 +1146,32 @@ define i32 @chained_partial_reduce_extadd_extadd(ptr %a, ptr %b, i32 %N) #0 { ; CHECK-SVE-NEXT: [[CMP28_NOT:%.*]] = icmp ult i32 [[N]], 2 ; CHECK-SVE-NEXT: [[DIV27:%.*]] = lshr i32 [[N]], 1 ; CHECK-SVE-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext nneg i32 [[DIV27]] to i64 -; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 16 +; CHECK-SVE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-SVE-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4 +; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], [[TMP1]] ; CHECK-SVE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-SVE: vector.ph: -; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 16 +; CHECK-SVE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16 +; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]] ; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]] ; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-SVE: vector.body: ; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE2:%.*]], [[VECTOR_BODY]] ] +; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE2:%.*]], [[VECTOR_BODY]] ] ; 
CHECK-SVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]] ; CHECK-SVE-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDEX]] -; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 -; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1 -; CHECK-SVE-NEXT: [[TMP2:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP2]]) -; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32> -; CHECK-SVE-NEXT: [[PARTIAL_REDUCE2]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP3]]) -; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP7]], align 1 +; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load , ptr [[TMP8]], align 1 +; CHECK-SVE-NEXT: [[TMP6:%.*]] = sext [[WIDE_LOAD]] to +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP6]]) +; CHECK-SVE-NEXT: [[TMP10:%.*]] = sext [[WIDE_LOAD1]] to +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE2]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[PARTIAL_REDUCE]], [[TMP10]]) +; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-SVE-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-SVE-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; CHECK-SVE: middle.block: -; CHECK-SVE-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE2]]) +; CHECK-SVE-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[PARTIAL_REDUCE2]]) ; CHECK-SVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] ; CHECK-SVE-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] ; 
CHECK-SVE: scalar.ph: @@ -1251,32 +1283,36 @@ define i32 @chained_partial_reduce_extadd_madd(ptr %a, ptr %b, ptr %c, i32 %N) # ; CHECK-SVE-NEXT: [[CMP28_NOT:%.*]] = icmp ult i32 [[N]], 2 ; CHECK-SVE-NEXT: [[DIV27:%.*]] = lshr i32 [[N]], 1 ; CHECK-SVE-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext nneg i32 [[DIV27]] to i64 -; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 16 +; CHECK-SVE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-SVE-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4 +; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], [[TMP1]] ; CHECK-SVE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-SVE: vector.ph: -; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 16 +; CHECK-SVE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16 +; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]] ; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]] ; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-SVE: vector.body: ; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] +; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] ; CHECK-SVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]] ; CHECK-SVE-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDEX]] ; CHECK-SVE-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], i64 [[INDEX]] -; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 -; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1 -; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP9]], align 
1 -; CHECK-SVE-NEXT: [[TMP5:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> -; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP5]]) -; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-SVE-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32> -; CHECK-SVE-NEXT: [[TMP6:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP4]] -; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP6]]) -; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP7]], align 1 +; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load , ptr [[TMP8]], align 1 +; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load , ptr [[TMP9]], align 1 +; CHECK-SVE-NEXT: [[TMP11:%.*]] = sext [[WIDE_LOAD2]] to +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP11]]) +; CHECK-SVE-NEXT: [[TMP13:%.*]] = sext [[WIDE_LOAD]] to +; CHECK-SVE-NEXT: [[TMP14:%.*]] = sext [[WIDE_LOAD1]] to +; CHECK-SVE-NEXT: [[TMP10:%.*]] = mul nsw [[TMP13]], [[TMP14]] +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[PARTIAL_REDUCE]], [[TMP10]]) +; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-SVE-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-SVE-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; CHECK-SVE: middle.block: -; CHECK-SVE-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE3]]) +; CHECK-SVE-NEXT: [[TMP12:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[PARTIAL_REDUCE3]]) ; CHECK-SVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] ; CHECK-SVE-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label 
[[SCALAR_PH]] ; CHECK-SVE: scalar.ph: diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll index 5b9bd0997f2fa..fed979e34e5d5 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll @@ -10,26 +10,30 @@ define i32 @dotp(ptr %a, ptr %b) #0 { ; CHECK-NEXT: entry: ; CHECK-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: +; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 16 +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP1]] +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP3]], align 1 ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP6]], align 1 -; CHECK-NEXT: [[TMP8:%.*]] = zext <16 x i8> [[WIDE_LOAD1]] to <16 x i32> -; CHECK-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-NEXT: [[TMP9:%.*]] = mul <16 x i32> [[TMP8]], [[TMP5]] -; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP9]]) -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 -; CHECK-NEXT: 
[[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load , ptr [[TMP6]], align 1 +; CHECK-NEXT: [[TMP4:%.*]] = zext [[WIDE_LOAD1]] to +; CHECK-NEXT: [[TMP5:%.*]] = zext [[WIDE_LOAD]] to +; CHECK-NEXT: [[TMP9:%.*]] = mul [[TMP4]], [[TMP5]] +; CHECK-NEXT: [[PARTIAL_REDUCE]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP9]]) +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]] +; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) -; CHECK-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK: for.exit: -; CHECK-NEXT: ret i32 [[TMP11]] +; CHECK-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[PARTIAL_REDUCE]]) +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]] +; CHECK: scalar.ph: ; entry: br label %for.body diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-mixed.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-mixed.ll index 0ee6b52a2450b..3142227815383 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-mixed.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-mixed.ll @@ -61,13 +61,13 @@ define i32 @sudot(ptr %a, ptr %b) #0 { ; CHECK-NOI8MM-NEXT: [[VEC_PHI:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP10:%.*]], [[VECTOR_BODY]] ] ; CHECK-NOI8MM-NEXT: [[VEC_PHI1:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP11:%.*]], [[VECTOR_BODY]] ] ; CHECK-NOI8MM-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[A]], 
i64 [[INDEX]] -; CHECK-NOI8MM-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[TMP6]], i32 16 +; CHECK-NOI8MM-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[TMP6]], i64 16 ; CHECK-NOI8MM-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP6]], align 1 ; CHECK-NOI8MM-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 ; CHECK-NOI8MM-NEXT: [[TMP2:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> ; CHECK-NOI8MM-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> ; CHECK-NOI8MM-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; CHECK-NOI8MM-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP13]], i32 16 +; CHECK-NOI8MM-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP13]], i64 16 ; CHECK-NOI8MM-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP13]], align 1 ; CHECK-NOI8MM-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1 ; CHECK-NOI8MM-NEXT: [[TMP14:%.*]] = sext <16 x i8> [[WIDE_LOAD3]] to <16 x i32> @@ -164,13 +164,13 @@ define i32 @usdot(ptr %a, ptr %b) #0 { ; CHECK-NOI8MM-NEXT: [[VEC_PHI:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP10:%.*]], [[VECTOR_BODY]] ] ; CHECK-NOI8MM-NEXT: [[VEC_PHI1:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP11:%.*]], [[VECTOR_BODY]] ] ; CHECK-NOI8MM-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-NOI8MM-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[TMP6]], i32 16 +; CHECK-NOI8MM-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[TMP6]], i64 16 ; CHECK-NOI8MM-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP6]], align 1 ; CHECK-NOI8MM-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 ; CHECK-NOI8MM-NEXT: [[TMP2:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> ; CHECK-NOI8MM-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> ; CHECK-NOI8MM-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; CHECK-NOI8MM-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP13]], i32 16 +; CHECK-NOI8MM-NEXT: 
[[TMP5:%.*]] = getelementptr i8, ptr [[TMP13]], i64 16 ; CHECK-NOI8MM-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP13]], align 1 ; CHECK-NOI8MM-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1 ; CHECK-NOI8MM-NEXT: [[TMP14:%.*]] = zext <16 x i8> [[WIDE_LOAD3]] to <16 x i32> @@ -223,11 +223,11 @@ define i32 @sudot_neon(ptr %a, ptr %b) #1 { ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE5:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i32 16 +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i64 16 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP0]], align 1 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1 ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i32 16 +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i64 16 ; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1 ; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 ; CHECK-NEXT: [[TMP9:%.*]] = sext <16 x i8> [[WIDE_LOAD3]] to <16 x i32> @@ -259,13 +259,13 @@ define i32 @sudot_neon(ptr %a, ptr %b) #1 { ; CHECK-NOI8MM-NEXT: [[VEC_PHI:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP12:%.*]], [[VECTOR_BODY]] ] ; CHECK-NOI8MM-NEXT: [[VEC_PHI1:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP13:%.*]], [[VECTOR_BODY]] ] ; CHECK-NOI8MM-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-NOI8MM-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i32 16 +; CHECK-NOI8MM-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i64 16 ; CHECK-NOI8MM-NEXT: [[WIDE_LOAD:%.*]] = load <16 
x i8>, ptr [[TMP0]], align 1 ; CHECK-NOI8MM-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1 ; CHECK-NOI8MM-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> ; CHECK-NOI8MM-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> ; CHECK-NOI8MM-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; CHECK-NOI8MM-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i32 16 +; CHECK-NOI8MM-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i64 16 ; CHECK-NOI8MM-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1 ; CHECK-NOI8MM-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 ; CHECK-NOI8MM-NEXT: [[TMP8:%.*]] = sext <16 x i8> [[WIDE_LOAD3]] to <16 x i32> @@ -318,11 +318,11 @@ define i32 @usdot_neon(ptr %a, ptr %b) #1 { ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE5:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i32 16 +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i64 16 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP0]], align 1 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1 ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i32 16 +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i64 16 ; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1 ; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 ; CHECK-NEXT: [[TMP9:%.*]] = zext <16 x i8> [[WIDE_LOAD3]] to <16 x i32> @@ -354,13 +354,13 @@ define i32 @usdot_neon(ptr %a, ptr %b) #1 { ; CHECK-NOI8MM-NEXT: [[VEC_PHI:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], 
[ [[TMP12:%.*]], [[VECTOR_BODY]] ] ; CHECK-NOI8MM-NEXT: [[VEC_PHI1:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP13:%.*]], [[VECTOR_BODY]] ] ; CHECK-NOI8MM-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-NOI8MM-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i32 16 +; CHECK-NOI8MM-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i64 16 ; CHECK-NOI8MM-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP0]], align 1 ; CHECK-NOI8MM-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1 ; CHECK-NOI8MM-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> ; CHECK-NOI8MM-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> ; CHECK-NOI8MM-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; CHECK-NOI8MM-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i32 16 +; CHECK-NOI8MM-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i64 16 ; CHECK-NOI8MM-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1 ; CHECK-NOI8MM-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 ; CHECK-NOI8MM-NEXT: [[TMP8:%.*]] = zext <16 x i8> [[WIDE_LOAD3]] to <16 x i32> diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-neon.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-neon.ll index c6c5c5105d540..b2be0e1d7a442 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-neon.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-neon.ll @@ -44,11 +44,11 @@ define i32 @dotp(ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE5:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; 
CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[TMP1]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[TMP1]], i64 16 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP6]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP6]], i64 16 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP6]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = zext <16 x i8> [[WIDE_LOAD3]] to <16 x i32> @@ -249,7 +249,7 @@ define i32 @not_dotp_different_types(ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP30:%.*]] = add i64 [[INDEX]], 30 ; CHECK-INTERLEAVED-NEXT: [[TMP31:%.*]] = add i64 [[INDEX]], 31 ; CHECK-INTERLEAVED-NEXT: [[TMP32:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP0]] -; CHECK-INTERLEAVED-NEXT: [[TMP34:%.*]] = getelementptr i8, ptr [[TMP32]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[TMP34:%.*]] = getelementptr i8, ptr [[TMP32]], i64 16 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP32]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP34]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP35:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> @@ -513,13 +513,13 @@ define i32 @not_dotp_not_loop_carried(ptr %a, ptr %b) { ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP1]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr 
[[TMP1]], i64 16 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> ; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> ; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP4]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP4]], i64 16 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP4]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = zext <16 x i8> [[WIDE_LOAD1]] to <16 x i32> @@ -791,10 +791,10 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = or disjoint i64 [[INDEX]], 3 ; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP9]] ; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP9]] -; CHECK-INTERLEAVED-NEXT: [[TMP38:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[TMP38:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 16 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD8:%.*]] = load <16 x i8>, ptr [[TMP38]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i64 16 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD9:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD10:%.*]] = load <16 x i8>, ptr [[TMP16]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = sext <16 x i8> [[WIDE_LOAD9]] to <16 x i32> @@ 
-805,10 +805,10 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP17:%.*]] = sext <16 x i8> [[WIDE_LOAD8]] to <16 x i32> ; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = mul nsw <16 x i32> [[TMP19]], [[TMP17]] ; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE11]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI7]], <16 x i32> [[TMP21]]) -; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i64 16 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD12:%.*]] = load <16 x i8>, ptr [[TMP4]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD13:%.*]] = load <16 x i8>, ptr [[TMP22]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = getelementptr inbounds i8, ptr [[TMP5]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = getelementptr inbounds i8, ptr [[TMP5]], i64 16 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD14:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD15:%.*]] = load <16 x i8>, ptr [[TMP26]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP24:%.*]] = sext <16 x i8> [[WIDE_LOAD12]] to <16 x i32> @@ -819,10 +819,10 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP25:%.*]] = sext <16 x i8> [[WIDE_LOAD15]] to <16 x i32> ; CHECK-INTERLEAVED-NEXT: [[TMP29:%.*]] = mul nsw <16 x i32> [[TMP27]], [[TMP25]] ; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE17]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI5]], <16 x i32> [[TMP29]]) -; CHECK-INTERLEAVED-NEXT: [[TMP32:%.*]] = getelementptr inbounds i8, ptr [[TMP7]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[TMP32:%.*]] = getelementptr inbounds i8, ptr [[TMP7]], i64 16 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD18:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD19:%.*]] = load <16 x i8>, ptr [[TMP32]], align 1 -; 
CHECK-INTERLEAVED-NEXT: [[TMP36:%.*]] = getelementptr inbounds i8, ptr [[TMP8]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[TMP36:%.*]] = getelementptr inbounds i8, ptr [[TMP8]], i64 16 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD20:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD21:%.*]] = load <16 x i8>, ptr [[TMP36]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP34:%.*]] = sext <16 x i8> [[WIDE_LOAD18]] to <16 x i32> @@ -833,10 +833,10 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP33:%.*]] = sext <16 x i8> [[WIDE_LOAD21]] to <16 x i32> ; CHECK-INTERLEAVED-NEXT: [[TMP37:%.*]] = mul nsw <16 x i32> [[TMP35]], [[TMP33]] ; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI3]], <16 x i32> [[TMP37]]) -; CHECK-INTERLEAVED-NEXT: [[TMP42:%.*]] = getelementptr inbounds i8, ptr [[TMP10]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[TMP42:%.*]] = getelementptr inbounds i8, ptr [[TMP10]], i64 16 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD24:%.*]] = load <16 x i8>, ptr [[TMP10]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD25:%.*]] = load <16 x i8>, ptr [[TMP42]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP46:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[TMP46:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i64 16 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD26:%.*]] = load <16 x i8>, ptr [[TMP11]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD27:%.*]] = load <16 x i8>, ptr [[TMP46]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP44:%.*]] = sext <16 x i8> [[WIDE_LOAD24]] to <16 x i32> @@ -1811,13 +1811,13 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP13:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ] ; 
CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[TMP1]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[TMP1]], i64 16 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> ; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> ; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP6]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP6]], i64 16 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP6]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = zext <16 x i8> [[WIDE_LOAD3]] to <16 x i32> diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll index ab593f6f8bb6b..2bea0733f65b0 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll @@ -12,62 +12,74 @@ define i32 @dotp(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVE1-NEXT: entry: ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVE1: vector.ph: +; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 16 +; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP1]] +; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: ; 
CHECK-INTERLEAVE1-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT1:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX1]] -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP16]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP16]], align 1 ; CHECK-INTERLEAVE1-NEXT: [[TMP20:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX1]] -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP20]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = zext <16 x i8> [[WIDE_LOAD1]] to <16 x i32> -; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = mul <16 x i32> [[TMP2]], [[TMP3]] -; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP4]]) -; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT1]] = add nuw i64 [[INDEX1]], 16 -; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT1]], 1024 -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD1:%.*]] = load , ptr [[TMP20]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = zext [[WIDE_LOAD1]] to +; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = zext [[WIDE_LOAD]] to +; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = mul [[TMP4]], [[TMP5]] +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP6]]) +; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT1]] = add nuw i64 [[INDEX1]], [[TMP1]] +; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = icmp eq 
i64 [[INDEX_NEXT1]], [[N_VEC]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: -; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) -; CHECK-INTERLEAVE1-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-INTERLEAVE1: for.exit: -; CHECK-INTERLEAVE1-NEXT: ret i32 [[TMP6]] +; CHECK-INTERLEAVE1-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[PARTIAL_REDUCE]]) +; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]] +; CHECK-INTERLEAVE1: scalar.ph: ; ; CHECK-INTERLEAVED-LABEL: define i32 @dotp( ; CHECK-INTERLEAVED-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] { ; CHECK-INTERLEAVED-NEXT: entry: ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVED: vector.ph: +; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 32 +; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP1]] +; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT1:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE5:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE5:%.*]], [[VECTOR_BODY]] ] ; 
CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX1]] -; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[TMP20]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP20]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = shl nuw i64 [[TMP3]], 4 +; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP20]], i64 [[TMP4]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP20]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load , ptr [[TMP5]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP28:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX1]] -; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[TMP28]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP28]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD3]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = mul <16 x i32> [[TMP4]], [[TMP5]] -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP6]]) -; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = zext <16 x i8> [[WIDE_LOAD4]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = mul <16 x i32> [[TMP7]], [[TMP8]] -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE5]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP9]]) -; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT1]] = add nuw i64 [[INDEX1]], 32 -; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = icmp eq i64 
[[INDEX_NEXT1]], 1024 -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = shl nuw i64 [[TMP7]], 4 +; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[TMP28]], i64 [[TMP8]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load , ptr [[TMP28]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load , ptr [[TMP9]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = zext [[WIDE_LOAD3]] to +; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = zext [[WIDE_LOAD]] to +; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = mul [[TMP10]], [[TMP11]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP12]]) +; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = zext [[WIDE_LOAD4]] to +; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = zext [[WIDE_LOAD2]] to +; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = mul [[TMP13]], [[TMP14]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE5]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI1]], [[TMP15]]) +; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT1]] = add nuw i64 [[INDEX1]], [[TMP1]] +; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT1]], [[N_VEC]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]] -; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]]) -; CHECK-INTERLEAVED-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-INTERLEAVED: for.exit: -; CHECK-INTERLEAVED-NEXT: ret i32 [[TMP11]] +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]] +; CHECK-INTERLEAVED-NEXT: [[TMP17:%.*]] = call i32 
@llvm.vector.reduce.add.nxv4i32( [[BIN_RDX]]) +; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] +; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]] +; CHECK-INTERLEAVED: scalar.ph: ; ; CHECK-MAXBW-LABEL: define i32 @dotp( ; CHECK-MAXBW-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] { @@ -162,16 +174,16 @@ define i64 @not_dotp_i8_to_i64_has_neon_dotprod(ptr readonly %a, ptr readonly %b ; CHECK-INTERLEAVED-NEXT: [[VEC_PHI3:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE14:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] ; CHECK-INTERLEAVED-NEXT: [[NEXT_GEP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 32 -; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 48 +; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 16 +; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 32 +; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 48 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[NEXT_GEP]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP0]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD6:%.*]] = load <16 x i8>, ptr [[TMP10]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD7:%.*]] = load <16 x i8>, ptr [[TMP11]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[NEXT_GEP1]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[NEXT_GEP1]], i32 32 -; CHECK-INTERLEAVED-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[NEXT_GEP1]], i32 48 +; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[NEXT_GEP1]], i64 16 +; 
CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[NEXT_GEP1]], i64 32 +; CHECK-INTERLEAVED-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[NEXT_GEP1]], i64 48 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[NEXT_GEP1]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD5:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD10:%.*]] = load <16 x i8>, ptr [[TMP18]], align 1 @@ -299,16 +311,16 @@ define i64 @not_dotp_i16_to_i64_has_neon_dotprod(ptr readonly %a, ptr readonly % ; CHECK-INTERLEAVED-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[OFFSET_IDX]] ; CHECK-INTERLEAVED-NEXT: [[OFFSET_IDX2:%.*]] = mul i64 [[INDEX]], 2 ; CHECK-INTERLEAVED-NEXT: [[NEXT_GEP3:%.*]] = getelementptr i8, ptr [[B]], i64 [[OFFSET_IDX2]] -; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = getelementptr i16, ptr [[NEXT_GEP]], i32 8 -; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = getelementptr i16, ptr [[NEXT_GEP]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = getelementptr i16, ptr [[NEXT_GEP]], i32 24 +; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = getelementptr i16, ptr [[NEXT_GEP]], i64 8 +; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = getelementptr i16, ptr [[NEXT_GEP]], i64 16 +; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = getelementptr i16, ptr [[NEXT_GEP]], i64 24 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[NEXT_GEP]], align 2 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <8 x i16>, ptr [[TMP0]], align 2 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD7:%.*]] = load <8 x i16>, ptr [[TMP10]], align 2 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD8:%.*]] = load <8 x i16>, ptr [[TMP11]], align 2 -; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i16, ptr [[NEXT_GEP3]], i32 8 -; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = getelementptr i16, ptr [[NEXT_GEP3]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[TMP19:%.*]] = getelementptr i16, ptr [[NEXT_GEP3]], i32 24 +; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i16, ptr [[NEXT_GEP3]], 
i64 8 +; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = getelementptr i16, ptr [[NEXT_GEP3]], i64 16 +; CHECK-INTERLEAVED-NEXT: [[TMP19:%.*]] = getelementptr i16, ptr [[NEXT_GEP3]], i64 24 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD5:%.*]] = load <8 x i16>, ptr [[NEXT_GEP3]], align 2 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD6:%.*]] = load <8 x i16>, ptr [[TMP1]], align 2 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD11:%.*]] = load <8 x i16>, ptr [[TMP18]], align 2 @@ -525,7 +537,7 @@ define i32 @not_dotp_different_types(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVED-NEXT: [[TMP30:%.*]] = add i64 [[INDEX]], 30 ; CHECK-INTERLEAVED-NEXT: [[TMP31:%.*]] = add i64 [[INDEX]], 31 ; CHECK-INTERLEAVED-NEXT: [[TMP32:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP0]] -; CHECK-INTERLEAVED-NEXT: [[TMP34:%.*]] = getelementptr i8, ptr [[TMP32]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[TMP34:%.*]] = getelementptr i8, ptr [[TMP32]], i64 16 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP32]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP34]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP35:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> @@ -789,13 +801,13 @@ define i32 @not_dotp_not_loop_carried(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[TMP10]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[TMP10]], i64 16 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP10]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> ; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD1]] to <16 x i32> ; CHECK-INTERLEAVED-NEXT: 
[[TMP17:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP17]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP17]], i64 16 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP17]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> @@ -991,18 +1003,22 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVE1-LABEL: define i32 @dotp_unrolled( ; CHECK-INTERLEAVE1-SAME: i32 [[NUM_OUT:%.*]], i64 [[NUM_IN:%.*]], ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-INTERLEAVE1-NEXT: entry: -; CHECK-INTERLEAVE1-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[NUM_IN]], 16 +; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVE1-NEXT: [[TMP12:%.*]] = shl nuw nsw i64 [[TMP0]], 4 +; CHECK-INTERLEAVE1-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[NUM_IN]], [[TMP12]] ; CHECK-INTERLEAVE1-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVE1: vector.ph: -; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[NUM_IN]], 16 +; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVE1-NEXT: [[TMP14:%.*]] = mul nuw i64 [[TMP13]], 16 +; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[NUM_IN]], [[TMP14]] ; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 [[NUM_IN]], [[N_MOD_VF]] ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: ; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE13:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, 
[[VECTOR_PH]] ], [ [[PARTIAL_REDUCE10:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI2:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE7:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI3:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE13:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI1:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE10:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI2:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE7:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI3:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDEX]] ; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]] ; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = or disjoint i64 [[INDEX]], 1 @@ -1014,38 +1030,38 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = or disjoint i64 [[INDEX]], 3 ; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP9]] ; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP9]] -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP23:%.*]] = sext <16 x i8> [[WIDE_LOAD4]] to <16 x i32> -; CHECK-INTERLEAVE1-NEXT: [[TMP12:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = mul nsw <16 x i32> [[TMP23]], [[TMP12]] -; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> 
@llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI3]], <16 x i32> [[TMP13]]) -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD5:%.*]] = load <16 x i8>, ptr [[TMP4]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD6:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP14:%.*]] = sext <16 x i8> [[WIDE_LOAD5]] to <16 x i32> -; CHECK-INTERLEAVE1-NEXT: [[TMP15:%.*]] = sext <16 x i8> [[WIDE_LOAD6]] to <16 x i32> -; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = mul nsw <16 x i32> [[TMP14]], [[TMP15]] -; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE7]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI2]], <16 x i32> [[TMP16]]) -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD8:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD9:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP17:%.*]] = sext <16 x i8> [[WIDE_LOAD8]] to <16 x i32> -; CHECK-INTERLEAVE1-NEXT: [[TMP18:%.*]] = sext <16 x i8> [[WIDE_LOAD9]] to <16 x i32> -; CHECK-INTERLEAVE1-NEXT: [[TMP19:%.*]] = mul nsw <16 x i32> [[TMP17]], [[TMP18]] -; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE10]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP19]]) -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD11:%.*]] = load <16 x i8>, ptr [[TMP10]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD12:%.*]] = load <16 x i8>, ptr [[TMP11]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP20:%.*]] = sext <16 x i8> [[WIDE_LOAD11]] to <16 x i32> -; CHECK-INTERLEAVE1-NEXT: [[TMP21:%.*]] = sext <16 x i8> [[WIDE_LOAD12]] to <16 x i32> -; CHECK-INTERLEAVE1-NEXT: [[TMP22:%.*]] = mul nsw <16 x i32> [[TMP20]], [[TMP21]] -; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE13]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP22]]) -; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP1]], align 1 +; 
CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD4:%.*]] = load , ptr [[TMP2]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP15:%.*]] = sext [[WIDE_LOAD4]] to +; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = sext [[WIDE_LOAD]] to +; CHECK-INTERLEAVE1-NEXT: [[TMP17:%.*]] = mul nsw [[TMP15]], [[TMP16]] +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI3]], [[TMP17]]) +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD5:%.*]] = load , ptr [[TMP4]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD6:%.*]] = load , ptr [[TMP5]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP18:%.*]] = sext [[WIDE_LOAD5]] to +; CHECK-INTERLEAVE1-NEXT: [[TMP19:%.*]] = sext [[WIDE_LOAD6]] to +; CHECK-INTERLEAVE1-NEXT: [[TMP20:%.*]] = mul nsw [[TMP18]], [[TMP19]] +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE7]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI2]], [[TMP20]]) +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD8:%.*]] = load , ptr [[TMP7]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD9:%.*]] = load , ptr [[TMP8]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP21:%.*]] = sext [[WIDE_LOAD8]] to +; CHECK-INTERLEAVE1-NEXT: [[TMP22:%.*]] = sext [[WIDE_LOAD9]] to +; CHECK-INTERLEAVE1-NEXT: [[TMP23:%.*]] = mul nsw [[TMP21]], [[TMP22]] +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE10]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI1]], [[TMP23]]) +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD11:%.*]] = load , ptr [[TMP10]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD12:%.*]] = load , ptr [[TMP11]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP24:%.*]] = sext [[WIDE_LOAD11]] to +; CHECK-INTERLEAVE1-NEXT: [[TMP25:%.*]] = sext [[WIDE_LOAD12]] to +; CHECK-INTERLEAVE1-NEXT: [[TMP26:%.*]] = mul nsw [[TMP24]], [[TMP25]] +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE13]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP26]]) +; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP14]] ; CHECK-INTERLEAVE1-NEXT: [[TMP32:%.*]] = icmp eq 
i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: -; CHECK-INTERLEAVE1-NEXT: [[TMP24:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE13]]) -; CHECK-INTERLEAVE1-NEXT: [[TMP25:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE10]]) -; CHECK-INTERLEAVE1-NEXT: [[TMP26:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE7]]) -; CHECK-INTERLEAVE1-NEXT: [[TMP27:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) +; CHECK-INTERLEAVE1-NEXT: [[TMP28:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[PARTIAL_REDUCE13]]) +; CHECK-INTERLEAVE1-NEXT: [[TMP29:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[PARTIAL_REDUCE10]]) +; CHECK-INTERLEAVE1-NEXT: [[TMP30:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[PARTIAL_REDUCE7]]) +; CHECK-INTERLEAVE1-NEXT: [[TMP31:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[PARTIAL_REDUCE]]) ; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[NUM_IN]], [[N_VEC]] ; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] ; CHECK-INTERLEAVE1: scalar.ph: @@ -1053,22 +1069,26 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVED-LABEL: define i32 @dotp_unrolled( ; CHECK-INTERLEAVED-SAME: i32 [[NUM_OUT:%.*]], i64 [[NUM_IN:%.*]], ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-INTERLEAVED-NEXT: entry: -; CHECK-INTERLEAVED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[NUM_IN]], 32 +; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = shl nuw nsw i64 [[TMP0]], 5 +; CHECK-INTERLEAVED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[NUM_IN]], [[TMP12]] ; 
CHECK-INTERLEAVED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVED: vector.ph: -; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[NUM_IN]], 32 +; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = mul nuw i64 [[TMP13]], 32 +; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[NUM_IN]], [[TMP14]] ; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 [[NUM_IN]], [[N_MOD_VF]] ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE28:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE29:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI2:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE22:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI3:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE23:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI4:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE16:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI5:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE17:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI6:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI7:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE11:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE28:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: 
[[VEC_PHI1:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE29:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI2:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE22:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI3:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE23:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI4:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE16:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI5:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE17:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI6:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI7:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE11:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDEX]] ; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]] ; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = or disjoint i64 [[INDEX]], 1 @@ -1080,74 +1100,90 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = or disjoint i64 [[INDEX]], 3 ; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP9]] ; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP9]] -; CHECK-INTERLEAVED-NEXT: [[TMP43:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD8:%.*]] = load <16 x i8>, ptr [[TMP43]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD9:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD10:%.*]] = load 
<16 x i8>, ptr [[TMP12]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = sext <16 x i8> [[WIDE_LOAD9]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = mul nsw <16 x i32> [[TMP13]], [[TMP14]] -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI6]], <16 x i32> [[TMP15]]) -; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = sext <16 x i8> [[WIDE_LOAD10]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP17:%.*]] = sext <16 x i8> [[WIDE_LOAD8]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = mul nsw <16 x i32> [[TMP16]], [[TMP17]] -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE11]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI7]], <16 x i32> [[TMP18]]) -; CHECK-INTERLEAVED-NEXT: [[TMP19:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD12:%.*]] = load <16 x i8>, ptr [[TMP4]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD13:%.*]] = load <16 x i8>, ptr [[TMP19]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = getelementptr inbounds i8, ptr [[TMP5]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD14:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD15:%.*]] = load <16 x i8>, ptr [[TMP20]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = sext <16 x i8> [[WIDE_LOAD12]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = sext <16 x i8> [[WIDE_LOAD14]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP23:%.*]] = mul nsw <16 x i32> [[TMP21]], [[TMP22]] -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE16]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI4]], <16 x i32> [[TMP23]]) -; CHECK-INTERLEAVED-NEXT: [[TMP24:%.*]] = sext <16 x i8> [[WIDE_LOAD13]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP25:%.*]] = sext <16 x i8> [[WIDE_LOAD15]] to <16 x i32> -; 
CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = mul nsw <16 x i32> [[TMP24]], [[TMP25]] -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE17]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI5]], <16 x i32> [[TMP26]]) -; CHECK-INTERLEAVED-NEXT: [[TMP27:%.*]] = getelementptr inbounds i8, ptr [[TMP7]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD18:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD19:%.*]] = load <16 x i8>, ptr [[TMP27]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP28:%.*]] = getelementptr inbounds i8, ptr [[TMP8]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD20:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD21:%.*]] = load <16 x i8>, ptr [[TMP28]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP29:%.*]] = sext <16 x i8> [[WIDE_LOAD18]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP30:%.*]] = sext <16 x i8> [[WIDE_LOAD20]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP31:%.*]] = mul nsw <16 x i32> [[TMP29]], [[TMP30]] -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE22]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI2]], <16 x i32> [[TMP31]]) -; CHECK-INTERLEAVED-NEXT: [[TMP48:%.*]] = sext <16 x i8> [[WIDE_LOAD19]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP33:%.*]] = sext <16 x i8> [[WIDE_LOAD21]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP34:%.*]] = mul nsw <16 x i32> [[TMP48]], [[TMP33]] -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE23]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI3]], <16 x i32> [[TMP34]]) -; CHECK-INTERLEAVED-NEXT: [[TMP35:%.*]] = getelementptr inbounds i8, ptr [[TMP10]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD24:%.*]] = load <16 x i8>, ptr [[TMP10]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD25:%.*]] = load <16 x i8>, ptr [[TMP35]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP36:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i32 16 -; CHECK-INTERLEAVED-NEXT: 
[[WIDE_LOAD26:%.*]] = load <16 x i8>, ptr [[TMP11]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD27:%.*]] = load <16 x i8>, ptr [[TMP36]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP37:%.*]] = sext <16 x i8> [[WIDE_LOAD24]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP38:%.*]] = sext <16 x i8> [[WIDE_LOAD26]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP39:%.*]] = mul nsw <16 x i32> [[TMP37]], [[TMP38]] -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE28]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP39]]) -; CHECK-INTERLEAVED-NEXT: [[TMP40:%.*]] = sext <16 x i8> [[WIDE_LOAD25]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP41:%.*]] = sext <16 x i8> [[WIDE_LOAD27]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP42:%.*]] = mul nsw <16 x i32> [[TMP40]], [[TMP41]] -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE29]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP42]]) -; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 +; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = shl nuw i64 [[TMP15]], 4 +; CHECK-INTERLEAVED-NEXT: [[TMP17:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 [[TMP16]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP1]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD8:%.*]] = load , ptr [[TMP17]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP19:%.*]] = shl nuw i64 [[TMP18]], 4 +; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i64 [[TMP19]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD9:%.*]] = load , ptr [[TMP2]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD10:%.*]] = load , ptr [[TMP20]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = sext [[WIDE_LOAD9]] to +; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = sext [[WIDE_LOAD]] to +; CHECK-INTERLEAVED-NEXT: 
[[TMP23:%.*]] = mul nsw [[TMP21]], [[TMP22]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI6]], [[TMP23]]) +; CHECK-INTERLEAVED-NEXT: [[TMP24:%.*]] = sext [[WIDE_LOAD10]] to +; CHECK-INTERLEAVED-NEXT: [[TMP25:%.*]] = sext [[WIDE_LOAD8]] to +; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = mul nsw [[TMP24]], [[TMP25]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE11]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI7]], [[TMP26]]) +; CHECK-INTERLEAVED-NEXT: [[TMP27:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP28:%.*]] = shl nuw i64 [[TMP27]], 4 +; CHECK-INTERLEAVED-NEXT: [[TMP29:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i64 [[TMP28]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD12:%.*]] = load , ptr [[TMP4]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD13:%.*]] = load , ptr [[TMP29]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP30:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP31:%.*]] = shl nuw i64 [[TMP30]], 4 +; CHECK-INTERLEAVED-NEXT: [[TMP63:%.*]] = getelementptr inbounds i8, ptr [[TMP5]], i64 [[TMP31]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD14:%.*]] = load , ptr [[TMP5]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD15:%.*]] = load , ptr [[TMP63]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP33:%.*]] = sext [[WIDE_LOAD12]] to +; CHECK-INTERLEAVED-NEXT: [[TMP34:%.*]] = sext [[WIDE_LOAD14]] to +; CHECK-INTERLEAVED-NEXT: [[TMP35:%.*]] = mul nsw [[TMP33]], [[TMP34]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE16]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI4]], [[TMP35]]) +; CHECK-INTERLEAVED-NEXT: [[TMP36:%.*]] = sext [[WIDE_LOAD13]] to +; CHECK-INTERLEAVED-NEXT: [[TMP37:%.*]] = sext [[WIDE_LOAD15]] to +; CHECK-INTERLEAVED-NEXT: [[TMP38:%.*]] = mul nsw [[TMP36]], [[TMP37]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE17]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI5]], [[TMP38]]) +; CHECK-INTERLEAVED-NEXT: 
[[TMP39:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP40:%.*]] = shl nuw i64 [[TMP39]], 4 +; CHECK-INTERLEAVED-NEXT: [[TMP41:%.*]] = getelementptr inbounds i8, ptr [[TMP7]], i64 [[TMP40]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD18:%.*]] = load , ptr [[TMP7]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD19:%.*]] = load , ptr [[TMP41]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP42:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP43:%.*]] = shl nuw i64 [[TMP42]], 4 +; CHECK-INTERLEAVED-NEXT: [[TMP44:%.*]] = getelementptr inbounds i8, ptr [[TMP8]], i64 [[TMP43]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD20:%.*]] = load , ptr [[TMP8]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD21:%.*]] = load , ptr [[TMP44]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP45:%.*]] = sext [[WIDE_LOAD18]] to +; CHECK-INTERLEAVED-NEXT: [[TMP46:%.*]] = sext [[WIDE_LOAD20]] to +; CHECK-INTERLEAVED-NEXT: [[TMP47:%.*]] = mul nsw [[TMP45]], [[TMP46]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE22]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI2]], [[TMP47]]) +; CHECK-INTERLEAVED-NEXT: [[TMP48:%.*]] = sext [[WIDE_LOAD19]] to +; CHECK-INTERLEAVED-NEXT: [[TMP49:%.*]] = sext [[WIDE_LOAD21]] to +; CHECK-INTERLEAVED-NEXT: [[TMP50:%.*]] = mul nsw [[TMP48]], [[TMP49]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE23]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI3]], [[TMP50]]) +; CHECK-INTERLEAVED-NEXT: [[TMP51:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP52:%.*]] = shl nuw i64 [[TMP51]], 4 +; CHECK-INTERLEAVED-NEXT: [[TMP53:%.*]] = getelementptr inbounds i8, ptr [[TMP10]], i64 [[TMP52]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD24:%.*]] = load , ptr [[TMP10]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD25:%.*]] = load , ptr [[TMP53]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP54:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP55:%.*]] = shl nuw i64 [[TMP54]], 4 +; CHECK-INTERLEAVED-NEXT: 
[[TMP56:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i64 [[TMP55]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD26:%.*]] = load , ptr [[TMP11]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD27:%.*]] = load , ptr [[TMP56]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP57:%.*]] = sext [[WIDE_LOAD24]] to +; CHECK-INTERLEAVED-NEXT: [[TMP58:%.*]] = sext [[WIDE_LOAD26]] to +; CHECK-INTERLEAVED-NEXT: [[TMP59:%.*]] = mul nsw [[TMP57]], [[TMP58]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE28]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP59]]) +; CHECK-INTERLEAVED-NEXT: [[TMP60:%.*]] = sext [[WIDE_LOAD25]] to +; CHECK-INTERLEAVED-NEXT: [[TMP61:%.*]] = sext [[WIDE_LOAD27]] to +; CHECK-INTERLEAVED-NEXT: [[TMP62:%.*]] = mul nsw [[TMP60]], [[TMP61]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE29]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI1]], [[TMP62]]) +; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP14]] ; CHECK-INTERLEAVED-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[PARTIAL_REDUCE29]], [[PARTIAL_REDUCE28]] -; CHECK-INTERLEAVED-NEXT: [[TMP44:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]]) -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX30:%.*]] = add <4 x i32> [[PARTIAL_REDUCE23]], [[PARTIAL_REDUCE22]] -; CHECK-INTERLEAVED-NEXT: [[TMP45:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX30]]) -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX31:%.*]] = add <4 x i32> [[PARTIAL_REDUCE17]], [[PARTIAL_REDUCE16]] -; CHECK-INTERLEAVED-NEXT: [[TMP46:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX31]]) -; CHECK-INTERLEAVED-NEXT: 
[[BIN_RDX32:%.*]] = add <4 x i32> [[PARTIAL_REDUCE11]], [[PARTIAL_REDUCE]] -; CHECK-INTERLEAVED-NEXT: [[TMP47:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX32]]) +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add [[PARTIAL_REDUCE29]], [[PARTIAL_REDUCE28]] +; CHECK-INTERLEAVED-NEXT: [[TMP64:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[BIN_RDX]]) +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX30:%.*]] = add [[PARTIAL_REDUCE23]], [[PARTIAL_REDUCE22]] +; CHECK-INTERLEAVED-NEXT: [[TMP65:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[BIN_RDX30]]) +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX31:%.*]] = add [[PARTIAL_REDUCE17]], [[PARTIAL_REDUCE16]] +; CHECK-INTERLEAVED-NEXT: [[TMP66:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[BIN_RDX31]]) +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX32:%.*]] = add [[PARTIAL_REDUCE11]], [[PARTIAL_REDUCE]] +; CHECK-INTERLEAVED-NEXT: [[TMP67:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[BIN_RDX32]]) ; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[NUM_IN]], [[N_VEC]] ; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] ; CHECK-INTERLEAVED: scalar.ph: @@ -1280,32 +1316,32 @@ define i32 @dotp_predicated(i64 %N, ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVE1: vector.ph: ; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 +; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 16 ; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = shl nuw i64 [[TMP5]], 2 +; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = shl nuw i64 [[TMP5]], 4 ; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = sub i64 [[N]], [[TMP6]] ; CHECK-INTERLEAVE1-NEXT: [[TMP8:%.*]] = icmp ugt i64 [[N]], [[TMP6]] ; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i64 [[TMP7]], i64 0 -; CHECK-INTERLEAVE1-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call 
@llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 [[N]]) +; CHECK-INTERLEAVE1-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv16i1.i64(i64 0, i64 [[N]]) ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: ; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP19:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVE1-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv4i8.p0(ptr align 1 [[TMP11]], [[ACTIVE_LANE_MASK]], poison) -; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = sext [[WIDE_MASKED_LOAD]] to +; CHECK-INTERLEAVE1-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv16i8.p0(ptr align 1 [[TMP11]], [[ACTIVE_LANE_MASK]], poison) ; CHECK-INTERLEAVE1-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]] -; CHECK-INTERLEAVE1-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call @llvm.masked.load.nxv4i8.p0(ptr align 1 [[TMP14]], [[ACTIVE_LANE_MASK]], poison) -; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = sext [[WIDE_MASKED_LOAD1]] to -; CHECK-INTERLEAVE1-NEXT: [[TMP17:%.*]] = mul nsw [[TMP16]], [[TMP13]] -; CHECK-INTERLEAVE1-NEXT: [[TMP18:%.*]] = add [[TMP17]], [[VEC_PHI]] -; CHECK-INTERLEAVE1-NEXT: [[TMP19]] = select [[ACTIVE_LANE_MASK]], [[TMP18]], [[VEC_PHI]] +; CHECK-INTERLEAVE1-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call @llvm.masked.load.nxv16i8.p0(ptr align 1 [[TMP14]], [[ACTIVE_LANE_MASK]], poison) +; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = sext 
[[WIDE_MASKED_LOAD1]] to +; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = sext [[WIDE_MASKED_LOAD]] to +; CHECK-INTERLEAVE1-NEXT: [[TMP15:%.*]] = mul nsw [[TMP10]], [[TMP13]] +; CHECK-INTERLEAVE1-NEXT: [[TMP12:%.*]] = select [[ACTIVE_LANE_MASK]], [[TMP15]], zeroinitializer +; CHECK-INTERLEAVE1-NEXT: [[TMP19]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP12]]) ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP4]] -; CHECK-INTERLEAVE1-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP9]]) -; CHECK-INTERLEAVE1-NEXT: [[TMP20:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 +; CHECK-INTERLEAVE1-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv16i1.i64(i64 [[INDEX]], i64 [[TMP9]]) +; CHECK-INTERLEAVE1-NEXT: [[TMP20:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-INTERLEAVE1-NEXT: [[TMP21:%.*]] = xor i1 [[TMP20]], true -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP22:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[TMP19]]) ; CHECK-INTERLEAVE1-NEXT: br label [[EXIT:%.*]] @@ -1318,32 +1354,32 @@ define i32 @dotp_predicated(i64 %N, ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVED: vector.ph: ; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 +; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 16 ; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = shl nuw i64 [[TMP5]], 2 +; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = shl nuw i64 [[TMP5]], 4 ; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = 
sub i64 [[N]], [[TMP6]] ; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp ugt i64 [[N]], [[TMP6]] ; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i64 [[TMP7]], i64 0 -; CHECK-INTERLEAVED-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 [[N]]) +; CHECK-INTERLEAVED-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv16i1.i64(i64 0, i64 [[N]]) ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP19:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv4i8.p0(ptr align 1 [[TMP11]], [[ACTIVE_LANE_MASK]], poison) -; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = sext [[WIDE_MASKED_LOAD]] to +; CHECK-INTERLEAVED-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv16i8.p0(ptr align 1 [[TMP11]], [[ACTIVE_LANE_MASK]], poison) ; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call @llvm.masked.load.nxv4i8.p0(ptr align 1 [[TMP14]], [[ACTIVE_LANE_MASK]], poison) -; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = sext [[WIDE_MASKED_LOAD1]] to -; CHECK-INTERLEAVED-NEXT: [[TMP17:%.*]] = mul nsw [[TMP16]], [[TMP13]] -; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = add [[TMP17]], [[VEC_PHI]] -; CHECK-INTERLEAVED-NEXT: [[TMP19]] = select 
[[ACTIVE_LANE_MASK]], [[TMP18]], [[VEC_PHI]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call @llvm.masked.load.nxv16i8.p0(ptr align 1 [[TMP14]], [[ACTIVE_LANE_MASK]], poison) +; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = sext [[WIDE_MASKED_LOAD1]] to +; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = sext [[WIDE_MASKED_LOAD]] to +; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = mul nsw [[TMP10]], [[TMP13]] +; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = select [[ACTIVE_LANE_MASK]], [[TMP15]], zeroinitializer +; CHECK-INTERLEAVED-NEXT: [[TMP19]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP12]]) ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP4]] -; CHECK-INTERLEAVED-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP9]]) -; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 +; CHECK-INTERLEAVED-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv16i1.i64(i64 [[INDEX]], i64 [[TMP9]]) +; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = xor i1 [[TMP20]], true -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[TMP19]]) ; CHECK-INTERLEAVED-NEXT: br label [[EXIT:%.*]] @@ -1416,66 +1452,82 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVE1-NEXT: entry: ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVE1: vector.ph: +; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8 +; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = 
urem i64 1024, [[TMP1]] +; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: ; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP3]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = zext [[WIDE_LOAD]] to ; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP6]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = zext <16 x i8> [[WIDE_LOAD1]] to <16 x i32> -; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = mul <16 x i32> [[TMP9]], [[TMP1]] -; CHECK-INTERLEAVE1-NEXT: [[TMP5]] = add <16 x i32> [[TMP4]], [[VEC_PHI]] -; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 -; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD1:%.*]] = load , ptr [[TMP6]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = zext [[WIDE_LOAD1]] to +; CHECK-INTERLEAVE1-NEXT: [[TMP14:%.*]] = mul [[TMP5]], [[TMP4]] +; CHECK-INTERLEAVE1-NEXT: [[TMP7]] = add [[TMP14]], [[VEC_PHI]] +; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]] +; CHECK-INTERLEAVE1-NEXT: 
[[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: -; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP5]]) -; CHECK-INTERLEAVE1-NEXT: [[TMP8:%.*]] = extractelement <16 x i32> [[TMP9]], i32 15 -; CHECK-INTERLEAVE1-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-INTERLEAVE1: for.exit: -; CHECK-INTERLEAVE1-NEXT: [[RESULT:%.*]] = add i32 [[TMP7]], [[TMP8]] -; CHECK-INTERLEAVE1-NEXT: ret i32 [[RESULT]] +; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.nxv8i32( [[TMP7]]) +; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = call i32 @llvm.vscale.i32() +; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = mul nuw i32 [[TMP10]], 8 +; CHECK-INTERLEAVE1-NEXT: [[TMP12:%.*]] = sub i32 [[TMP11]], 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = extractelement [[TMP5]], i32 [[TMP12]] +; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]] +; CHECK-INTERLEAVE1: scalar.ph: ; ; CHECK-INTERLEAVED-LABEL: define i32 @not_dotp_extend_user( ; CHECK-INTERLEAVED-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-INTERLEAVED-NEXT: entry: ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVED: vector.ph: +; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 16 +; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP1]] +; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] 
], [ [[TMP10:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP11:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP16:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP17:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[TMP3]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> +; CHECK-INTERLEAVED-NEXT: [[TMP24:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = shl nuw i64 [[TMP24]], 3 +; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP3]], i64 [[TMP4]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP3]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load , ptr [[TMP5]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = zext [[WIDE_LOAD]] to +; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = zext [[WIDE_LOAD2]] to ; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP8]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = zext <16 x i8> [[WIDE_LOAD3]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = zext <16 x i8> [[WIDE_LOAD4]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = mul <16 x i32> 
[[TMP6]], [[TMP2]] -; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = mul <16 x i32> [[TMP7]], [[TMP4]] -; CHECK-INTERLEAVED-NEXT: [[TMP10]] = add <16 x i32> [[TMP15]], [[VEC_PHI]] -; CHECK-INTERLEAVED-NEXT: [[TMP11]] = add <16 x i32> [[TMP9]], [[VEC_PHI1]] -; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 -; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = shl nuw i64 [[TMP9]], 3 +; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[TMP8]], i64 [[TMP10]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load , ptr [[TMP8]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load , ptr [[TMP11]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = zext [[WIDE_LOAD3]] to +; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = zext [[WIDE_LOAD4]] to +; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = mul [[TMP12]], [[TMP6]] +; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = mul [[TMP13]], [[TMP7]] +; CHECK-INTERLEAVED-NEXT: [[TMP16]] = add [[TMP14]], [[VEC_PHI]] +; CHECK-INTERLEAVED-NEXT: [[TMP17]] = add [[TMP15]], [[VEC_PHI1]] +; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]] +; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <16 x i32> [[TMP11]], [[TMP10]] -; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[BIN_RDX]]) -; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = extractelement <16 x i32> [[TMP7]], i32 15 -; CHECK-INTERLEAVED-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-INTERLEAVED: for.exit: -; 
CHECK-INTERLEAVED-NEXT: [[RESULT:%.*]] = add i32 [[TMP13]], [[TMP14]] -; CHECK-INTERLEAVED-NEXT: ret i32 [[RESULT]] +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add [[TMP17]], [[TMP16]] +; CHECK-INTERLEAVED-NEXT: [[TMP19:%.*]] = call i32 @llvm.vector.reduce.add.nxv8i32( [[BIN_RDX]]) +; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = call i32 @llvm.vscale.i32() +; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = mul nuw i32 [[TMP20]], 8 +; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = sub i32 [[TMP21]], 1 +; CHECK-INTERLEAVED-NEXT: [[TMP23:%.*]] = extractelement [[TMP13]], i32 [[TMP22]] +; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] +; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]] +; CHECK-INTERLEAVED: scalar.ph: ; ; CHECK-MAXBW-LABEL: define i32 @not_dotp_extend_user( ; CHECK-MAXBW-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { @@ -1538,62 +1590,83 @@ define i64 @dotp_cost_disagreement(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVE1-LABEL: define i64 @dotp_cost_disagreement( ; CHECK-INTERLEAVE1-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-INTERLEAVE1-NEXT: entry: -; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_PH:%.*]] +; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4 +; CHECK-INTERLEAVE1-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 41, [[TMP1]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVE1: vector.ph: +; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16 +; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 41, [[TMP3]] +; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 41, [[N_MOD_VF]] ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: ; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], 
[[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP7]], align 1 ; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = add nuw nsw i64 [[INDEX]], 1 ; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[TMP10]] -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP11]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD1]] to <16 x i64> -; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i64> -; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = mul nuw nsw <16 x i64> [[TMP3]], [[TMP4]] -; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI]], <16 x i64> [[TMP5]]) -; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 -; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 32 -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD1:%.*]] = load , ptr [[TMP11]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP12:%.*]] = zext [[WIDE_LOAD1]] to +; CHECK-INTERLEAVE1-NEXT: [[TMP8:%.*]] = zext [[WIDE_LOAD]] to +; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = mul nuw nsw [[TMP12]], [[TMP8]] +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call @llvm.vector.partial.reduce.add.nxv2i64.nxv16i64( [[VEC_PHI]], [[TMP9]]) +; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] +; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] 
= icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: -; CHECK-INTERLEAVE1-NEXT: [[TMP8:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[PARTIAL_REDUCE]]) -; CHECK-INTERLEAVE1-NEXT: br label [[SCALAR_PH:%.*]] +; CHECK-INTERLEAVE1-NEXT: [[TMP14:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64( [[PARTIAL_REDUCE]]) +; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 41, [[N_VEC]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] ; CHECK-INTERLEAVE1: scalar.ph: ; ; CHECK-INTERLEAVED-LABEL: define i64 @dotp_cost_disagreement( ; CHECK-INTERLEAVED-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-INTERLEAVED-NEXT: entry: -; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_PH:%.*]] +; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 5 +; CHECK-INTERLEAVED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 41, [[TMP1]] +; CHECK-INTERLEAVED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVED: vector.ph: +; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 32 +; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 41, [[TMP3]] +; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 41, [[N_MOD_VF]] ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE5:%.*]], [[VECTOR_BODY]] ] +; 
CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE5:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP7]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = shl nuw i64 [[TMP5]], 4 +; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP7]], i64 [[TMP6]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP7]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load , ptr [[TMP8]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = add nuw nsw i64 [[INDEX]], 1 ; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[TMP14]] -; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP15]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP15]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP4]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD3]] to <16 x i64> -; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i64> -; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = mul nuw nsw <16 x i64> [[TMP5]], [[TMP6]] -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI]], <16 x i64> [[TMP13]]) -; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = zext <16 x i8> [[WIDE_LOAD4]] to <16 x i64> -; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = zext <16 x i8> 
[[WIDE_LOAD2]] to <16 x i64> -; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = mul nuw nsw <16 x i64> [[TMP8]], [[TMP9]] -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE5]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI1]], <16 x i64> [[TMP10]]) -; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 -; CHECK-INTERLEAVED-NEXT: br i1 true, label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = shl nuw i64 [[TMP10]], 4 +; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP15]], i64 [[TMP11]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load , ptr [[TMP15]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load , ptr [[TMP12]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = zext [[WIDE_LOAD3]] to +; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = zext [[WIDE_LOAD]] to +; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = mul nuw nsw [[TMP13]], [[TMP21]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call @llvm.vector.partial.reduce.add.nxv2i64.nxv16i64( [[VEC_PHI]], [[TMP22]]) +; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = zext [[WIDE_LOAD4]] to +; CHECK-INTERLEAVED-NEXT: [[TMP17:%.*]] = zext [[WIDE_LOAD2]] to +; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = mul nuw nsw [[TMP16]], [[TMP17]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE5]] = call @llvm.vector.partial.reduce.add.nxv2i64.nxv16i64( [[VEC_PHI1]], [[TMP18]]) +; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] +; CHECK-INTERLEAVED-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <2 x i64> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]] -; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = call i64 
@llvm.vector.reduce.add.v2i64(<2 x i64> [[BIN_RDX]]) -; CHECK-INTERLEAVED-NEXT: br label [[SCALAR_PH:%.*]] +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]] +; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64( [[BIN_RDX]]) +; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 41, [[N_VEC]] +; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] ; CHECK-INTERLEAVED: scalar.ph: ; ; CHECK-MAXBW-LABEL: define i64 @dotp_cost_disagreement( @@ -1880,7 +1953,7 @@ define i64 @not_dotp_ext_outside_plan(ptr %a, i16 %b, i64 %n) #0 { ; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <8 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP8:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <8 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP9:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i16, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i16, ptr [[TMP1]], i32 8 +; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i16, ptr [[TMP1]], i64 8 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[TMP1]], align 2 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i16>, ptr [[TMP3]], align 2 ; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <8 x i16> [[WIDE_LOAD]] to <8 x i64> @@ -2009,7 +2082,7 @@ define i64 @not_dotp_ext_outside_plan2(ptr %a, i16 %b, i64 %n) #0 { ; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <8 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP8:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <8 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP9:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i16, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i16, ptr [[TMP1]], i32 8 +; CHECK-INTERLEAVED-NEXT: 
[[TMP3:%.*]] = getelementptr inbounds nuw i16, ptr [[TMP1]], i64 8 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[TMP1]], align 2 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i16>, ptr [[TMP3]], align 2 ; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <8 x i16> [[WIDE_LOAD]] to <8 x i64> @@ -2093,32 +2166,36 @@ define dso_local i32 @not_dotp_vscale1(ptr %a, ptr %b, i32 %n, i64 %cost) #0 { ; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP]], label [[FOR_BODY_PREHEADER:%.*]], label [[EXIT:%.*]] ; CHECK-INTERLEAVE1: for.body.preheader: ; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64 -; CHECK-INTERLEAVE1-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], 16 +; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 +; CHECK-INTERLEAVE1-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]] ; CHECK-INTERLEAVE1-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVE1: vector.ph: -; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], 16 +; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 16 +; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP4]] ; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]] ; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = trunc i64 [[N_VEC]] to i32 ; CHECK-INTERLEAVE1-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[A]], i64 [[N_VEC]] ; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[B]], i64 [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = insertelement <2 x i64> zeroinitializer, i64 [[COST]], i32 0 +; CHECK-INTERLEAVE1-NEXT: [[TMP12:%.*]] = insertelement zeroinitializer, i64 [[COST]], i32 0 ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: ; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, 
[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ [[TMP4]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi [ [[TMP12]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] ; CHECK-INTERLEAVE1-NEXT: [[NEXT_GEP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[NEXT_GEP]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[NEXT_GEP1]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i64> -; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i64> -; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = mul nuw nsw <16 x i64> [[TMP5]], [[TMP6]] -; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI]], <16 x i64> [[TMP10]]) -; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[NEXT_GEP]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD2:%.*]] = load , ptr [[NEXT_GEP1]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP14:%.*]] = zext [[WIDE_LOAD2]] to +; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = zext [[WIDE_LOAD]] to +; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = mul nuw nsw [[TMP14]], [[TMP10]] +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call @llvm.vector.partial.reduce.add.nxv2i64.nxv16i64( [[VEC_PHI]], [[TMP11]]) +; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]] ; CHECK-INTERLEAVE1-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], 
label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: -; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[PARTIAL_REDUCE]]) +; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64( [[PARTIAL_REDUCE]]) ; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] ; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_N]], label [[EXIT_LOOPEXIT:%.*]], label [[SCALAR_PH]] ; CHECK-INTERLEAVE1: scalar.ph: @@ -2130,42 +2207,50 @@ define dso_local i32 @not_dotp_vscale1(ptr %a, ptr %b, i32 %n, i64 %cost) #0 { ; CHECK-INTERLEAVED-NEXT: br i1 [[CMP]], label [[FOR_BODY_PREHEADER:%.*]], label [[EXIT:%.*]] ; CHECK-INTERLEAVED: for.body.preheader: ; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64 -; CHECK-INTERLEAVED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], 32 +; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 5 +; CHECK-INTERLEAVED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]] ; CHECK-INTERLEAVED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVED: vector.ph: -; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], 32 +; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 32 +; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP4]] ; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]] ; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = trunc i64 [[N_VEC]] to i32 ; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[A]], i64 [[N_VEC]] ; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[B]], i64 [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = insertelement <2 x i64> zeroinitializer, i64 [[COST]], i32 0 +; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = insertelement 
zeroinitializer, i64 [[COST]], i32 0 ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ [[TMP4]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE6:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi [ [[TMP21]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE6:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] ; CHECK-INTERLEAVED-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[NEXT_GEP]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[NEXT_GEP2]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[NEXT_GEP2]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD5:%.*]] = load <16 x i8>, ptr [[TMP6]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = zext <16 x i8> [[WIDE_LOAD4]] to <16 x i64> -; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i64> -; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = mul nuw nsw <16 x i64> [[TMP13]], [[TMP15]] -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI]], <16 x i64> [[TMP16]]) -; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = zext <16 x i8> [[WIDE_LOAD5]] to <16 x i64> -; CHECK-INTERLEAVED-NEXT: 
[[TMP11:%.*]] = zext <16 x i8> [[WIDE_LOAD3]] to <16 x i64> -; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = mul nuw nsw <16 x i64> [[TMP10]], [[TMP11]] -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE6]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI1]], <16 x i64> [[TMP12]]) -; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 +; CHECK-INTERLEAVED-NEXT: [[TMP23:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = shl nuw i64 [[TMP23]], 4 +; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 [[TMP10]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[NEXT_GEP]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load , ptr [[TMP11]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = shl nuw i64 [[TMP12]], 4 +; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[NEXT_GEP2]], i64 [[TMP13]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load , ptr [[NEXT_GEP2]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD5:%.*]] = load , ptr [[TMP14]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = zext [[WIDE_LOAD4]] to +; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = zext [[WIDE_LOAD]] to +; CHECK-INTERLEAVED-NEXT: [[TMP17:%.*]] = mul nuw nsw [[TMP15]], [[TMP16]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call @llvm.vector.partial.reduce.add.nxv2i64.nxv16i64( [[VEC_PHI]], [[TMP17]]) +; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = zext [[WIDE_LOAD5]] to +; CHECK-INTERLEAVED-NEXT: [[TMP19:%.*]] = zext [[WIDE_LOAD3]] to +; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = mul nuw nsw [[TMP18]], [[TMP19]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE6]] = call @llvm.vector.partial.reduce.add.nxv2i64.nxv16i64( [[VEC_PHI1]], [[TMP20]]) +; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]] ; CHECK-INTERLEAVED-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; 
CHECK-INTERLEAVED-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <2 x i64> [[PARTIAL_REDUCE6]], [[PARTIAL_REDUCE]] -; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[BIN_RDX]]) +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add [[PARTIAL_REDUCE6]], [[PARTIAL_REDUCE]] +; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64( [[BIN_RDX]]) ; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] ; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label [[EXIT_LOOPEXIT:%.*]], label [[SCALAR_PH]] ; CHECK-INTERLEAVED: scalar.ph: diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-interleave.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-interleave.ll index bd9fae6cd610b..80edfb5f0b6ff 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-interleave.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-interleave.ll @@ -20,7 +20,7 @@ define i32 @partial_reduce_with_non_constant_start_value(ptr %src, i32 %rdx.star ; IC2-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ [[TMP0]], %[[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], %[[VECTOR_BODY]] ] ; IC2-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], %[[VECTOR_BODY]] ] ; IC2-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[INDEX]] -; IC2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 16 +; IC2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 16 ; IC2-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 ; IC2-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 ; IC2-NEXT: [[TMP5:%.*]] = zext <16 x i8> 
[[WIDE_LOAD]] to <16 x i32> @@ -73,9 +73,9 @@ define i32 @partial_reduce_with_non_constant_start_value(ptr %src, i32 %rdx.star ; IC4-NEXT: [[VEC_PHI2:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[PARTIAL_REDUCE8:%.*]], %[[VECTOR_BODY]] ] ; IC4-NEXT: [[VEC_PHI3:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[PARTIAL_REDUCE9:%.*]], %[[VECTOR_BODY]] ] ; IC4-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[INDEX]] -; IC4-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 16 -; IC4-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 32 -; IC4-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 48 +; IC4-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 16 +; IC4-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 32 +; IC4-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 48 ; IC4-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 ; IC4-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 ; IC4-NEXT: [[WIDE_LOAD5:%.*]] = load <16 x i8>, ptr [[TMP4]], align 1 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-no-dotprod.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-no-dotprod.ll index 672d19b1edeba..a439f5189794a 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-no-dotprod.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-no-dotprod.ll @@ -16,13 +16,13 @@ define i32 @not_dotp(ptr %a, ptr %b) { ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <16 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP13:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <16 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP14:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[TMP1]], i32 16 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[TMP1]], i64 16 
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 ; CHECK-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> ; CHECK-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP6]], i32 16 +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP6]], i64 16 ; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP6]], align 1 ; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1 ; CHECK-NEXT: [[TMP9:%.*]] = zext <16 x i8> [[WIDE_LOAD3]] to <16 x i32> diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-sub.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-sub.ll index 6dae09ef97e1c..66b8026d46704 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-sub.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-sub.ll @@ -12,65 +12,77 @@ define i32 @dotp(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVE1-NEXT: entry: ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVE1: vector.ph: +; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 16 +; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP1]] +; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: ; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: 
[[TMP7:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP7]], align 1 ; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP10]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = zext <16 x i8> [[WIDE_LOAD1]] to <16 x i32> -; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = mul <16 x i32> [[TMP2]], [[TMP3]] -; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = sub <16 x i32> zeroinitializer, [[TMP4]] -; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP5]]) -; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 -; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD1:%.*]] = load , ptr [[TMP10]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = zext [[WIDE_LOAD1]] to +; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = zext [[WIDE_LOAD]] to +; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = mul [[TMP4]], [[TMP5]] +; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = sub zeroinitializer, [[TMP6]] +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP11]]) +; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]] +; CHECK-INTERLEAVE1-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: -; CHECK-INTERLEAVE1-NEXT: [[TMP8:%.*]] = call i32 
@llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) -; CHECK-INTERLEAVE1-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-INTERLEAVE1: for.exit: -; CHECK-INTERLEAVE1-NEXT: ret i32 [[TMP8]] +; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[PARTIAL_REDUCE]]) +; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]] +; CHECK-INTERLEAVE1: scalar.ph: ; ; CHECK-INTERLEAVED-LABEL: define i32 @dotp( ; CHECK-INTERLEAVED-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] { ; CHECK-INTERLEAVED-NEXT: entry: ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVED: vector.ph: +; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 32 +; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP1]] +; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE5:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE5:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[TMP7]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 
-; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = shl nuw i64 [[TMP3]], 4 +; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP7]], i64 [[TMP4]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP7]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load , ptr [[TMP5]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[TMP14]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP14]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD3]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = mul <16 x i32> [[TMP4]], [[TMP5]] -; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = sub <16 x i32> zeroinitializer, [[TMP6]] -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP15]]) -; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = zext <16 x i8> [[WIDE_LOAD4]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = mul <16 x i32> [[TMP8]], [[TMP9]] -; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = sub <16 x i32> zeroinitializer, [[TMP10]] -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE5]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP11]]) -; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 -; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label 
[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = shl nuw i64 [[TMP20]], 4 +; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[TMP14]], i64 [[TMP8]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load , ptr [[TMP14]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load , ptr [[TMP9]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = zext [[WIDE_LOAD3]] to +; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = zext [[WIDE_LOAD]] to +; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = mul [[TMP10]], [[TMP11]] +; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = sub zeroinitializer, [[TMP12]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP13]]) +; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = zext [[WIDE_LOAD4]] to +; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = zext [[WIDE_LOAD2]] to +; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = mul [[TMP21]], [[TMP15]] +; CHECK-INTERLEAVED-NEXT: [[TMP17:%.*]] = sub zeroinitializer, [[TMP16]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE5]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI1]], [[TMP17]]) +; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]] +; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]] -; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]]) -; CHECK-INTERLEAVED-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-INTERLEAVED: for.exit: -; CHECK-INTERLEAVED-NEXT: ret i32 [[TMP13]] +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]] +; CHECK-INTERLEAVED-NEXT: 
[[TMP19:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[BIN_RDX]]) +; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] +; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]] +; CHECK-INTERLEAVED: scalar.ph: ; ; CHECK-MAXBW-LABEL: define i32 @dotp( ; CHECK-MAXBW-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] { diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce.ll index 46ec858d7455c..3d2832eb366ad 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce.ll @@ -14,20 +14,25 @@ define i32 @zext_add_reduc_i8_i32_sve(ptr %a) #0 { ; CHECK-INTERLEAVE1-NEXT: entry: ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVE1: vector.ph: +; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP2]], 16 +; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP1]] +; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: ; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP0]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> 
[[VEC_PHI]], <16 x i32> [[TMP2]]) -; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 -; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP0]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = zext [[WIDE_LOAD]] to +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP3]]) +; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]] +; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: -; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) -; CHECK-INTERLEAVE1-NEXT: br label [[SCALAR_PH:%.*]] +; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[PARTIAL_REDUCE]]) +; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]] ; CHECK-INTERLEAVE1: scalar.ph: ; ; CHECK-INTERLEAVED-LABEL: define i32 @zext_add_reduc_i8_i32_sve( @@ -35,26 +40,33 @@ define i32 @zext_add_reduc_i8_i32_sve(ptr %a) #0 { ; CHECK-INTERLEAVED-NEXT: entry: ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVED: vector.ph: +; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP2]], 32 +; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP1]] +; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] 
= phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP0]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP3]]) -; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP4]]) -; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 -; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = shl nuw i64 [[TMP3]], 4 +; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP0]], i64 [[TMP4]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP0]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load , ptr [[TMP5]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = 
zext [[WIDE_LOAD]] to +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP6]]) +; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = zext [[WIDE_LOAD2]] to +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE3]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI1]], [[TMP7]]) +; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]] +; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[PARTIAL_REDUCE3]], [[PARTIAL_REDUCE]] -; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]]) -; CHECK-INTERLEAVED-NEXT: br label [[SCALAR_PH:%.*]] +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add [[PARTIAL_REDUCE3]], [[PARTIAL_REDUCE]] +; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[BIN_RDX]]) +; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]] +; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]] ; CHECK-INTERLEAVED: scalar.ph: ; ; CHECK-MAXBW-LABEL: define i32 @zext_add_reduc_i8_i32_sve( @@ -134,7 +146,7 @@ define i32 @zext_add_reduc_i8_i32_neon(ptr %a) #2 { ; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i64 16 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = 
load <16 x i8>, ptr [[TMP0]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> @@ -196,20 +208,25 @@ define i64 @zext_add_reduc_i8_i64(ptr %a) #0 { ; CHECK-INTERLEAVE1-NEXT: entry: ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVE1: vector.ph: +; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP0]], 16 +; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP2]] +; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: ; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i64> -; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI]], <16 x i64> [[TMP3]]) -; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 -; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP1]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = zext [[WIDE_LOAD]] to +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] 
= call @llvm.vector.partial.reduce.add.nxv2i64.nxv16i64( [[VEC_PHI]], [[TMP3]]) +; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]] +; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: -; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[PARTIAL_REDUCE]]) -; CHECK-INTERLEAVE1-NEXT: br label [[SCALAR_PH:%.*]] +; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64( [[PARTIAL_REDUCE]]) +; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]] ; CHECK-INTERLEAVE1: scalar.ph: ; ; CHECK-INTERLEAVED-LABEL: define i64 @zext_add_reduc_i8_i64( @@ -217,26 +234,33 @@ define i64 @zext_add_reduc_i8_i64(ptr %a) #0 { ; CHECK-INTERLEAVED-NEXT: entry: ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVED: vector.ph: +; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP0]], 32 +; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP2]] +; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; 
CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[TMP1]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i64> -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI]], <16 x i64> [[TMP4]]) -; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i64> -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE3]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI1]], <16 x i64> [[TMP5]]) -; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 -; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = shl nuw i64 [[TMP3]], 4 +; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP1]], i64 [[TMP4]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP1]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load , ptr [[TMP5]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = zext [[WIDE_LOAD]] to +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call @llvm.vector.partial.reduce.add.nxv2i64.nxv16i64( [[VEC_PHI]], [[TMP6]]) +; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = zext [[WIDE_LOAD2]] to +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE3]] = call @llvm.vector.partial.reduce.add.nxv2i64.nxv16i64( [[VEC_PHI1]], [[TMP7]]) +; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]] +; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 
[[N_VEC]] ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <2 x i64> [[PARTIAL_REDUCE3]], [[PARTIAL_REDUCE]] -; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[BIN_RDX]]) -; CHECK-INTERLEAVED-NEXT: br label [[SCALAR_PH:%.*]] +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add [[PARTIAL_REDUCE3]], [[PARTIAL_REDUCE]] +; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64( [[BIN_RDX]]) +; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]] +; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]] ; CHECK-INTERLEAVED: scalar.ph: ; ; CHECK-MAXBW-LABEL: define i64 @zext_add_reduc_i8_i64( @@ -290,20 +314,25 @@ define i64 @zext_add_reduc_i16_i64(ptr %a) #0 { ; CHECK-INTERLEAVE1-NEXT: entry: ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVE1: vector.ph: +; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP0]], 8 +; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP2]] +; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: ; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = getelementptr i16, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[TMP1]], align 2 -; CHECK-INTERLEAVE1-NEXT: 
[[TMP3:%.*]] = zext <8 x i16> [[WIDE_LOAD]] to <8 x i64> -; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v8i64(<2 x i64> [[VEC_PHI]], <8 x i64> [[TMP3]]) -; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 -; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP1]], align 2 +; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = zext [[WIDE_LOAD]] to +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call @llvm.vector.partial.reduce.add.nxv2i64.nxv8i64( [[VEC_PHI]], [[TMP3]]) +; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]] +; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: -; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[PARTIAL_REDUCE]]) -; CHECK-INTERLEAVE1-NEXT: br label [[SCALAR_PH:%.*]] +; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64( [[PARTIAL_REDUCE]]) +; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]] ; CHECK-INTERLEAVE1: scalar.ph: ; ; CHECK-INTERLEAVED-LABEL: define i64 @zext_add_reduc_i16_i64( @@ -311,26 +340,33 @@ define i64 @zext_add_reduc_i16_i64(ptr %a) #0 { ; CHECK-INTERLEAVED-NEXT: entry: ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVED: vector.ph: +; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP0]], 16 +; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP2]] +; 
CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i16, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr i16, ptr [[TMP1]], i32 8 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[TMP1]], align 2 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i16>, ptr [[TMP3]], align 2 -; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <8 x i16> [[WIDE_LOAD]] to <8 x i64> -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v8i64(<2 x i64> [[VEC_PHI]], <8 x i64> [[TMP4]]) -; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <8 x i16> [[WIDE_LOAD2]] to <8 x i64> -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE3]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v8i64(<2 x i64> [[VEC_PHI1]], <8 x i64> [[TMP5]]) -; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 -; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = shl nuw i64 [[TMP3]], 3 +; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = getelementptr i16, ptr [[TMP1]], i64 [[TMP4]] +; 
CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP1]], align 2 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load , ptr [[TMP5]], align 2 +; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = zext [[WIDE_LOAD]] to +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call @llvm.vector.partial.reduce.add.nxv2i64.nxv8i64( [[VEC_PHI]], [[TMP6]]) +; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = zext [[WIDE_LOAD2]] to +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE3]] = call @llvm.vector.partial.reduce.add.nxv2i64.nxv8i64( [[VEC_PHI1]], [[TMP7]]) +; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]] +; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <2 x i64> [[PARTIAL_REDUCE3]], [[PARTIAL_REDUCE]] -; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[BIN_RDX]]) -; CHECK-INTERLEAVED-NEXT: br label [[SCALAR_PH:%.*]] +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add [[PARTIAL_REDUCE3]], [[PARTIAL_REDUCE]] +; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64( [[BIN_RDX]]) +; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]] +; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]] ; CHECK-INTERLEAVED: scalar.ph: ; ; CHECK-MAXBW-LABEL: define i64 @zext_add_reduc_i16_i64( @@ -413,9 +449,9 @@ define i32 @zext_add_reduc_i8_i32_has_neon_dotprod(ptr %a) #1 { ; CHECK-INTERLEAVED-NEXT: [[VEC_PHI2:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE8:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[VEC_PHI3:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE9:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; 
CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[TMP1]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP1]], i32 32 -; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP1]], i32 48 +; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[TMP1]], i64 16 +; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP1]], i64 32 +; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP1]], i64 48 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD5:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1 @@ -486,21 +522,21 @@ define i32 @zext_add_reduc_i8_i32_predicated(ptr %a) #0 { ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVE1: vector.ph: ; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; CHECK-INTERLEAVE1-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 1025) +; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 16 +; CHECK-INTERLEAVE1-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv16i1.i64(i64 0, i64 1025) ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: ; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP10:%.*]], [[VECTOR_BODY]] ] ; 
CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVE1-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv4i8.p0(ptr align 1 [[TMP6]], [[ACTIVE_LANE_MASK]], poison) -; CHECK-INTERLEAVE1-NEXT: [[TMP8:%.*]] = zext [[WIDE_MASKED_LOAD]] to -; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = add [[TMP8]], [[VEC_PHI]] -; CHECK-INTERLEAVE1-NEXT: [[TMP10]] = select [[ACTIVE_LANE_MASK]], [[TMP9]], [[VEC_PHI]] +; CHECK-INTERLEAVE1-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv16i8.p0(ptr align 1 [[TMP6]], [[ACTIVE_LANE_MASK]], poison) +; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = zext [[WIDE_MASKED_LOAD]] to +; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = select [[ACTIVE_LANE_MASK]], [[TMP3]], zeroinitializer +; CHECK-INTERLEAVE1-NEXT: [[TMP10]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP4]]) ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]] -; CHECK-INTERLEAVE1-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX_NEXT]], i64 1025) -; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 +; CHECK-INTERLEAVE1-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv16i1.i64(i64 [[INDEX_NEXT]], i64 1025) +; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-INTERLEAVE1-NEXT: [[TMP12:%.*]] = xor i1 [[TMP11]], true ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: @@ -515,21 +551,21 @@ define i32 @zext_add_reduc_i8_i32_predicated(ptr %a) #0 { ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVED: vector.ph: ; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; CHECK-INTERLEAVED-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call 
@llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 1025) +; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 16 +; CHECK-INTERLEAVED-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv16i1.i64(i64 0, i64 1025) ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP10:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv4i8.p0(ptr align 1 [[TMP6]], [[ACTIVE_LANE_MASK]], poison) -; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = zext [[WIDE_MASKED_LOAD]] to -; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = add [[TMP8]], [[VEC_PHI]] -; CHECK-INTERLEAVED-NEXT: [[TMP10]] = select [[ACTIVE_LANE_MASK]], [[TMP9]], [[VEC_PHI]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv16i8.p0(ptr align 1 [[TMP6]], [[ACTIVE_LANE_MASK]], poison) +; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = zext [[WIDE_MASKED_LOAD]] to +; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = select [[ACTIVE_LANE_MASK]], [[TMP3]], zeroinitializer +; CHECK-INTERLEAVED-NEXT: [[TMP10]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP4]]) ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]] -; CHECK-INTERLEAVED-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX_NEXT]], i64 1025) -; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = 
extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 +; CHECK-INTERLEAVED-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv16i1.i64(i64 [[INDEX_NEXT]], i64 1025) +; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = xor i1 [[TMP11]], true ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: @@ -674,20 +710,25 @@ define i32 @zext_sub_reduc_i8_i32_has_neon_dotprod(ptr %a) #1 { ; CHECK-INTERLEAVE1-NEXT: entry: ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVE1: vector.ph: +; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP0]], 8 +; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP2]] +; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: ; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP4:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP4:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-INTERLEAVE1-NEXT: [[TMP4]] = sub <16 x i32> [[VEC_PHI]], [[TMP3]] -; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 -; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP1]], align 1 +; CHECK-INTERLEAVE1-NEXT: 
[[TMP3:%.*]] = zext [[WIDE_LOAD]] to +; CHECK-INTERLEAVE1-NEXT: [[TMP4]] = sub [[VEC_PHI]], [[TMP3]] +; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]] +; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: -; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP4]]) -; CHECK-INTERLEAVE1-NEXT: br label [[SCALAR_PH:%.*]] +; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.nxv8i32( [[TMP4]]) +; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]] ; CHECK-INTERLEAVE1: scalar.ph: ; ; CHECK-INTERLEAVED-LABEL: define i32 @zext_sub_reduc_i8_i32_has_neon_dotprod( @@ -695,38 +736,49 @@ define i32 @zext_sub_reduc_i8_i32_has_neon_dotprod(ptr %a) #1 { ; CHECK-INTERLEAVED-NEXT: entry: ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVED: vector.ph: +; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP0]], 32 +; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP2]] +; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP6:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI2:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP10:%.*]], 
[[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI3:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP11:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP16:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP17:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI2:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP18:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI3:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP19:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[TMP1]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP1]], i32 32 -; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[TMP1]], i32 48 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD5:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD6:%.*]] = load <16 x i8>, ptr [[TMP9]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = zext <16 x i8> [[WIDE_LOAD5]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = zext <16 x i8> [[WIDE_LOAD6]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP6]] = sub <16 x i32> [[VEC_PHI]], [[TMP4]] -; CHECK-INTERLEAVED-NEXT: [[TMP7]] = sub <16 x i32> [[VEC_PHI1]], [[TMP5]] -; CHECK-INTERLEAVED-NEXT: [[TMP10]] = sub <16 x i32> [[VEC_PHI2]], [[TMP12]] -; CHECK-INTERLEAVED-NEXT: [[TMP11]] = sub <16 x i32> [[VEC_PHI3]], [[TMP14]] -; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = 
add nuw i64 [[INDEX]], 64 -; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = shl nuw i64 [[TMP3]], 3 +; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP1]], i64 [[TMP4]] +; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = shl nuw i64 [[TMP6]], 4 +; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP1]], i64 [[TMP7]] +; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 24 +; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[TMP1]], i64 [[TMP10]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP1]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load , ptr [[TMP5]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD5:%.*]] = load , ptr [[TMP8]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD6:%.*]] = load , ptr [[TMP11]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = zext [[WIDE_LOAD]] to +; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = zext [[WIDE_LOAD4]] to +; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = zext [[WIDE_LOAD5]] to +; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = zext [[WIDE_LOAD6]] to +; CHECK-INTERLEAVED-NEXT: [[TMP16]] = sub [[VEC_PHI]], [[TMP12]] +; CHECK-INTERLEAVED-NEXT: [[TMP17]] = sub [[VEC_PHI1]], [[TMP13]] +; CHECK-INTERLEAVED-NEXT: [[TMP18]] = sub [[VEC_PHI2]], [[TMP14]] +; CHECK-INTERLEAVED-NEXT: [[TMP19]] = sub [[VEC_PHI3]], [[TMP15]] +; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]] +; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop 
[[LOOP13:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <16 x i32> [[TMP7]], [[TMP6]] -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX7:%.*]] = add <16 x i32> [[TMP10]], [[BIN_RDX]] -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX8:%.*]] = add <16 x i32> [[TMP11]], [[BIN_RDX7]] -; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[BIN_RDX8]]) -; CHECK-INTERLEAVED-NEXT: br label [[SCALAR_PH:%.*]] +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add [[TMP17]], [[TMP16]] +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX7:%.*]] = add [[TMP18]], [[BIN_RDX]] +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX8:%.*]] = add [[TMP19]], [[BIN_RDX7]] +; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = call i32 @llvm.vector.reduce.add.nxv8i32( [[BIN_RDX8]]) +; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]] +; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]] ; CHECK-INTERLEAVED: scalar.ph: ; ; CHECK-MAXBW-LABEL: define i32 @zext_sub_reduc_i8_i32_has_neon_dotprod( @@ -780,20 +832,25 @@ define i32 @sext_add_reduc_i8_i32(ptr %a) #0 { ; CHECK-INTERLEAVE1-NEXT: entry: ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVE1: vector.ph: +; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP0]], 16 +; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP2]] +; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: ; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; 
CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP3]]) -; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 -; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP1]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = sext [[WIDE_LOAD]] to +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP3]]) +; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]] +; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: -; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) -; CHECK-INTERLEAVE1-NEXT: br label [[SCALAR_PH:%.*]] +; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[PARTIAL_REDUCE]]) +; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]] ; CHECK-INTERLEAVE1: scalar.ph: ; ; CHECK-INTERLEAVED-LABEL: define i32 @sext_add_reduc_i8_i32( @@ -801,26 +858,33 @@ define i32 @sext_add_reduc_i8_i32(ptr %a) #0 { ; CHECK-INTERLEAVED-NEXT: entry: ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVED: vector.ph: +; CHECK-INTERLEAVED-NEXT: 
[[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP0]], 32 +; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP2]] +; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[TMP1]], i32 16 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP4]]) -; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP5]]) -; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 -; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = call i64 
@llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = shl nuw i64 [[TMP3]], 4 +; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP1]], i64 [[TMP4]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP1]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load , ptr [[TMP5]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = sext [[WIDE_LOAD]] to +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP6]]) +; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = sext [[WIDE_LOAD2]] to +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE3]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI1]], [[TMP7]]) +; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]] +; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[PARTIAL_REDUCE3]], [[PARTIAL_REDUCE]] -; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]]) -; CHECK-INTERLEAVED-NEXT: br label [[SCALAR_PH:%.*]] +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add [[PARTIAL_REDUCE3]], [[PARTIAL_REDUCE]] +; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[BIN_RDX]]) +; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]] +; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]] ; CHECK-INTERLEAVED: scalar.ph: ; ; CHECK-MAXBW-LABEL: define i32 @sext_add_reduc_i8_i32( @@ -874,28 +938,32 @@ define i32 @add_of_zext_outside_loop(i32 %a, ptr noalias %b, i8 %c, i32 %d) #0 { ; CHECK-INTERLEAVE1-NEXT: entry: ; CHECK-INTERLEAVE1-NEXT: [[CONV1:%.*]] = zext i8 [[C]] to i32 ; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = sub i32 1024, [[D]] -; 
CHECK-INTERLEAVE1-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP0]], 16 +; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = call i32 @llvm.vscale.i32() +; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = shl nuw nsw i32 [[TMP5]], 4 +; CHECK-INTERLEAVE1-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP0]], [[TMP2]] ; CHECK-INTERLEAVE1-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVE1: vector.ph: -; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP0]], 16 +; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = call i32 @llvm.vscale.i32() +; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = mul nuw i32 [[TMP7]], 16 +; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP0]], [[TMP4]] ; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP0]], [[N_MOD_VF]] ; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = add i32 [[D]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = insertelement <16 x i32> zeroinitializer, i32 [[A]], i32 0 -; CHECK-INTERLEAVE1-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[CONV1]], i64 0 -; CHECK-INTERLEAVE1-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i32> [[BROADCAST_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer +; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = insertelement zeroinitializer, i32 [[A]], i32 0 +; CHECK-INTERLEAVE1-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i32 [[CONV1]], i64 0 +; CHECK-INTERLEAVE1-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer ; CHECK-INTERLEAVE1-NEXT: br label [[FOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: ; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <16 x i32> [ [[TMP2]], [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[FOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi [ [[TMP9]], [[VECTOR_PH]] ], [ [[TMP8:%.*]], [[FOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[OFFSET_IDX:%.*]] = 
add i32 [[D]], [[INDEX]] ; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[B]], i32 [[OFFSET_IDX]] -; CHECK-INTERLEAVE1-NEXT: store <16 x i8> zeroinitializer, ptr [[TMP3]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP5]] = add <16 x i32> [[VEC_PHI]], [[BROADCAST_SPLAT]] -; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16 +; CHECK-INTERLEAVE1-NEXT: store zeroinitializer, ptr [[TMP3]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP8]] = add [[VEC_PHI]], [[BROADCAST_SPLAT]] +; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP4]] ; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: -; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP5]]) +; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.nxv16i32( [[TMP8]]) ; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP0]], [[N_VEC]] ; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] ; CHECK-INTERLEAVE1: scalar.ph: @@ -905,33 +973,39 @@ define i32 @add_of_zext_outside_loop(i32 %a, ptr noalias %b, i8 %c, i32 %d) #0 { ; CHECK-INTERLEAVED-NEXT: entry: ; CHECK-INTERLEAVED-NEXT: [[CONV1:%.*]] = zext i8 [[C]] to i32 ; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = sub i32 1024, [[D]] -; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = icmp ult i32 [[TMP2]], 32 +; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = call i32 @llvm.vscale.i32() +; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = shl nuw nsw i32 [[TMP5]], 5 +; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = icmp ult i32 [[TMP2]], [[TMP7]] ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP11]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVED: vector.ph: -; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP2]], 32 +; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = call i32 
@llvm.vscale.i32() +; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = mul nuw i32 [[TMP13]], 32 +; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP2]], [[TMP4]] ; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP2]], [[N_MOD_VF]] ; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = add i32 [[D]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = insertelement <16 x i32> zeroinitializer, i32 [[A]], i32 0 -; CHECK-INTERLEAVED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[CONV1]], i64 0 -; CHECK-INTERLEAVED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i32> [[BROADCAST_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer +; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = insertelement zeroinitializer, i32 [[A]], i32 0 +; CHECK-INTERLEAVED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i32 [[CONV1]], i64 0 +; CHECK-INTERLEAVED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP21:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <16 x i32> [ [[TMP10]], [[VECTOR_PH]] ], [ [[TMP6:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI2:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi [ [[TMP6]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI2:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP12:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[OFFSET_IDX:%.*]] = add i32 [[D]], [[VEC_PHI1]] ; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[B]], i32 [[OFFSET_IDX]] -; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i32 16 -; CHECK-INTERLEAVED-NEXT: store <16 x i8> zeroinitializer, ptr 
[[TMP3]], align 1 -; CHECK-INTERLEAVED-NEXT: store <16 x i8> zeroinitializer, ptr [[TMP5]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP6]] = add <16 x i32> [[VEC_PHI]], [[BROADCAST_SPLAT]] -; CHECK-INTERLEAVED-NEXT: [[TMP7]] = add <16 x i32> [[VEC_PHI2]], [[BROADCAST_SPLAT]] -; CHECK-INTERLEAVED-NEXT: [[TMP21]] = add nuw i32 [[VEC_PHI1]], 32 +; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = shl nuw i64 [[TMP16]], 4 +; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i64 [[TMP9]] +; CHECK-INTERLEAVED-NEXT: store zeroinitializer, ptr [[TMP3]], align 1 +; CHECK-INTERLEAVED-NEXT: store zeroinitializer, ptr [[TMP10]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP15]] = add [[VEC_PHI]], [[BROADCAST_SPLAT]] +; CHECK-INTERLEAVED-NEXT: [[TMP12]] = add [[VEC_PHI2]], [[BROADCAST_SPLAT]] +; CHECK-INTERLEAVED-NEXT: [[TMP21]] = add nuw i32 [[VEC_PHI1]], [[TMP4]] ; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i32 [[TMP21]], [[N_VEC]] ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <16 x i32> [[TMP7]], [[TMP6]] -; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[BIN_RDX]]) +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add [[TMP12]], [[TMP15]] +; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = call i32 @llvm.vector.reduce.add.nxv16i32( [[BIN_RDX]]) ; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP2]], [[N_VEC]] ; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] ; CHECK-INTERLEAVED: scalar.ph: @@ -995,29 +1069,33 @@ define i32 @add_of_loop_invariant_zext(i32 %a, ptr %b, i8 %c, i32 %d) #0 { ; CHECK-INTERLEAVE1-SAME: i32 [[A:%.*]], ptr [[B:%.*]], i8 [[C:%.*]], i32 [[D:%.*]]) #[[ATTR0]] { ; CHECK-INTERLEAVE1-NEXT: entry: ; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = sub 
i32 1024, [[D]] -; CHECK-INTERLEAVE1-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP0]], 16 +; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = call i32 @llvm.vscale.i32() +; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = shl nuw nsw i32 [[TMP5]], 4 +; CHECK-INTERLEAVE1-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP0]], [[TMP2]] ; CHECK-INTERLEAVE1-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVE1: vector.ph: -; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP0]], 16 +; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = call i32 @llvm.vscale.i32() +; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = mul nuw i32 [[TMP3]], 16 +; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP0]], [[TMP9]] ; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP0]], [[N_MOD_VF]] ; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = add i32 [[D]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> zeroinitializer, i32 [[A]], i32 0 -; CHECK-INTERLEAVE1-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[C]], i64 0 -; CHECK-INTERLEAVE1-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i8> [[BROADCAST_SPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer +; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = insertelement zeroinitializer, i32 [[A]], i32 0 +; CHECK-INTERLEAVE1-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i8 [[C]], i64 0 +; CHECK-INTERLEAVE1-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer ; CHECK-INTERLEAVE1-NEXT: br label [[FOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: ; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ [[TMP2]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[FOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi [ [[TMP6]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[FOR_BODY]] ] ; 
CHECK-INTERLEAVE1-NEXT: [[OFFSET_IDX:%.*]] = add i32 [[D]], [[INDEX]] ; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[B]], i32 [[OFFSET_IDX]] -; CHECK-INTERLEAVE1-NEXT: store <16 x i8> zeroinitializer, ptr [[TMP4]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[BROADCAST_SPLAT]] to <16 x i32> -; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP5]]) -; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16 +; CHECK-INTERLEAVE1-NEXT: store zeroinitializer, ptr [[TMP4]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP8:%.*]] = zext [[BROADCAST_SPLAT]] to +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP8]]) +; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP9]] ; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: -; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) +; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[PARTIAL_REDUCE]]) ; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP0]], [[N_VEC]] ; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] ; CHECK-INTERLEAVE1: scalar.ph: @@ -1026,34 +1104,40 @@ define i32 @add_of_loop_invariant_zext(i32 %a, ptr %b, i8 %c, i32 %d) #0 { ; CHECK-INTERLEAVED-SAME: i32 [[A:%.*]], ptr [[B:%.*]], i8 [[C:%.*]], i32 [[D:%.*]]) #[[ATTR0]] { ; CHECK-INTERLEAVED-NEXT: entry: ; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = sub i32 1024, [[D]] -; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = icmp ult i32 [[TMP2]], 32 +; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = call i32 @llvm.vscale.i32() +; 
CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = shl nuw nsw i32 [[TMP5]], 5 +; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = icmp ult i32 [[TMP2]], [[TMP7]] ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP11]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVED: vector.ph: -; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP2]], 32 +; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = call i32 @llvm.vscale.i32() +; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = mul nuw i32 [[TMP3]], 32 +; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP2]], [[TMP12]] ; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP2]], [[N_MOD_VF]] ; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = add i32 [[D]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = insertelement <4 x i32> zeroinitializer, i32 [[A]], i32 0 -; CHECK-INTERLEAVED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[C]], i64 0 -; CHECK-INTERLEAVED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i8> [[BROADCAST_SPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer +; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = insertelement zeroinitializer, i32 [[A]], i32 0 +; CHECK-INTERLEAVED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i8 [[C]], i64 0 +; CHECK-INTERLEAVED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP22:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ [[TMP3]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI2:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE2:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi [ [[TMP6]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI2:%.*]] = phi [ 
zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE2:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[OFFSET_IDX:%.*]] = add i32 [[D]], [[VEC_PHI1]] ; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[B]], i32 [[OFFSET_IDX]] -; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i32 16 -; CHECK-INTERLEAVED-NEXT: store <16 x i8> zeroinitializer, ptr [[TMP4]], align 1 -; CHECK-INTERLEAVED-NEXT: store <16 x i8> zeroinitializer, ptr [[TMP6]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[BROADCAST_SPLAT]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP5]]) -; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE2]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI2]], <16 x i32> [[TMP5]]) -; CHECK-INTERLEAVED-NEXT: [[TMP22]] = add nuw i32 [[VEC_PHI1]], 32 +; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = shl nuw i64 [[TMP8]], 4 +; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i64 [[TMP14]] +; CHECK-INTERLEAVED-NEXT: store zeroinitializer, ptr [[TMP4]], align 1 +; CHECK-INTERLEAVED-NEXT: store zeroinitializer, ptr [[TMP10]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = zext [[BROADCAST_SPLAT]] to +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP15]]) +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE2]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI2]], [[TMP15]]) +; CHECK-INTERLEAVED-NEXT: [[TMP22]] = add nuw i32 [[VEC_PHI1]], [[TMP12]] ; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP22]], [[N_VEC]] ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: -; 
CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[PARTIAL_REDUCE2]], [[PARTIAL_REDUCE]] -; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]]) +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add [[PARTIAL_REDUCE2]], [[PARTIAL_REDUCE]] +; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[BIN_RDX]]) ; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP2]], [[N_VEC]] ; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] ; CHECK-INTERLEAVED: scalar.ph: @@ -1156,9 +1240,9 @@ define i64 @sext_reduction_i32_to_i64(ptr %arr, i64 %n) #1 { ; CHECK-INTERLEAVED-NEXT: [[VEC_PHI2:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE8:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[VEC_PHI3:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE9:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i32 4 -; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i32 8 -; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i32 12 +; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i64 4 +; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i64 8 +; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i64 12 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x i32>, ptr [[TMP14]], align 4 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/pr151664-cost-hoisted-vector-scalable.ll 
b/llvm/test/Transforms/LoopVectorize/AArch64/pr151664-cost-hoisted-vector-scalable.ll index 5355a9772ef10..73dbefeb10413 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/pr151664-cost-hoisted-vector-scalable.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/pr151664-cost-hoisted-vector-scalable.ll @@ -15,7 +15,7 @@ define void @cost_hoisted_vector_code(ptr %p, float %arg) { ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 1, [[INDEX]] ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr float, ptr [[P]], i64 [[OFFSET_IDX]] -; CHECK-NEXT: [[TMP2:%.*]] = getelementptr float, ptr [[TMP1]], i32 4 +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr float, ptr [[TMP1]], i64 4 ; CHECK-NEXT: store <4 x float> [[TMP0]], ptr [[TMP1]], align 4 ; CHECK-NEXT: store <4 x float> [[TMP0]], ptr [[TMP2]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/replicating-load-store-costs.ll b/llvm/test/Transforms/LoopVectorize/AArch64/replicating-load-store-costs.ll index 68cfc659e1e94..fceab6f823d5a 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/replicating-load-store-costs.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/replicating-load-store-costs.ll @@ -229,9 +229,9 @@ define void @test_load_gep_widen_induction(ptr noalias %dst, ptr noalias %dst2) ; CHECK-NEXT: store ptr null, ptr [[TMP11]], align 8 ; CHECK-NEXT: store ptr null, ptr [[TMP17]], align 8 ; CHECK-NEXT: [[TMP12:%.*]] = getelementptr ptr, ptr [[DST2]], i64 [[OFFSET_IDX]] -; CHECK-NEXT: [[TMP13:%.*]] = getelementptr ptr, ptr [[TMP12]], i32 2 -; CHECK-NEXT: [[TMP14:%.*]] = getelementptr ptr, ptr [[TMP12]], i32 4 -; CHECK-NEXT: [[TMP15:%.*]] = getelementptr ptr, ptr [[TMP12]], i32 6 +; CHECK-NEXT: [[TMP13:%.*]] = getelementptr ptr, ptr [[TMP12]], i64 2 +; CHECK-NEXT: [[TMP14:%.*]] = getelementptr ptr, ptr [[TMP12]], i64 4 +; CHECK-NEXT: [[TMP15:%.*]] = 
getelementptr ptr, ptr [[TMP12]], i64 6 ; CHECK-NEXT: store <2 x ptr> [[TMP0]], ptr [[TMP12]], align 8 ; CHECK-NEXT: store <2 x ptr> [[TMP1]], ptr [[TMP13]], align 8 ; CHECK-NEXT: store <2 x ptr> [[TMP2]], ptr [[TMP14]], align 8 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-vectorization.ll b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-vectorization.ll index dcb2b9b08d1e9..1d215118449aa 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-vectorization.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-vectorization.ll @@ -1,19 +1,19 @@ ; REQUIRES: asserts ; RUN: opt -mtriple=aarch64-none-linux-gnu -mattr=+sve -force-target-instruction-cost=1 -passes=loop-vectorize -S -debug-only=loop-vectorize --disable-output -scalable-vectorization=off < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK_SCALABLE_DISABLED ; RUN: opt -mtriple=aarch64-none-linux-gnu -mattr=+sve -force-target-instruction-cost=1 -passes=loop-vectorize -S -debug-only=loop-vectorize --disable-output -scalable-vectorization=on < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK_SCALABLE_ON -; RUN: opt -mtriple=aarch64-none-linux-gnu -mattr=+sve -force-target-instruction-cost=1 -passes=loop-vectorize -S -debug-only=loop-vectorize --disable-output -vectorizer-maximize-bandwidth -scalable-vectorization=on < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK_SCALABLE_ON_MAXBW +; RUN: opt -mtriple=aarch64-none-linux-gnu -mattr=+sve -force-target-instruction-cost=1 -passes=loop-vectorize -S -debug-only=loop-vectorize --disable-output -vectorizer-maximize-bandwidth=false -scalable-vectorization=on < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK_SCALABLE_ON_NOMAXBW ; Test that the MaxVF for the following loop, that has no dependence distances, ; is calculated as vscale x 4 (max legal SVE vector size) or vscale x 16 ; (maximized bandwidth for i8 in the loop). 
define void @test0(ptr %a, ptr %b, ptr %c) #0 { ; CHECK: LV: Checking a loop in 'test0' -; CHECK_SCALABLE_ON: LV: Found feasible scalable VF = vscale x 4 -; CHECK_SCALABLE_ON: LV: Selecting VF: 16 +; CHECK_SCALABLE_ON: LV: Found feasible scalable VF = vscale x 16 +; CHECK_SCALABLE_ON: LV: Selecting VF: vscale x 16 ; CHECK_SCALABLE_DISABLED-NOT: LV: Found feasible scalable VF ; CHECK_SCALABLE_DISABLED: LV: Selecting VF: 16 -; CHECK_SCALABLE_ON_MAXBW: LV: Found feasible scalable VF = vscale x 16 -; CHECK_SCALABLE_ON_MAXBW: LV: Selecting VF: vscale x 16 +; CHECK_SCALABLE_ON_NOMAXBW: LV: Found feasible scalable VF = vscale x 4 +; CHECK_SCALABLE_ON_NOMAXBW: LV: Selecting VF: vscale x 4 entry: br label %loop @@ -43,8 +43,8 @@ define void @test1(ptr %a, ptr %b) #0 { ; CHECK_SCALABLE_ON: LV: Selecting VF: 16 ; CHECK_SCALABLE_DISABLED-NOT: LV: Found feasible scalable VF ; CHECK_SCALABLE_DISABLED: LV: Selecting VF: 16 -; CHECK_SCALABLE_ON_MAXBW: LV: Found feasible scalable VF = vscale x 4 -; CHECK_SCALABLE_ON_MAXBW: LV: Selecting VF: 16 +; CHECK_SCALABLE_ON_NOMAXBW: LV: Found feasible scalable VF = vscale x 4 +; CHECK_SCALABLE_ON_NOMAXBW: LV: Selecting VF: vscale x 4 entry: br label %loop @@ -75,8 +75,8 @@ define void @test2(ptr %a, ptr %b) #0 { ; CHECK_SCALABLE_ON: LV: Selecting VF: 16 ; CHECK_SCALABLE_DISABLED-NOT: LV: Found feasible scalable VF ; CHECK_SCALABLE_DISABLED: LV: Selecting VF: 16 -; CHECK_SCALABLE_ON_MAXBW: LV: Found feasible scalable VF = vscale x 2 -; CHECK_SCALABLE_ON_MAXBW: LV: Selecting VF: 16 +; CHECK_SCALABLE_ON_NOMAXBW: LV: Found feasible scalable VF = vscale x 2 +; CHECK_SCALABLE_ON_NOMAXBW: LV: Selecting VF: 4 entry: br label %loop @@ -107,8 +107,8 @@ define void @test3(ptr %a, ptr %b) #0 { ; CHECK_SCALABLE_ON: LV: Selecting VF: 16 ; CHECK_SCALABLE_DISABLED-NOT: LV: Found feasible scalable VF ; CHECK_SCALABLE_DISABLED: LV: Selecting VF: 16 -; CHECK_SCALABLE_ON_MAXBW: LV: Found feasible scalable VF = vscale x 1 -; CHECK_SCALABLE_ON_MAXBW: LV: 
Selecting VF: 16 +; CHECK_SCALABLE_ON_NOMAXBW: LV: Found feasible scalable VF = vscale x 1 +; CHECK_SCALABLE_ON_NOMAXBW: LV: Selecting VF: 4 entry: br label %loop @@ -140,8 +140,8 @@ define void @test4(ptr %a, ptr %b) #0 { ; CHECK_SCALABLE_ON: LV: Selecting VF: 4 ; CHECK_SCALABLE_DISABLED-NOT: LV: Found feasible scalable VF ; CHECK_SCALABLE_DISABLED: LV: Selecting VF: 4 -; CHECK_SCALABLE_ON_MAXBW-NOT: LV: Found feasible scalable VF -; CHECK_SCALABLE_ON_MAXBW: LV: Selecting VF: 4 +; CHECK_SCALABLE_ON_NOMAXBW-NOT: LV: Found feasible scalable VF +; CHECK_SCALABLE_ON_NOMAXBW: LV: Selecting VF: 4 entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll b/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll index 1596b60f48567..365ac6d27fcb0 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll @@ -9,15 +9,15 @@ define void @cost_store_i8(ptr %dst) #0 { ; DEFAULT-LABEL: define void @cost_store_i8( ; DEFAULT-SAME: ptr [[DST:%.*]]) #[[ATTR0:[0-9]+]] { ; DEFAULT-NEXT: iter.check: +; DEFAULT-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64() +; DEFAULT-NEXT: [[TMP13:%.*]] = shl nuw i64 [[TMP10]], 2 +; DEFAULT-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ult i64 101, [[TMP13]] +; DEFAULT-NEXT: br i1 [[MIN_ITERS_CHECK1]], label [[VEC_EPILOG_SCALAR_PH:%.*]], label [[VECTOR_MAIN_LOOP_ITER_CHECK:%.*]] +; DEFAULT: vector.main.loop.iter.check: ; DEFAULT-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; DEFAULT-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 2 +; DEFAULT-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 5 ; DEFAULT-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 101, [[TMP1]] -; DEFAULT-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH:%.*]], label [[VECTOR_MAIN_LOOP_ITER_CHECK:%.*]] -; DEFAULT: vector.main.loop.iter.check: -; DEFAULT-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() -; DEFAULT-NEXT: [[TMP10:%.*]] = shl nuw i64 [[TMP9]], 5 
-; DEFAULT-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ult i64 101, [[TMP10]] -; DEFAULT-NEXT: br i1 [[MIN_ITERS_CHECK1]], label [[VEC_EPILOG_PH:%.*]], label [[VECTOR_PH:%.*]] +; DEFAULT-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[VEC_EPILOG_PH:%.*]], label [[VECTOR_PH:%.*]] ; DEFAULT: vector.ph: ; DEFAULT-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() ; DEFAULT-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 32 @@ -39,22 +39,22 @@ define void @cost_store_i8(ptr %dst) #0 { ; DEFAULT-NEXT: [[CMP_N:%.*]] = icmp eq i64 101, [[N_VEC]] ; DEFAULT-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; DEFAULT: vec.epilog.iter.check: -; DEFAULT-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], [[TMP1]] +; DEFAULT-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], [[TMP13]] ; DEFAULT-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]] ; DEFAULT: vec.epilog.ph: ; DEFAULT-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] -; DEFAULT-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64() -; DEFAULT-NEXT: [[TMP15:%.*]] = mul nuw i64 [[TMP14]], 4 -; DEFAULT-NEXT: [[N_MOD_VF2:%.*]] = urem i64 101, [[TMP15]] +; DEFAULT-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() +; DEFAULT-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 4 +; DEFAULT-NEXT: [[N_MOD_VF2:%.*]] = urem i64 101, [[TMP12]] ; DEFAULT-NEXT: [[N_VEC3:%.*]] = sub i64 101, [[N_MOD_VF2]] ; DEFAULT-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]] ; DEFAULT: vec.epilog.vector.body: -; DEFAULT-NEXT: [[INDEX5:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT6:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ] -; DEFAULT-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[DST]], i64 [[INDEX5]] -; DEFAULT-NEXT: store zeroinitializer, ptr [[TMP19]], align 1 -; DEFAULT-NEXT: [[INDEX_NEXT6]] = add nuw i64 [[INDEX5]], [[TMP15]] -; DEFAULT-NEXT: 
[[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT6]], [[N_VEC3]] -; DEFAULT-NEXT: br i1 [[TMP21]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; DEFAULT-NEXT: [[INDEX1:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT2:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ] +; DEFAULT-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[DST]], i64 [[INDEX1]] +; DEFAULT-NEXT: store zeroinitializer, ptr [[TMP9]], align 1 +; DEFAULT-NEXT: [[INDEX_NEXT2]] = add nuw i64 [[INDEX1]], [[TMP12]] +; DEFAULT-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT2]], [[N_VEC3]] +; DEFAULT-NEXT: br i1 [[TMP14]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; DEFAULT: vec.epilog.middle.block: ; DEFAULT-NEXT: [[CMP_N6:%.*]] = icmp eq i64 101, [[N_VEC3]] ; DEFAULT-NEXT: br i1 [[CMP_N6]], label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] @@ -128,35 +128,46 @@ define void @trunc_store(ptr %dst, ptr %src, i16 %x) #1 { ; DEFAULT-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]] ; DEFAULT-NEXT: br i1 [[FOUND_CONFLICT]], label [[VEC_EPILOG_SCALAR_PH]], label [[VECTOR_MAIN_LOOP_ITER_CHECK:%.*]] ; DEFAULT: vector.main.loop.iter.check: -; DEFAULT-NEXT: br i1 false, label [[VEC_EPILOG_PH:%.*]], label [[VECTOR_PH:%.*]] +; DEFAULT-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; DEFAULT-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 5 +; DEFAULT-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1000, [[TMP1]] +; DEFAULT-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[VEC_EPILOG_PH:%.*]], label [[VECTOR_PH:%.*]] ; DEFAULT: vector.ph: -; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i16> poison, i16 [[X]], i64 0 -; DEFAULT-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i16> [[BROADCAST_SPLATINSERT]], <16 x i16> poison, <16 x i32> zeroinitializer -; DEFAULT-NEXT: [[TMP1:%.*]] = load i64, ptr [[SRC]], align 8, !alias.scope [[META6:![0-9]+]] -; DEFAULT-NEXT: 
[[BROADCAST_SPLATINSERT2:%.*]] = insertelement <16 x i64> poison, i64 [[TMP1]], i64 0 -; DEFAULT-NEXT: [[BROADCAST_SPLAT3:%.*]] = shufflevector <16 x i64> [[BROADCAST_SPLATINSERT2]], <16 x i64> poison, <16 x i32> zeroinitializer -; DEFAULT-NEXT: [[TMP2:%.*]] = trunc <16 x i64> [[BROADCAST_SPLAT3]] to <16 x i8> -; DEFAULT-NEXT: [[TMP0:%.*]] = trunc <16 x i16> [[BROADCAST_SPLAT]] to <16 x i8> -; DEFAULT-NEXT: [[TMP3:%.*]] = and <16 x i8> [[TMP2]], [[TMP0]] +; DEFAULT-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; DEFAULT-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 32 +; DEFAULT-NEXT: [[N_MOD_VF:%.*]] = urem i64 1000, [[TMP3]] +; DEFAULT-NEXT: [[N_VEC:%.*]] = sub i64 1000, [[N_MOD_VF]] +; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i16 [[X]], i64 0 +; DEFAULT-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer +; DEFAULT-NEXT: [[TMP5:%.*]] = load i64, ptr [[SRC]], align 8, !alias.scope [[META6:![0-9]+]] +; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement poison, i64 [[TMP5]], i64 0 +; DEFAULT-NEXT: [[BROADCAST_SPLAT3:%.*]] = shufflevector [[BROADCAST_SPLATINSERT2]], poison, zeroinitializer +; DEFAULT-NEXT: [[TMP6:%.*]] = trunc [[BROADCAST_SPLAT3]] to +; DEFAULT-NEXT: [[TMP13:%.*]] = trunc [[BROADCAST_SPLAT]] to +; DEFAULT-NEXT: [[TMP14:%.*]] = and [[TMP6]], [[TMP13]] ; DEFAULT-NEXT: br label [[VECTOR_BODY:%.*]] ; DEFAULT: vector.body: ; DEFAULT-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; DEFAULT-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[DST]], i64 [[INDEX]] -; DEFAULT-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP4]], i32 16 -; DEFAULT-NEXT: store <16 x i8> [[TMP3]], ptr [[TMP4]], align 1, !alias.scope [[META9:![0-9]+]], !noalias [[META6]] -; DEFAULT-NEXT: store <16 x i8> [[TMP3]], ptr [[TMP5]], align 1, !alias.scope [[META9]], !noalias [[META6]] -; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 -; DEFAULT-NEXT: 
[[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 992 -; DEFAULT-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; DEFAULT-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64() +; DEFAULT-NEXT: [[TMP16:%.*]] = shl nuw i64 [[TMP15]], 4 +; DEFAULT-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[TMP4]], i64 [[TMP16]] +; DEFAULT-NEXT: store [[TMP14]], ptr [[TMP4]], align 1, !alias.scope [[META9:![0-9]+]], !noalias [[META6]] +; DEFAULT-NEXT: store [[TMP14]], ptr [[TMP17]], align 1, !alias.scope [[META9]], !noalias [[META6]] +; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] +; DEFAULT-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; DEFAULT-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; DEFAULT: middle.block: -; DEFAULT-NEXT: br i1 false, label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] +; DEFAULT-NEXT: [[CMP_N:%.*]] = icmp eq i64 1000, [[N_VEC]] +; DEFAULT-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; DEFAULT: vec.epilog.iter.check: -; DEFAULT-NEXT: br i1 false, label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF12:![0-9]+]] +; DEFAULT-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8 +; DEFAULT-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF12:![0-9]+]] ; DEFAULT: vec.epilog.ph: -; DEFAULT-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ 992, [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] +; DEFAULT-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT4:%.*]] = insertelement <8 x i16> poison, i16 [[X]], i64 0 ; DEFAULT-NEXT: [[BROADCAST_SPLAT5:%.*]] = shufflevector <8 x i16> [[BROADCAST_SPLATINSERT4]], <8 x i16> poison, <8 x i32> zeroinitializer -; DEFAULT-NEXT: 
[[TMP8:%.*]] = load i64, ptr [[SRC]], align 8, !alias.scope [[META6]] +; DEFAULT-NEXT: [[TMP8:%.*]] = load i64, ptr [[SRC]], align 8, !alias.scope [[META13:![0-9]+]] ; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT7:%.*]] = insertelement <8 x i64> poison, i64 [[TMP8]], i64 0 ; DEFAULT-NEXT: [[BROADCAST_SPLAT8:%.*]] = shufflevector <8 x i64> [[BROADCAST_SPLATINSERT7]], <8 x i64> poison, <8 x i32> zeroinitializer ; DEFAULT-NEXT: [[TMP9:%.*]] = trunc <8 x i64> [[BROADCAST_SPLAT8]] to <8 x i8> @@ -165,15 +176,15 @@ define void @trunc_store(ptr %dst, ptr %src, i16 %x) #1 { ; DEFAULT-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]] ; DEFAULT: vec.epilog.vector.body: ; DEFAULT-NEXT: [[INDEX6:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT9:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ] -; DEFAULT-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[DST]], i64 [[INDEX6]] -; DEFAULT-NEXT: store <8 x i8> [[TMP10]], ptr [[TMP13]], align 1, !alias.scope [[META9]], !noalias [[META6]] +; DEFAULT-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[DST]], i64 [[INDEX6]] +; DEFAULT-NEXT: store <8 x i8> [[TMP10]], ptr [[TMP11]], align 1, !alias.scope [[META16:![0-9]+]], !noalias [[META13]] ; DEFAULT-NEXT: [[INDEX_NEXT9]] = add nuw i64 [[INDEX6]], 8 ; DEFAULT-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT9]], 1000 -; DEFAULT-NEXT: br i1 [[TMP12]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; DEFAULT-NEXT: br i1 [[TMP12]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; DEFAULT: vec.epilog.middle.block: ; DEFAULT-NEXT: br i1 true, label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] ; DEFAULT: vec.epilog.scalar.ph: -; DEFAULT-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 1000, [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ 992, [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MEMCHECK]] ], [ 0, [[ITER_CHECK:%.*]] ] +; DEFAULT-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 1000, [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ 
[[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MEMCHECK]] ], [ 0, [[ITER_CHECK:%.*]] ] ; DEFAULT-NEXT: br label [[LOOP:%.*]] ; DEFAULT: loop: ; DEFAULT-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] @@ -185,7 +196,7 @@ define void @trunc_store(ptr %dst, ptr %src, i16 %x) #1 { ; DEFAULT-NEXT: store i8 [[TRUNC]], ptr [[GEP]], align 1 ; DEFAULT-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 ; DEFAULT-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; DEFAULT-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP14:![0-9]+]] +; DEFAULT-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP19:![0-9]+]] ; DEFAULT: exit: ; DEFAULT-NEXT: ret void ; @@ -202,25 +213,25 @@ define void @trunc_store(ptr %dst, ptr %src, i16 %x) #1 { ; PRED-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; PRED: vector.ph: ; PRED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; PRED-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 -; PRED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i16 [[X]], i64 0 -; PRED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer -; PRED-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1000) -; PRED-NEXT: [[TMP2:%.*]] = trunc [[BROADCAST_SPLAT]] to +; PRED-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 16 +; PRED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i16 [[X]], i64 0 +; PRED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer +; PRED-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv16i1.i64(i64 0, i64 1000) +; PRED-NEXT: [[TMP2:%.*]] = trunc [[BROADCAST_SPLAT]] to ; PRED-NEXT: br label [[VECTOR_BODY:%.*]] ; PRED: vector.body: ; PRED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; PRED-NEXT: [[ACTIVE_LANE_MASK:%.*]] 
= phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] +; PRED-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] ; PRED-NEXT: [[TMP3:%.*]] = load i64, ptr [[SRC]], align 8, !alias.scope [[META3:![0-9]+]] -; PRED-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement poison, i64 [[TMP3]], i64 0 -; PRED-NEXT: [[BROADCAST_SPLAT3:%.*]] = shufflevector [[BROADCAST_SPLATINSERT2]], poison, zeroinitializer -; PRED-NEXT: [[TMP4:%.*]] = trunc [[BROADCAST_SPLAT3]] to -; PRED-NEXT: [[TMP5:%.*]] = and [[TMP4]], [[TMP2]] +; PRED-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement poison, i64 [[TMP3]], i64 0 +; PRED-NEXT: [[BROADCAST_SPLAT3:%.*]] = shufflevector [[BROADCAST_SPLATINSERT2]], poison, zeroinitializer +; PRED-NEXT: [[TMP4:%.*]] = trunc [[BROADCAST_SPLAT3]] to +; PRED-NEXT: [[TMP5:%.*]] = and [[TMP4]], [[TMP2]] ; PRED-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[DST]], i64 [[INDEX]] -; PRED-NEXT: call void @llvm.masked.store.nxv2i8.p0( [[TMP5]], ptr align 1 [[TMP6]], [[ACTIVE_LANE_MASK]]), !alias.scope [[META6:![0-9]+]], !noalias [[META3]] +; PRED-NEXT: call void @llvm.masked.store.nxv16i8.p0( [[TMP5]], ptr align 1 [[TMP6]], [[ACTIVE_LANE_MASK]]), !alias.scope [[META6:![0-9]+]], !noalias [[META3]] ; PRED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]] -; PRED-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX_NEXT]], i64 1000) -; PRED-NEXT: [[TMP7:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 +; PRED-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv16i1.i64(i64 [[INDEX_NEXT]], i64 1000) +; PRED-NEXT: [[TMP7:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; PRED-NEXT: [[TMP8:%.*]] = xor i1 [[TMP7]], true ; PRED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; PRED: middle.block: @@ -278,8 +289,13 @@ 
attributes #1 = { vscale_range(1,16) "target-features"="+sve" } ; DEFAULT: [[META10]] = distinct !{[[META10]], [[META8]]} ; DEFAULT: [[LOOP11]] = distinct !{[[LOOP11]], [[META1]], [[META2]]} ; DEFAULT: [[PROF12]] = !{!"branch_weights", i32 8, i32 24} -; DEFAULT: [[LOOP13]] = distinct !{[[LOOP13]], [[META1]], [[META2]]} -; DEFAULT: [[LOOP14]] = distinct !{[[LOOP14]], [[META1]]} +; DEFAULT: [[META13]] = !{[[META14:![0-9]+]]} +; DEFAULT: [[META14]] = distinct !{[[META14]], [[META15:![0-9]+]]} +; DEFAULT: [[META15]] = distinct !{[[META15]], !"LVerDomain"} +; DEFAULT: [[META16]] = !{[[META17:![0-9]+]]} +; DEFAULT: [[META17]] = distinct !{[[META17]], [[META15]]} +; DEFAULT: [[LOOP18]] = distinct !{[[LOOP18]], [[META1]], [[META2]]} +; DEFAULT: [[LOOP19]] = distinct !{[[LOOP19]], [[META1]]} ;. ; PRED: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} ; PRED: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-fixed-width-inorder-core.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-fixed-width-inorder-core.ll index 76a7536501bd6..389f91f878534 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-fixed-width-inorder-core.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-fixed-width-inorder-core.ll @@ -29,17 +29,17 @@ define void @sve_add(ptr %dst, ptr %a, ptr %b, i64 %n) { ; CHECK-CA510: [[VECTOR_BODY]]: ; CHECK-CA510-NEXT: [[TMP2:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-CA510-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[TMP2]] -; CHECK-CA510-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw float, ptr [[TMP3]], i32 4 +; CHECK-CA510-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw float, ptr [[TMP3]], i64 4 ; CHECK-CA510-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP3]], align 4 ; CHECK-CA510-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x float>, ptr [[TMP5]], align 4 ; CHECK-CA510-NEXT: [[TMP6:%.*]] = 
getelementptr inbounds nuw float, ptr [[B]], i64 [[TMP2]] -; CHECK-CA510-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw float, ptr [[TMP6]], i32 4 +; CHECK-CA510-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw float, ptr [[TMP6]], i64 4 ; CHECK-CA510-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x float>, ptr [[TMP6]], align 4 ; CHECK-CA510-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x float>, ptr [[TMP8]], align 4 ; CHECK-CA510-NEXT: [[TMP9:%.*]] = fadd fast <4 x float> [[WIDE_LOAD6]], [[WIDE_LOAD]] ; CHECK-CA510-NEXT: [[TMP10:%.*]] = fadd fast <4 x float> [[WIDE_LOAD7]], [[WIDE_LOAD5]] ; CHECK-CA510-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw float, ptr [[DST]], i64 [[TMP2]] -; CHECK-CA510-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw float, ptr [[TMP11]], i32 4 +; CHECK-CA510-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw float, ptr [[TMP11]], i64 4 ; CHECK-CA510-NEXT: store <4 x float> [[TMP9]], ptr [[TMP11]], align 4 ; CHECK-CA510-NEXT: store <4 x float> [[TMP10]], ptr [[TMP13]], align 4 ; CHECK-CA510-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP2]], 8 @@ -93,17 +93,17 @@ define void @sve_add(ptr %dst, ptr %a, ptr %b, i64 %n) { ; CHECK-CA520: [[VECTOR_BODY]]: ; CHECK-CA520-NEXT: [[TMP2:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-CA520-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[TMP2]] -; CHECK-CA520-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw float, ptr [[TMP3]], i32 4 +; CHECK-CA520-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw float, ptr [[TMP3]], i64 4 ; CHECK-CA520-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP3]], align 4 ; CHECK-CA520-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x float>, ptr [[TMP5]], align 4 ; CHECK-CA520-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[TMP2]] -; CHECK-CA520-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw float, ptr [[TMP6]], i32 4 +; CHECK-CA520-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw float, ptr [[TMP6]], i64 4 ; 
CHECK-CA520-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x float>, ptr [[TMP6]], align 4 ; CHECK-CA520-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x float>, ptr [[TMP8]], align 4 ; CHECK-CA520-NEXT: [[TMP9:%.*]] = fadd fast <4 x float> [[WIDE_LOAD6]], [[WIDE_LOAD]] ; CHECK-CA520-NEXT: [[TMP10:%.*]] = fadd fast <4 x float> [[WIDE_LOAD7]], [[WIDE_LOAD5]] ; CHECK-CA520-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw float, ptr [[DST]], i64 [[TMP2]] -; CHECK-CA520-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw float, ptr [[TMP11]], i32 4 +; CHECK-CA520-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw float, ptr [[TMP11]], i64 4 ; CHECK-CA520-NEXT: store <4 x float> [[TMP9]], ptr [[TMP11]], align 4 ; CHECK-CA520-NEXT: store <4 x float> [[TMP10]], ptr [[TMP13]], align 4 ; CHECK-CA520-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP2]], 8 @@ -157,17 +157,17 @@ define void @sve_add(ptr %dst, ptr %a, ptr %b, i64 %n) { ; CHECK-CA320: [[VECTOR_BODY]]: ; CHECK-CA320-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-CA320-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDEX]] -; CHECK-CA320-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw float, ptr [[TMP2]], i32 4 +; CHECK-CA320-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw float, ptr [[TMP2]], i64 4 ; CHECK-CA320-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP2]], align 4 ; CHECK-CA320-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x float>, ptr [[TMP3]], align 4 ; CHECK-CA320-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[INDEX]] -; CHECK-CA320-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw float, ptr [[TMP4]], i32 4 +; CHECK-CA320-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw float, ptr [[TMP4]], i64 4 ; CHECK-CA320-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x float>, ptr [[TMP4]], align 4 ; CHECK-CA320-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x float>, ptr [[TMP5]], align 4 ; CHECK-CA320-NEXT: [[TMP6:%.*]] = fadd fast <4 x float> [[WIDE_LOAD6]], [[WIDE_LOAD]] ; 
CHECK-CA320-NEXT: [[TMP7:%.*]] = fadd fast <4 x float> [[WIDE_LOAD7]], [[WIDE_LOAD5]] ; CHECK-CA320-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw float, ptr [[DST]], i64 [[INDEX]] -; CHECK-CA320-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw float, ptr [[TMP8]], i32 4 +; CHECK-CA320-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw float, ptr [[TMP8]], i64 4 ; CHECK-CA320-NEXT: store <4 x float> [[TMP6]], ptr [[TMP8]], align 4 ; CHECK-CA320-NEXT: store <4 x float> [[TMP7]], ptr [[TMP9]], align 4 ; CHECK-CA320-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll index 871d9be609bd7..873b18beb85aa 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll @@ -238,31 +238,37 @@ for.exit: define void @histogram_8bit(ptr noalias %buckets, ptr readonly %indices, i64 %N) #0 { ; CHECK-LABEL: define void @histogram_8bit( ; CHECK-SAME: ptr noalias [[BUCKETS:%.*]], ptr readonly [[INDICES:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: entry: +; CHECK-NEXT: iter.check: ; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP9:%.*]] = shl nuw nsw i64 [[TMP5]], 2 +; CHECK-NEXT: [[TMP9:%.*]] = shl nuw nsw i64 [[TMP5]], 3 ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP9]] ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]] +; CHECK: vector.main.loop.iter.check: +; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP6:%.*]] = shl nuw nsw i64 [[TMP2]], 4 +; CHECK-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ult i64 [[N]], [[TMP6]] +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK1]], label [[VEC_EPILOG_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: ; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 2 -; CHECK-NEXT: [[DOTNOT:%.*]] = 
sub nsw i64 0, [[TMP4]] +; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 4 +; CHECK-NEXT: [[DOTNOT:%.*]] = add nsw i64 [[TMP4]], -1 ; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNOT]] +; CHECK-NEXT: [[N_VEC1:%.*]] = sub i64 [[N]], [[N_VEC]] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: vector.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[INDICES]], i64 [[IV]] -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[TMP6:%.*]] = zext [[WIDE_LOAD]] to -; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[BUCKETS]], [[TMP6]] -; CHECK-NEXT: call void @llvm.experimental.vector.histogram.add.nxv4p0.i8( [[TMP7]], i8 1, splat (i1 true)) +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[TMP8:%.*]] = zext [[WIDE_LOAD]] to +; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i8, ptr [[BUCKETS]], [[TMP8]] +; CHECK-NEXT: call void @llvm.experimental.vector.histogram.add.nxv16p0.i8( [[TMP17]], i8 1, splat (i1 true)) ; CHECK-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], [[TMP4]] -; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC1]] +; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH]] -; CHECK: scalar.ph: +; CHECK-NEXT: [[CMP_N1:%.*]] = icmp eq i64 [[N_VEC]], 0 +; CHECK-NEXT: br i1 [[CMP_N1]], label [[FOR_EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] +; CHECK: vec.epilog.iter.check: ; entry: br label 
%for.body diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-cost.ll index 2a19402347e40..6eb8242bf7975 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-cost.ll @@ -178,9 +178,9 @@ define void @test_interleave_store_one_constant(ptr noalias %src, ptr noalias %d ; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[INDEX]], 4 ; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[INDEX]], 6 ; CHECK-NEXT: [[TMP13:%.*]] = getelementptr double, ptr [[SRC]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP15:%.*]] = getelementptr double, ptr [[TMP13]], i32 2 -; CHECK-NEXT: [[TMP16:%.*]] = getelementptr double, ptr [[TMP13]], i32 4 -; CHECK-NEXT: [[TMP17:%.*]] = getelementptr double, ptr [[TMP13]], i32 6 +; CHECK-NEXT: [[TMP15:%.*]] = getelementptr double, ptr [[TMP13]], i64 2 +; CHECK-NEXT: [[TMP16:%.*]] = getelementptr double, ptr [[TMP13]], i64 4 +; CHECK-NEXT: [[TMP17:%.*]] = getelementptr double, ptr [[TMP13]], i64 6 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x double>, ptr [[TMP13]], align 8 ; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <2 x double>, ptr [[TMP15]], align 8 ; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <2 x double>, ptr [[TMP16]], align 8 @@ -323,9 +323,9 @@ define void @single_fmul_used_by_each_member(ptr noalias %A, ptr noalias %B, ptr ; CHECK-NEXT: [[TMP21:%.*]] = add i64 [[INDEX]], 4 ; CHECK-NEXT: [[TMP22:%.*]] = add i64 [[INDEX]], 6 ; CHECK-NEXT: [[TMP23:%.*]] = getelementptr double, ptr [[A]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP25:%.*]] = getelementptr double, ptr [[TMP23]], i32 2 -; CHECK-NEXT: [[TMP26:%.*]] = getelementptr double, ptr [[TMP23]], i32 4 -; CHECK-NEXT: [[TMP27:%.*]] = getelementptr double, ptr [[TMP23]], i32 6 +; CHECK-NEXT: [[TMP25:%.*]] = getelementptr double, ptr [[TMP23]], i64 2 +; 
CHECK-NEXT: [[TMP26:%.*]] = getelementptr double, ptr [[TMP23]], i64 4 +; CHECK-NEXT: [[TMP27:%.*]] = getelementptr double, ptr [[TMP23]], i64 6 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x double>, ptr [[TMP23]], align 8 ; CHECK-NEXT: [[WIDE_LOAD12:%.*]] = load <2 x double>, ptr [[TMP25]], align 8 ; CHECK-NEXT: [[WIDE_LOAD13:%.*]] = load <2 x double>, ptr [[TMP26]], align 8 @@ -456,8 +456,9 @@ define void @test_interleave_after_narrowing(i32 %n, ptr %x, ptr noalias %y) { ; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256 ; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br [[EXIT:label %.*]] -; CHECK: [[SCALAR_PH:.*:]] +; CHECK-NEXT: br label %[[EXIT:.*]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-multi-block.ll b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-multi-block.ll index 46b0ebdd2fa62..99c735f777b66 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-multi-block.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-multi-block.ll @@ -88,7 +88,7 @@ define void @load_store_interleave_group_block_invar_cond(ptr noalias %data, ptr ; VF2IC2-NEXT: br label %[[PRED_STORE_CONTINUE11]] ; VF2IC2: [[PRED_STORE_CONTINUE11]]: ; VF2IC2-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[DST_1]], i64 [[INDEX]] -; VF2IC2-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[TMP5]], i32 2 +; VF2IC2-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[TMP5]], i64 2 ; VF2IC2-NEXT: store <2 x i8> zeroinitializer, ptr [[TMP5]], align 1 ; VF2IC2-NEXT: store <2 x i8> zeroinitializer, ptr [[TMP6]], align 1 ; VF2IC2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 @@ -199,7 +199,7 @@ define void 
@load_store_interleave_group_block_var_cond(ptr noalias %data, ptr % ; VF2IC2-NEXT: [[INTERLEAVED_VEC5:%.*]] = shufflevector <4 x i64> [[TMP6]], <4 x i64> poison, <4 x i32> ; VF2IC2-NEXT: store <4 x i64> [[INTERLEAVED_VEC5]], ptr [[TMP4]], align 8 ; VF2IC2-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[MASKS]], i64 [[INDEX]] -; VF2IC2-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[TMP7]], i32 2 +; VF2IC2-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[TMP7]], i64 2 ; VF2IC2-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i8>, ptr [[TMP7]], align 1 ; VF2IC2-NEXT: [[WIDE_LOAD6:%.*]] = load <2 x i8>, ptr [[TMP8]], align 1 ; VF2IC2-NEXT: [[TMP9:%.*]] = icmp eq <2 x i8> [[WIDE_LOAD]], zeroinitializer diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-scalable.ll b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-scalable.ll index b63e03dccdc18..d82dace2c9e04 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-scalable.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-scalable.ll @@ -172,14 +172,13 @@ define void @test_masked_interleave_group(i32 %N, ptr %mask, ptr %src, ptr %dst) ; ; CHECK-LABEL: define void @test_masked_interleave_group( ; CHECK-SAME: i32 [[N:%.*]], ptr [[MASK:%.*]], ptr [[SRC:%.*]], ptr [[DST:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[ENTRY:.*]]: ; CHECK-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64 ; CHECK-NEXT: [[TMP1:%.*]] = add nuw nsw i64 [[TMP0]], 1 ; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP3:%.*]] = shl nuw i64 [[TMP2]], 2 -; CHECK-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[TMP3]], i64 8) +; CHECK-NEXT: [[UMAX:%.*]] = shl nuw i64 [[TMP2]], 3 ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP1]], [[UMAX]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label 
%[[VECTOR_MEMCHECK:.*]] +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] ; CHECK: [[VECTOR_MEMCHECK]]: ; CHECK-NEXT: [[TMP4:%.*]] = zext i32 [[N]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 4 @@ -195,45 +194,129 @@ define void @test_masked_interleave_group(i32 %N, ptr %mask, ptr %src, ptr %dst) ; CHECK-NEXT: [[BOUND14:%.*]] = icmp ult ptr [[SRC]], [[SCEVGEP]] ; CHECK-NEXT: [[FOUND_CONFLICT5:%.*]] = and i1 [[BOUND03]], [[BOUND14]] ; CHECK-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[FOUND_CONFLICT]], [[FOUND_CONFLICT5]] -; CHECK-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] -; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: br i1 [[CONFLICT_RDX]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]] +; CHECK: [[VECTOR_MAIN_LOOP_ITER_CHECK]]: ; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 4 +; CHECK-NEXT: [[TMP20:%.*]] = shl nuw i64 [[TMP8]], 4 +; CHECK-NEXT: [[MIN_ITERS_CHECK6:%.*]] = icmp ult i64 [[TMP1]], [[TMP20]] +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK6]], label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[TMP21:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP21]], 16 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP1]], [[TMP9]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP1]], [[N_MOD_VF]] +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP25:%.*]] = mul i64 [[INDEX1]], 16 +; CHECK-NEXT: [[NEXT_GEP1:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP25]] +; CHECK-NEXT: [[TMP26:%.*]] = mul i64 [[INDEX1]], 16 +; CHECK-NEXT: [[NEXT_GEP9:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP26]] +; CHECK-NEXT: [[NEXT_GEP10:%.*]] = getelementptr i8, ptr [[MASK]], i64 [[INDEX1]] +; 
CHECK-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[NEXT_GEP10]], align 1, !alias.scope [[META6:![0-9]+]] +; CHECK-NEXT: [[TMP27:%.*]] = icmp eq [[WIDE_LOAD]], zeroinitializer +; CHECK-NEXT: [[INTERLEAVED_MASK:%.*]] = call @llvm.vector.interleave4.nxv64i1( [[TMP27]], [[TMP27]], [[TMP27]], [[TMP27]]) +; CHECK-NEXT: [[WIDE_MASKED_VEC:%.*]] = call @llvm.masked.load.nxv64f32.p0(ptr align 4 [[NEXT_GEP9]], [[INTERLEAVED_MASK]], poison), !alias.scope [[META9:![0-9]+]] +; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { , , , } @llvm.vector.deinterleave4.nxv64f32( [[WIDE_MASKED_VEC]]) +; CHECK-NEXT: [[TMP28:%.*]] = extractvalue { , , , } [[STRIDED_VEC]], 0 +; CHECK-NEXT: [[TMP16:%.*]] = extractvalue { , , , } [[STRIDED_VEC]], 1 +; CHECK-NEXT: [[TMP17:%.*]] = extractvalue { , , , } [[STRIDED_VEC]], 2 +; CHECK-NEXT: [[TMP18:%.*]] = extractvalue { , , , } [[STRIDED_VEC]], 3 +; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call @llvm.vector.interleave4.nxv64f32( [[TMP28]], [[TMP16]], [[TMP17]], [[TMP18]]) +; CHECK-NEXT: [[INTERLEAVED_MASK9:%.*]] = call @llvm.vector.interleave4.nxv64i1( [[TMP27]], [[TMP27]], [[TMP27]], [[TMP27]]) +; CHECK-NEXT: call void @llvm.masked.store.nxv64f32.p0( [[INTERLEAVED_VEC]], ptr align 4 [[NEXT_GEP1]], [[INTERLEAVED_MASK9]]), !alias.scope [[META11:![0-9]+]], !noalias [[META13:![0-9]+]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX1]], [[TMP9]] +; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP1]], [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]] +; CHECK: [[VEC_EPILOG_ITER_CHECK]]: ; CHECK-NEXT: [[TMP10:%.*]] = trunc i64 [[N_VEC]] to i32 ; CHECK-NEXT: [[TMP11:%.*]] = mul i64 [[N_VEC]], 16 ; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP11]] ; CHECK-NEXT: [[TMP13:%.*]] = mul i64 [[N_VEC]], 
16 ; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP13]] ; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i8, ptr [[MASK]], i64 [[N_VEC]] -; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] -; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], [[UMAX]] +; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF15:![0-9]+]] +; CHECK: [[VEC_EPILOG_PH]]: +; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] +; CHECK-NEXT: [[TMP22:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP23:%.*]] = mul nuw i64 [[TMP22]], 8 +; CHECK-NEXT: [[N_MOD_VF10:%.*]] = urem i64 [[TMP1]], [[TMP23]] +; CHECK-NEXT: [[INDEX:%.*]] = sub i64 [[TMP1]], [[N_MOD_VF10]] +; CHECK-NEXT: [[TMP24:%.*]] = trunc i64 [[INDEX]] to i32 ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 16 ; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[DST]], i64 [[OFFSET_IDX]] ; CHECK-NEXT: [[OFFSET_IDX6:%.*]] = mul i64 [[INDEX]], 16 ; CHECK-NEXT: [[NEXT_GEP7:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[OFFSET_IDX6]] ; CHECK-NEXT: [[NEXT_GEP8:%.*]] = getelementptr i8, ptr [[MASK]], i64 [[INDEX]] -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[NEXT_GEP8]], align 1, !alias.scope [[META6:![0-9]+]] -; CHECK-NEXT: [[TMP16:%.*]] = icmp eq [[WIDE_LOAD]], zeroinitializer -; CHECK-NEXT: [[INTERLEAVED_MASK:%.*]] = call @llvm.vector.interleave4.nxv16i1( [[TMP16]], [[TMP16]], [[TMP16]], [[TMP16]]) -; CHECK-NEXT: [[WIDE_MASKED_VEC:%.*]] = call @llvm.masked.load.nxv16f32.p0(ptr align 4 [[NEXT_GEP7]], [[INTERLEAVED_MASK]], poison), !alias.scope [[META9:![0-9]+]] -; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { , , , } @llvm.vector.deinterleave4.nxv16f32( [[WIDE_MASKED_VEC]]) -; CHECK-NEXT: [[TMP17:%.*]] = extractvalue { , , , 
} [[STRIDED_VEC]], 0 -; CHECK-NEXT: [[TMP18:%.*]] = extractvalue { , , , } [[STRIDED_VEC]], 1 -; CHECK-NEXT: [[TMP19:%.*]] = extractvalue { , , , } [[STRIDED_VEC]], 2 -; CHECK-NEXT: [[TMP20:%.*]] = extractvalue { , , , } [[STRIDED_VEC]], 3 -; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call @llvm.vector.interleave4.nxv16f32( [[TMP17]], [[TMP18]], [[TMP19]], [[TMP20]]) -; CHECK-NEXT: [[INTERLEAVED_MASK9:%.*]] = call @llvm.vector.interleave4.nxv16i1( [[TMP16]], [[TMP16]], [[TMP16]], [[TMP16]]) -; CHECK-NEXT: call void @llvm.masked.store.nxv16f32.p0( [[INTERLEAVED_VEC]], ptr align 4 [[NEXT_GEP]], [[INTERLEAVED_MASK9]]), !alias.scope [[META11:![0-9]+]], !noalias [[META13:![0-9]+]] -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]] -; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] -; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP1]], [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]] -; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: br label %[[VEC_EPILOG_VECTOR_BODY:.*]] +; CHECK: [[VEC_EPILOG_VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX12:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT23:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ] +; CHECK-NEXT: [[OFFSET_IDX1:%.*]] = mul i64 [[INDEX12]], 16 +; CHECK-NEXT: [[NEXT_GEP13:%.*]] = getelementptr i8, ptr [[DST]], i64 [[OFFSET_IDX1]] +; CHECK-NEXT: [[OFFSET_IDX14:%.*]] = mul i64 [[INDEX12]], 16 +; CHECK-NEXT: [[NEXT_GEP15:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[OFFSET_IDX14]] +; CHECK-NEXT: [[NEXT_GEP16:%.*]] = getelementptr i8, ptr [[MASK]], i64 [[INDEX12]] +; CHECK-NEXT: [[WIDE_LOAD17:%.*]] = load , ptr [[NEXT_GEP16]], align 1, !alias.scope [[META6]] +; CHECK-NEXT: [[TMP30:%.*]] = icmp eq [[WIDE_LOAD17]], zeroinitializer +; CHECK-NEXT: [[INTERLEAVED_MASK18:%.*]] = call @llvm.vector.interleave4.nxv32i1( [[TMP30]], 
[[TMP30]], [[TMP30]], [[TMP30]]) +; CHECK-NEXT: [[WIDE_MASKED_VEC19:%.*]] = call @llvm.masked.load.nxv32f32.p0(ptr align 4 [[NEXT_GEP15]], [[INTERLEAVED_MASK18]], poison), !alias.scope [[META9]] +; CHECK-NEXT: [[STRIDED_VEC20:%.*]] = call { , , , } @llvm.vector.deinterleave4.nxv32f32( [[WIDE_MASKED_VEC19]]) +; CHECK-NEXT: [[TMP31:%.*]] = extractvalue { , , , } [[STRIDED_VEC20]], 0 +; CHECK-NEXT: [[TMP32:%.*]] = extractvalue { , , , } [[STRIDED_VEC20]], 1 +; CHECK-NEXT: [[TMP33:%.*]] = extractvalue { , , , } [[STRIDED_VEC20]], 2 +; CHECK-NEXT: [[TMP34:%.*]] = extractvalue { , , , } [[STRIDED_VEC20]], 3 +; CHECK-NEXT: [[INTERLEAVED_VEC21:%.*]] = call @llvm.vector.interleave4.nxv32f32( [[TMP31]], [[TMP32]], [[TMP33]], [[TMP34]]) +; CHECK-NEXT: [[INTERLEAVED_MASK22:%.*]] = call @llvm.vector.interleave4.nxv32i1( [[TMP30]], [[TMP30]], [[TMP30]], [[TMP30]]) +; CHECK-NEXT: call void @llvm.masked.store.nxv32f32.p0( [[INTERLEAVED_VEC21]], ptr align 4 [[NEXT_GEP13]], [[INTERLEAVED_MASK22]]), !alias.scope [[META11]], !noalias [[META13]] +; CHECK-NEXT: [[INDEX_NEXT23]] = add nuw i64 [[INDEX12]], [[TMP23]] +; CHECK-NEXT: [[TMP35:%.*]] = icmp eq i64 [[INDEX_NEXT23]], [[INDEX]] +; CHECK-NEXT: br i1 [[TMP35]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; CHECK: [[VEC_EPILOG_MIDDLE_BLOCK]]: +; CHECK-NEXT: [[CMP_N24:%.*]] = icmp eq i64 [[TMP1]], [[INDEX]] +; CHECK-NEXT: br i1 [[CMP_N24]], label %[[EXIT]], label %[[VEC_EPILOG_SCALAR_PH]] +; CHECK: [[VEC_EPILOG_SCALAR_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[TMP24]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[TMP10]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MEMCHECK]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[BC_RESUME_VAL25:%.*]] = phi ptr [ [[NEXT_GEP]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[TMP12]], %[[VEC_EPILOG_ITER_CHECK]] ], [ [[DST]], %[[VECTOR_MEMCHECK]] ], [ [[DST]], %[[ENTRY]] ] +; CHECK-NEXT: [[BC_RESUME_VAL26:%.*]] = phi ptr [ [[NEXT_GEP7]], 
%[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[TMP14]], %[[VEC_EPILOG_ITER_CHECK]] ], [ [[SRC]], %[[VECTOR_MEMCHECK]] ], [ [[SRC]], %[[ENTRY]] ] +; CHECK-NEXT: [[BC_RESUME_VAL27:%.*]] = phi ptr [ [[NEXT_GEP8]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[TMP15]], %[[VEC_EPILOG_ITER_CHECK]] ], [ [[MASK]], %[[VECTOR_MEMCHECK]] ], [ [[MASK]], %[[ENTRY]] ] +; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] +; CHECK: [[LOOP_HEADER]]: +; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] +; CHECK-NEXT: [[DST_IV:%.*]] = phi ptr [ [[BC_RESUME_VAL25]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[DST_IV_NEXT:%.*]], %[[LOOP_LATCH]] ] +; CHECK-NEXT: [[SRC_IV:%.*]] = phi ptr [ [[BC_RESUME_VAL26]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[SRC_IV_NEXT:%.*]], %[[LOOP_LATCH]] ] +; CHECK-NEXT: [[MASK_IV:%.*]] = phi ptr [ [[BC_RESUME_VAL27]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[MASK_IV_NEXT:%.*]], %[[LOOP_LATCH]] ] +; CHECK-NEXT: [[MASK_IV_NEXT]] = getelementptr i8, ptr [[MASK_IV]], i64 1 +; CHECK-NEXT: [[MASK_VAL:%.*]] = load i8, ptr [[MASK_IV]], align 1 +; CHECK-NEXT: [[SHOULD_COPY:%.*]] = icmp eq i8 [[MASK_VAL]], 0 +; CHECK-NEXT: br i1 [[SHOULD_COPY]], label %[[THEN:.*]], label %[[LOOP_LATCH]] +; CHECK: [[THEN]]: +; CHECK-NEXT: [[ELEM0:%.*]] = load float, ptr [[SRC_IV]], align 4 +; CHECK-NEXT: store float [[ELEM0]], ptr [[DST_IV]], align 4 +; CHECK-NEXT: [[SRC_1_PTR:%.*]] = getelementptr i8, ptr [[SRC_IV]], i64 4 +; CHECK-NEXT: [[S1:%.*]] = load float, ptr [[SRC_1_PTR]], align 4 +; CHECK-NEXT: [[DST_1_PTR:%.*]] = getelementptr i8, ptr [[DST_IV]], i64 4 +; CHECK-NEXT: store float [[S1]], ptr [[DST_1_PTR]], align 4 +; CHECK-NEXT: [[SRC_2_PTR:%.*]] = getelementptr i8, ptr [[SRC_IV]], i64 8 +; CHECK-NEXT: [[S2:%.*]] = load float, ptr [[SRC_2_PTR]], align 4 +; CHECK-NEXT: [[DST_2_PTR:%.*]] = getelementptr i8, ptr [[DST_IV]], i64 8 +; CHECK-NEXT: store float [[S2]], ptr [[DST_2_PTR]], align 4 +; CHECK-NEXT: [[SRC_3_PTR:%.*]] = getelementptr i8, ptr [[SRC_IV]], 
i64 12 +; CHECK-NEXT: [[S3:%.*]] = load float, ptr [[SRC_3_PTR]], align 4 +; CHECK-NEXT: [[DST_3_PTR:%.*]] = getelementptr i8, ptr [[DST_IV]], i64 12 +; CHECK-NEXT: store float [[S3]], ptr [[DST_3_PTR]], align 4 +; CHECK-NEXT: br label %[[LOOP_LATCH]] +; CHECK: [[LOOP_LATCH]]: +; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 +; CHECK-NEXT: [[SRC_IV_NEXT]] = getelementptr i8, ptr [[SRC_IV]], i64 16 +; CHECK-NEXT: [[DST_IV_NEXT]] = getelementptr i8, ptr [[DST_IV]], i64 16 +; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], [[N]] +; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP17:![0-9]+]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void ; entry: br label %loop.header diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-unroll.ll b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-unroll.ll index d290f2d4f5bc3..b14b1783c97e3 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-unroll.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-unroll.ll @@ -62,7 +62,7 @@ define void @test_2xi64_with_wide_load(ptr noalias %data, ptr noalias %factor) { ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 2 ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[FACTOR]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i32 2 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i64 2 ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = load <2 x i64>, ptr [[TMP1]], align 8 ; CHECK-NEXT: [[BROADCAST_SPLAT3:%.*]] = load <2 x i64>, ptr [[TMP3]], align 8 ; CHECK-NEXT: [[TMP6:%.*]] = shl nsw i64 [[INDEX]], 1 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/type-shrinkage-zext-costs.ll 
b/llvm/test/Transforms/LoopVectorize/AArch64/type-shrinkage-zext-costs.ll index 7bc606f5c61b3..0cfc14a0ae3a8 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/type-shrinkage-zext-costs.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/type-shrinkage-zext-costs.ll @@ -23,23 +23,27 @@ define void @zext_i8_i16(ptr noalias nocapture readonly %p, ptr noalias nocaptur ; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[LEN]], -1 ; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 ; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP2]], 16 +; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 4 +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP2]], [[TMP4]] ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], 16 +; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16 +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], [[TMP6]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[N_MOD_VF]] ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[INDEX]] -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 -; CHECK-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i16> -; CHECK-NEXT: [[TMP5:%.*]] = add <16 x i16> [[TMP4]], splat (i16 2) -; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i16, ptr [[Q]], i64 [[INDEX]] -; CHECK-NEXT: store <16 x i16> [[TMP5]], ptr [[TMP6]], align 2 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 -; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label 
[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[INDEX]] +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP7]], align 1 +; CHECK-NEXT: [[TMP8:%.*]] = zext [[WIDE_LOAD]] to +; CHECK-NEXT: [[TMP9:%.*]] = add [[TMP8]], splat (i16 2) +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i16, ptr [[Q]], i64 [[INDEX]] +; CHECK-NEXT: store [[TMP9]], ptr [[TMP10]], align 2 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] +; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP2]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] @@ -99,23 +103,27 @@ define void @sext_i8_i16(ptr noalias nocapture readonly %p, ptr noalias nocaptur ; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[LEN]], -1 ; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 ; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP2]], 16 +; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 4 +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP2]], [[TMP4]] ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], 16 +; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16 +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], [[TMP6]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[N_MOD_VF]] ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 
[[INDEX]] -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 -; CHECK-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i16> -; CHECK-NEXT: [[TMP5:%.*]] = add <16 x i16> [[TMP4]], splat (i16 2) -; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i16, ptr [[Q]], i64 [[INDEX]] -; CHECK-NEXT: store <16 x i16> [[TMP5]], ptr [[TMP6]], align 2 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 -; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[INDEX]] +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP7]], align 1 +; CHECK-NEXT: [[TMP8:%.*]] = sext [[WIDE_LOAD]] to +; CHECK-NEXT: [[TMP9:%.*]] = add [[TMP8]], splat (i16 2) +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i16, ptr [[Q]], i64 [[INDEX]] +; CHECK-NEXT: store [[TMP9]], ptr [[TMP10]], align 2 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] +; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP2]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/vector-loop-backedge-elimination-epilogue.ll b/llvm/test/Transforms/LoopVectorize/AArch64/vector-loop-backedge-elimination-epilogue.ll index 44b4e5a8c2bc7..4ede21040f393 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/vector-loop-backedge-elimination-epilogue.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/vector-loop-backedge-elimination-epilogue.ll @@ -17,9 +17,9 @@ define void @test_remove_vector_loop_region_epilogue(ptr %dst, i1 %c) { ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TC]], [[N_MOD_VF]] ; CHECK-NEXT: br label 
%[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[DST]], i32 16 -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[DST]], i32 32 -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[DST]], i32 48 +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[DST]], i64 16 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[DST]], i64 32 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[DST]], i64 48 ; CHECK-NEXT: store <16 x i8> zeroinitializer, ptr [[DST]], align 4 ; CHECK-NEXT: store <16 x i8> zeroinitializer, ptr [[TMP2]], align 4 ; CHECK-NEXT: store <16 x i8> zeroinitializer, ptr [[TMP3]], align 4 @@ -30,7 +30,7 @@ define void @test_remove_vector_loop_region_epilogue(ptr %dst, i1 %c) { ; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]] ; CHECK: [[VEC_EPILOG_ITER_CHECK]]: ; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8 -; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]] +; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF0:![0-9]+]] ; CHECK: [[VEC_EPILOG_PH]]: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; CHECK-NEXT: br label %[[VEC_EPILOG_VECTOR_BODY:.*]] @@ -49,7 +49,7 @@ define void @test_remove_vector_loop_region_epilogue(ptr %dst, i1 %c) { ; CHECK-NEXT: store i8 0, ptr [[GEP]], align 4 ; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 ; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[TC]] -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP1:![0-9]+]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -69,7 +69,8 @@ exit: ret void } ;. 
-; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} -; CHECK: [[META1]] = !{!"llvm.loop.unroll.runtime.disable"} -; CHECK: [[META2]] = !{!"llvm.loop.isvectorized", i32 1} +; CHECK: [[PROF0]] = !{!"branch_weights", i32 8, i32 56} +; CHECK: [[LOOP1]] = distinct !{[[LOOP1]], [[META2:![0-9]+]], [[META3:![0-9]+]]} +; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} +; CHECK: [[META3]] = !{!"llvm.loop.isvectorized", i32 1} ;. diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/vector-reverse.ll b/llvm/test/Transforms/LoopVectorize/AArch64/vector-reverse.ll index 2abc787061b53..ec874d0b48030 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/vector-reverse.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/vector-reverse.ll @@ -11,14 +11,14 @@ define void @vector_reverse_f64(i64 %N, ptr %a, ptr %b) #0 { ; CHECK-LABEL: vector_reverse_f64 ; CHECK-LABEL: vector.body -; CHECK: %[[GEP:.*]] = getelementptr inbounds double, ptr %{{.*}}, i32 0 -; CHECK-NEXT: %[[GEP1:.*]] = getelementptr inbounds double, ptr %[[GEP]], i32 -7 +; CHECK: %[[GEP:.*]] = getelementptr inbounds double, ptr %{{.*}}, i64 0 +; CHECK-NEXT: %[[GEP1:.*]] = getelementptr inbounds double, ptr %[[GEP]], i64 -7 ; CHECK-NEXT: %[[WIDE:.*]] = load <8 x double>, ptr %[[GEP1]], align 8 ; CHECK-NEXT: %[[REVERSE:.*]] = shufflevector <8 x double> %[[WIDE]], <8 x double> poison, <8 x i32> ; CHECK-NEXT: %[[FADD:.*]] = fadd <8 x double> %[[REVERSE]] ; CHECK-NEXT: %[[GEP2:.*]] = getelementptr inbounds double, ptr {{.*}}, i64 {{.*}} -; CHECK-NEXT: %[[GEP3:.*]] = getelementptr inbounds double, ptr %[[GEP2]], i32 0 -; CHECK-NEXT: %[[GEP4:.*]] = getelementptr inbounds double, ptr %[[GEP3]], i32 -7 +; CHECK-NEXT: %[[GEP3:.*]] = getelementptr inbounds double, ptr %[[GEP2]], i64 0 +; CHECK-NEXT: %[[GEP4:.*]] = getelementptr inbounds double, ptr %[[GEP3]], i64 -7 ; CHECK-NEXT: %[[REVERSE6:.*]] = shufflevector <8 x double> %[[FADD]], <8 x double> poison, <8 x i32> ; CHECK-NEXT: store <8 
x double> %[[REVERSE6]], ptr %[[GEP4]], align 8 @@ -44,14 +44,14 @@ for.body: ; preds = %entry, %for.body define void @vector_reverse_i64(i64 %N, ptr %a, ptr %b) #0 { ; CHECK-LABEL: vector_reverse_i64 ; CHECK-LABEL: vector.body -; CHECK: %[[GEP:.*]] = getelementptr inbounds i64, ptr %{{.*}}, i32 0 -; CHECK-NEXT: %[[GEP1:.*]] = getelementptr inbounds i64, ptr %[[GEP]], i32 -7 +; CHECK: %[[GEP:.*]] = getelementptr inbounds i64, ptr %{{.*}}, i64 0 +; CHECK-NEXT: %[[GEP1:.*]] = getelementptr inbounds i64, ptr %[[GEP]], i64 -7 ; CHECK-NEXT: %[[WIDE:.*]] = load <8 x i64>, ptr %[[GEP1]], align 8 ; CHECK-NEXT: %[[REVERSE:.*]] = shufflevector <8 x i64> %[[WIDE]], <8 x i64> poison, <8 x i32> ; CHECK-NEXT: %[[FADD:.*]] = add <8 x i64> %[[REVERSE]] ; CHECK-NEXT: %[[GEP2:.*]] = getelementptr inbounds i64, ptr {{.*}}, i64 {{.*}} -; CHECK-NEXT: %[[GEP3:.*]] = getelementptr inbounds i64, ptr %[[GEP2]], i32 0 -; CHECK-NEXT: %[[GEP4:.*]] = getelementptr inbounds i64, ptr %[[GEP3]], i32 -7 +; CHECK-NEXT: %[[GEP3:.*]] = getelementptr inbounds i64, ptr %[[GEP2]], i64 0 +; CHECK-NEXT: %[[GEP4:.*]] = getelementptr inbounds i64, ptr %[[GEP3]], i64 -7 ; CHECK-NEXT: %[[REVERSE6:.*]] = shufflevector <8 x i64> %[[FADD]], <8 x i64> poison, <8 x i32> ; CHECK-NEXT: store <8 x i64> %[[REVERSE6]], ptr %[[GEP4]], align 8 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/vplan-printing.ll b/llvm/test/Transforms/LoopVectorize/AArch64/vplan-printing.ll index 6df3f1b418eb6..a1d03c4a7fbc6 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/vplan-printing.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/vplan-printing.ll @@ -80,8 +80,8 @@ define i32 @print_partial_reduction(ptr %a, ptr %b) "target-features"="+neon,+do ; CHECK-NEXT: Successor(s): vector.body ; CHECK-EMPTY: ; CHECK-NEXT: vector.body: -; CHECK-NEXT: EMIT-SCALAR vp<%index> = phi [ ir<0>, vector.ph ], [ vp<%index.next>, vector.body ] -; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%accum> = phi vp<[[RDX_START]]>, ir<%add> (VF scaled by 
1/4) +; CHECK-NEXT: EMIT-SCALAR vp<[[EP_IV:%.+]]> = phi [ ir<0>, vector.ph ], [ vp<%index.next>, vector.body ] +; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<[[RDX:%.+]]> = phi vp<[[RDX_START]]>, ir<[[RDX_NEXT:%.+]]> (VF scaled by 1/4) ; CHECK-NEXT: CLONE ir<%gep.a> = getelementptr ir<%a>, vp<%index> ; CHECK-NEXT: WIDEN ir<%load.a> = load ir<%gep.a> ; CHECK-NEXT: CLONE ir<%gep.b> = getelementptr ir<%b>, vp<%index> @@ -89,13 +89,13 @@ define i32 @print_partial_reduction(ptr %a, ptr %b) "target-features"="+neon,+do ; CHECK-NEXT: WIDEN-CAST ir<%ext.b> = zext ir<%load.b> to i32 ; CHECK-NEXT: WIDEN-CAST ir<%ext.a> = zext ir<%load.a> to i32 ; CHECK-NEXT: WIDEN ir<%mul> = mul ir<%ext.b>, ir<%ext.a> -; CHECK-NEXT: PARTIAL-REDUCE ir<%add> = add ir<%accum>, ir<%mul> -; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<%index>, ir<16> -; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, ir<1024> +; CHECK-NEXT: PARTIAL-REDUCE ir<[[RDX_NEXT]]> = ir<[[RDX]]> + reduce.add (ir<%mul>) +; CHECK-NEXT: EMIT vp<[[EP_IV_NEXT:%.+]]> = add nuw vp<[[EP_IV]]>, ir<16> +; CHECK-NEXT: EMIT branch-on-count vp<[[EP_IV_NEXT]]>, ir<1024> ; CHECK-NEXT: Successor(s): middle.block, vector.body ; CHECK-EMPTY: ; CHECK-NEXT: middle.block: -; CHECK-NEXT: EMIT vp<[[RED_RESULT:%[0-9]+]]> = compute-reduction-result ir<%accum>, ir<%add> +; CHECK-NEXT: EMIT vp<[[RED_RESULT:%[0-9]+]]> = compute-reduction-result ir<[[RDX]]>, ir<[[RDX_NEXT]]> ; CHECK-NEXT: Successor(s): ir-bb ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb: diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/wider-VF-for-callinst.ll b/llvm/test/Transforms/LoopVectorize/AArch64/wider-VF-for-callinst.ll index fc0b19da47f4b..82f272ad853a8 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/wider-VF-for-callinst.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/wider-VF-for-callinst.ll @@ -47,38 +47,37 @@ define void @test_widen(ptr noalias %a, ptr readnone %b) #1 { ; NARROW-NEXT: entry: ; NARROW-NEXT: br label [[VECTOR_PH:%.*]] ; NARROW: vector.ph: +; NARROW-NEXT: 
[[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; NARROW-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP2]], 4 +; NARROW-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP1]] +; NARROW-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] ; NARROW-NEXT: br label [[VECTOR_BODY:%.*]] ; NARROW: vector.body: ; NARROW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; NARROW-NEXT: [[TMP0:%.*]] = getelementptr double, ptr [[B:%.*]], i64 [[INDEX]] -; NARROW-NEXT: [[WIDE_LOAD:%.*]] = load <2 x double>, ptr [[TMP0]], align 8 -; NARROW-NEXT: [[TMP1:%.*]] = fptrunc <2 x double> [[WIDE_LOAD]] to <2 x float> -; NARROW-NEXT: [[TMP2:%.*]] = extractelement <2 x float> [[TMP1]], i32 0 -; NARROW-NEXT: [[TMP4:%.*]] = extractelement <2 x float> [[TMP1]], i32 1 -; NARROW-NEXT: [[TMP3:%.*]] = call float @foo(float [[TMP2]]) #[[ATTR1:[0-9]+]] -; NARROW-NEXT: [[TMP5:%.*]] = call float @foo(float [[TMP4]]) #[[ATTR1]] -; NARROW-NEXT: [[TMP6:%.*]] = insertelement <2 x float> poison, float [[TMP3]], i32 0 -; NARROW-NEXT: [[TMP7:%.*]] = insertelement <2 x float> [[TMP6]], float [[TMP5]], i32 1 +; NARROW-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP0]], align 8 +; NARROW-NEXT: [[TMP3:%.*]] = fptrunc [[WIDE_LOAD]] to +; NARROW-NEXT: [[TMP4:%.*]] = call @foo_vector( [[TMP3]], splat (i1 true)) ; NARROW-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]] -; NARROW-NEXT: store <2 x float> [[TMP7]], ptr [[TMP8]], align 4 -; NARROW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 -; NARROW-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; NARROW-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; NARROW-NEXT: store [[TMP4]], ptr [[TMP8]], align 4 +; NARROW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]] +; NARROW-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NARROW-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; NARROW: 
middle.block: -; NARROW-NEXT: br label [[SCALAR_PH:%.*]] +; NARROW-NEXT: br i1 false, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH:%.*]] ; NARROW: scalar.ph: ; NARROW-NEXT: br label [[FOR_BODY:%.*]] ; NARROW: for.body: -; NARROW-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 1024, [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] +; NARROW-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[N_VEC]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] ; NARROW-NEXT: [[GEP:%.*]] = getelementptr double, ptr [[B]], i64 [[INDVARS_IV]] ; NARROW-NEXT: [[LOAD:%.*]] = load double, ptr [[GEP]], align 8 ; NARROW-NEXT: [[TRUNC:%.*]] = fptrunc double [[LOAD]] to float -; NARROW-NEXT: [[CALL:%.*]] = call float @foo(float [[TRUNC]]) #[[ATTR1]] +; NARROW-NEXT: [[CALL:%.*]] = call float @foo(float [[TRUNC]]) #[[ATTR2:[0-9]+]] ; NARROW-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]] ; NARROW-NEXT: store float [[CALL]], ptr [[ARRAYIDX]], align 4 ; NARROW-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; NARROW-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1025 -; NARROW-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; NARROW-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; NARROW: for.cond.cleanup: ; NARROW-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/LoongArch/defaults.ll b/llvm/test/Transforms/LoopVectorize/LoongArch/defaults.ll index 7afa8ce998121..e05332abcee61 100644 --- a/llvm/test/Transforms/LoopVectorize/LoongArch/defaults.ll +++ b/llvm/test/Transforms/LoopVectorize/LoongArch/defaults.ll @@ -22,7 +22,7 @@ define void @vector_add(ptr noalias nocapture %a, i64 %v) { ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] -; CHECK-NEXT: 
[[TMP5:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i32 4 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP2]], align 8 ; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP5]], align 8 ; CHECK-NEXT: [[TMP6:%.*]] = add <4 x i64> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] diff --git a/llvm/test/Transforms/LoopVectorize/PowerPC/exit-branch-cost.ll b/llvm/test/Transforms/LoopVectorize/PowerPC/exit-branch-cost.ll index abbd176a1df6e..478c9c1141949 100644 --- a/llvm/test/Transforms/LoopVectorize/PowerPC/exit-branch-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/PowerPC/exit-branch-cost.ll @@ -51,17 +51,17 @@ define i1 @select_exit_cond(ptr %start, ptr %end, i64 %N) { ; CHECK-NEXT: [[STEP_ADD_10:%.*]] = add <2 x i64> [[STEP_ADD_9]], splat (i64 2) ; CHECK-NEXT: [[STEP_ADD_11:%.*]] = add <2 x i64> [[STEP_ADD_10]], splat (i64 2) ; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 2 -; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 4 -; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 6 -; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 8 -; CHECK-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 10 -; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 12 -; CHECK-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 14 -; CHECK-NEXT: [[TMP68:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 16 -; CHECK-NEXT: [[TMP69:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 18 -; CHECK-NEXT: [[TMP70:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 20 -; CHECK-NEXT: [[TMP71:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 22 +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 2 +; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 4 +; CHECK-NEXT: [[TMP14:%.*]] = 
getelementptr i8, ptr [[NEXT_GEP]], i64 6 +; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 8 +; CHECK-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 10 +; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 12 +; CHECK-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 14 +; CHECK-NEXT: [[TMP68:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 16 +; CHECK-NEXT: [[TMP69:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 18 +; CHECK-NEXT: [[TMP70:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 20 +; CHECK-NEXT: [[TMP71:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 22 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i8>, ptr [[NEXT_GEP]], align 1 ; CHECK-NEXT: [[WIDE_LOAD25:%.*]] = load <2 x i8>, ptr [[TMP12]], align 1 ; CHECK-NEXT: [[WIDE_LOAD26:%.*]] = load <2 x i8>, ptr [[TMP13]], align 1 @@ -193,6 +193,7 @@ define i1 @select_exit_cond(ptr %start, ptr %end, i64 %N) { ; CHECK-NEXT: [[CMP_I166_I:%.*]] = icmp ult ptr [[PTR_IV]], [[END]] ; CHECK-NEXT: [[CMP2:%.*]] = icmp ne i64 [[IV]], [[N]] ; CHECK-NEXT: [[AND:%.*]] = select i1 [[CMP_I166_I]], i1 [[CMP2]], i1 false +; CHECK-NEXT: br i1 [[AND]], label %[[LOOP]], label %[[EXIT]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi i64 [ [[RED_NEXT]], %[[LOOP]] ], [ [[TMP52]], %[[MIDDLE_BLOCK]] ], [ [[TMP55]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ] ; CHECK-NEXT: [[RES:%.*]] = icmp eq i64 [[RED_NEXT_LCSSA]], 0 @@ -226,4 +227,6 @@ exit: ; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} ; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} ; CHECK: [[PROF3]] = !{!"branch_weights", i32 2, i32 22} +; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]} +; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META2]], [[META1]]} ;. 
diff --git a/llvm/test/Transforms/LoopVectorize/PowerPC/optimal-epilog-vectorization.ll b/llvm/test/Transforms/LoopVectorize/PowerPC/optimal-epilog-vectorization.ll index 7677c9666455a..f1fbf1dd5d942 100644 --- a/llvm/test/Transforms/LoopVectorize/PowerPC/optimal-epilog-vectorization.ll +++ b/llvm/test/Transforms/LoopVectorize/PowerPC/optimal-epilog-vectorization.ll @@ -22,13 +22,13 @@ define void @f1(ptr noalias %aa, ptr noalias %bb, ptr noalias %cc, i32 %N) { ; VF-TWO-CHECK: [[VECTOR_BODY]]: ; VF-TWO-CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; VF-TWO-CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[BB]], i64 [[INDEX]] -; VF-TWO-CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 4 -; VF-TWO-CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 8 -; VF-TWO-CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 12 -; VF-TWO-CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 16 -; VF-TWO-CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 20 -; VF-TWO-CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 24 -; VF-TWO-CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 28 +; VF-TWO-CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i64 4 +; VF-TWO-CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i64 8 +; VF-TWO-CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i64 12 +; VF-TWO-CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i64 16 +; VF-TWO-CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i64 20 +; VF-TWO-CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i64 24 +; VF-TWO-CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i64 28 ; VF-TWO-CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x 
float>, ptr [[TMP8]], align 4 ; VF-TWO-CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP17]], align 4 ; VF-TWO-CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <4 x float>, ptr [[TMP18]], align 4 @@ -38,13 +38,13 @@ define void @f1(ptr noalias %aa, ptr noalias %bb, ptr noalias %cc, i32 %N) { ; VF-TWO-CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x float>, ptr [[TMP22]], align 4 ; VF-TWO-CHECK-NEXT: [[WIDE_LOAD8:%.*]] = load <4 x float>, ptr [[TMP23]], align 4 ; VF-TWO-CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds float, ptr [[CC]], i64 [[INDEX]] -; VF-TWO-CHECK-NEXT: [[TMP33:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i32 4 -; VF-TWO-CHECK-NEXT: [[TMP34:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i32 8 -; VF-TWO-CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i32 12 -; VF-TWO-CHECK-NEXT: [[TMP36:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i32 16 -; VF-TWO-CHECK-NEXT: [[TMP37:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i32 20 -; VF-TWO-CHECK-NEXT: [[TMP38:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i32 24 -; VF-TWO-CHECK-NEXT: [[TMP39:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i32 28 +; VF-TWO-CHECK-NEXT: [[TMP33:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i64 4 +; VF-TWO-CHECK-NEXT: [[TMP34:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i64 8 +; VF-TWO-CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i64 12 +; VF-TWO-CHECK-NEXT: [[TMP36:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i64 16 +; VF-TWO-CHECK-NEXT: [[TMP37:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i64 20 +; VF-TWO-CHECK-NEXT: [[TMP38:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i64 24 +; VF-TWO-CHECK-NEXT: [[TMP39:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i64 28 ; VF-TWO-CHECK-NEXT: [[WIDE_LOAD9:%.*]] = load <4 x float>, ptr [[TMP24]], align 4 ; VF-TWO-CHECK-NEXT: [[WIDE_LOAD10:%.*]] = load <4 x float>, ptr [[TMP33]], align 4 ; VF-TWO-CHECK-NEXT: 
[[WIDE_LOAD11:%.*]] = load <4 x float>, ptr [[TMP34]], align 4 @@ -62,13 +62,13 @@ define void @f1(ptr noalias %aa, ptr noalias %bb, ptr noalias %cc, i32 %N) { ; VF-TWO-CHECK-NEXT: [[TMP46:%.*]] = fadd fast <4 x float> [[WIDE_LOAD7]], [[WIDE_LOAD15]] ; VF-TWO-CHECK-NEXT: [[TMP47:%.*]] = fadd fast <4 x float> [[WIDE_LOAD8]], [[WIDE_LOAD16]] ; VF-TWO-CHECK-NEXT: [[TMP48:%.*]] = getelementptr inbounds float, ptr [[AA]], i64 [[INDEX]] -; VF-TWO-CHECK-NEXT: [[TMP57:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 4 -; VF-TWO-CHECK-NEXT: [[TMP58:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 8 -; VF-TWO-CHECK-NEXT: [[TMP59:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 12 -; VF-TWO-CHECK-NEXT: [[TMP60:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 16 -; VF-TWO-CHECK-NEXT: [[TMP61:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 20 -; VF-TWO-CHECK-NEXT: [[TMP62:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 24 -; VF-TWO-CHECK-NEXT: [[TMP63:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 28 +; VF-TWO-CHECK-NEXT: [[TMP57:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 4 +; VF-TWO-CHECK-NEXT: [[TMP58:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 8 +; VF-TWO-CHECK-NEXT: [[TMP59:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 12 +; VF-TWO-CHECK-NEXT: [[TMP60:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 16 +; VF-TWO-CHECK-NEXT: [[TMP61:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 20 +; VF-TWO-CHECK-NEXT: [[TMP62:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 24 +; VF-TWO-CHECK-NEXT: [[TMP63:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 28 ; VF-TWO-CHECK-NEXT: store <4 x float> [[TMP40]], ptr [[TMP48]], align 4 ; VF-TWO-CHECK-NEXT: store <4 x float> [[TMP41]], ptr [[TMP57]], align 4 ; VF-TWO-CHECK-NEXT: store <4 x float> [[TMP42]], ptr [[TMP58]], align 4 @@ -124,13 +124,13 @@ define void @f1(ptr noalias %aa, ptr noalias %bb, ptr noalias %cc, 
i32 %N) { ; VF-FOUR-CHECK: [[VECTOR_BODY]]: ; VF-FOUR-CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; VF-FOUR-CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[BB]], i64 [[INDEX]] -; VF-FOUR-CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 4 -; VF-FOUR-CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 8 -; VF-FOUR-CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 12 -; VF-FOUR-CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 16 -; VF-FOUR-CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 20 -; VF-FOUR-CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 24 -; VF-FOUR-CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 28 +; VF-FOUR-CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i64 4 +; VF-FOUR-CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i64 8 +; VF-FOUR-CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i64 12 +; VF-FOUR-CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i64 16 +; VF-FOUR-CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i64 20 +; VF-FOUR-CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i64 24 +; VF-FOUR-CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i64 28 ; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP8]], align 4 ; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP17]], align 4 ; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <4 x float>, ptr [[TMP18]], align 4 @@ -140,13 +140,13 @@ define void @f1(ptr noalias %aa, ptr noalias %bb, ptr noalias %cc, i32 %N) { ; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x float>, ptr [[TMP22]], align 4 ; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD8:%.*]] = load <4 x float>, 
ptr [[TMP23]], align 4 ; VF-FOUR-CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds float, ptr [[CC]], i64 [[INDEX]] -; VF-FOUR-CHECK-NEXT: [[TMP33:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i32 4 -; VF-FOUR-CHECK-NEXT: [[TMP34:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i32 8 -; VF-FOUR-CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i32 12 -; VF-FOUR-CHECK-NEXT: [[TMP36:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i32 16 -; VF-FOUR-CHECK-NEXT: [[TMP37:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i32 20 -; VF-FOUR-CHECK-NEXT: [[TMP38:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i32 24 -; VF-FOUR-CHECK-NEXT: [[TMP39:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i32 28 +; VF-FOUR-CHECK-NEXT: [[TMP33:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i64 4 +; VF-FOUR-CHECK-NEXT: [[TMP34:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i64 8 +; VF-FOUR-CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i64 12 +; VF-FOUR-CHECK-NEXT: [[TMP36:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i64 16 +; VF-FOUR-CHECK-NEXT: [[TMP37:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i64 20 +; VF-FOUR-CHECK-NEXT: [[TMP38:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i64 24 +; VF-FOUR-CHECK-NEXT: [[TMP39:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i64 28 ; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD9:%.*]] = load <4 x float>, ptr [[TMP24]], align 4 ; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD10:%.*]] = load <4 x float>, ptr [[TMP33]], align 4 ; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD11:%.*]] = load <4 x float>, ptr [[TMP34]], align 4 @@ -164,13 +164,13 @@ define void @f1(ptr noalias %aa, ptr noalias %bb, ptr noalias %cc, i32 %N) { ; VF-FOUR-CHECK-NEXT: [[TMP46:%.*]] = fadd fast <4 x float> [[WIDE_LOAD7]], [[WIDE_LOAD15]] ; VF-FOUR-CHECK-NEXT: [[TMP47:%.*]] = fadd fast <4 x float> [[WIDE_LOAD8]], [[WIDE_LOAD16]] ; VF-FOUR-CHECK-NEXT: [[TMP48:%.*]] = getelementptr inbounds float, ptr 
[[AA]], i64 [[INDEX]] -; VF-FOUR-CHECK-NEXT: [[TMP57:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 4 -; VF-FOUR-CHECK-NEXT: [[TMP58:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 8 -; VF-FOUR-CHECK-NEXT: [[TMP59:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 12 -; VF-FOUR-CHECK-NEXT: [[TMP60:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 16 -; VF-FOUR-CHECK-NEXT: [[TMP61:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 20 -; VF-FOUR-CHECK-NEXT: [[TMP62:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 24 -; VF-FOUR-CHECK-NEXT: [[TMP63:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 28 +; VF-FOUR-CHECK-NEXT: [[TMP57:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 4 +; VF-FOUR-CHECK-NEXT: [[TMP58:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 8 +; VF-FOUR-CHECK-NEXT: [[TMP59:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 12 +; VF-FOUR-CHECK-NEXT: [[TMP60:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 16 +; VF-FOUR-CHECK-NEXT: [[TMP61:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 20 +; VF-FOUR-CHECK-NEXT: [[TMP62:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 24 +; VF-FOUR-CHECK-NEXT: [[TMP63:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 28 ; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP40]], ptr [[TMP48]], align 4 ; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP41]], ptr [[TMP57]], align 4 ; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP42]], ptr [[TMP58]], align 4 @@ -261,22 +261,22 @@ define void @f2(ptr noalias %A, ptr noalias %B, i32 %n) { ; VF-TWO-CHECK-NEXT: [[TMP32:%.*]] = add i32 [[TMP24]], [[N]] ; VF-TWO-CHECK-NEXT: [[TMP40:%.*]] = sext i32 [[TMP32]] to i64 ; VF-TWO-CHECK-NEXT: [[TMP48:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP40]] -; VF-TWO-CHECK-NEXT: [[TMP56:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 0 -; VF-TWO-CHECK-NEXT: [[TMP57:%.*]] = getelementptr inbounds float, ptr [[TMP56]], i32 -3 -; 
VF-TWO-CHECK-NEXT: [[TMP58:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -4 -; VF-TWO-CHECK-NEXT: [[TMP59:%.*]] = getelementptr inbounds float, ptr [[TMP58]], i32 -3 -; VF-TWO-CHECK-NEXT: [[TMP60:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -8 -; VF-TWO-CHECK-NEXT: [[TMP61:%.*]] = getelementptr inbounds float, ptr [[TMP60]], i32 -3 -; VF-TWO-CHECK-NEXT: [[TMP62:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -12 -; VF-TWO-CHECK-NEXT: [[TMP63:%.*]] = getelementptr inbounds float, ptr [[TMP62]], i32 -3 -; VF-TWO-CHECK-NEXT: [[TMP64:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -16 -; VF-TWO-CHECK-NEXT: [[TMP65:%.*]] = getelementptr inbounds float, ptr [[TMP64]], i32 -3 -; VF-TWO-CHECK-NEXT: [[TMP66:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -20 -; VF-TWO-CHECK-NEXT: [[TMP67:%.*]] = getelementptr inbounds float, ptr [[TMP66]], i32 -3 -; VF-TWO-CHECK-NEXT: [[TMP68:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -24 -; VF-TWO-CHECK-NEXT: [[TMP69:%.*]] = getelementptr inbounds float, ptr [[TMP68]], i32 -3 -; VF-TWO-CHECK-NEXT: [[TMP70:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -28 -; VF-TWO-CHECK-NEXT: [[TMP71:%.*]] = getelementptr inbounds float, ptr [[TMP70]], i32 -3 +; VF-TWO-CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 0 +; VF-TWO-CHECK-NEXT: [[TMP57:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i64 -3 +; VF-TWO-CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 -4 +; VF-TWO-CHECK-NEXT: [[TMP59:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 -3 +; VF-TWO-CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 -8 +; VF-TWO-CHECK-NEXT: [[TMP61:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 -3 +; VF-TWO-CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 -12 +; VF-TWO-CHECK-NEXT: [[TMP63:%.*]] = getelementptr inbounds float, ptr [[TMP18]], i64 -3 +; 
VF-TWO-CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 -16 +; VF-TWO-CHECK-NEXT: [[TMP65:%.*]] = getelementptr inbounds float, ptr [[TMP20]], i64 -3 +; VF-TWO-CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 -20 +; VF-TWO-CHECK-NEXT: [[TMP67:%.*]] = getelementptr inbounds float, ptr [[TMP22]], i64 -3 +; VF-TWO-CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 -24 +; VF-TWO-CHECK-NEXT: [[TMP69:%.*]] = getelementptr inbounds float, ptr [[TMP25]], i64 -3 +; VF-TWO-CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 -28 +; VF-TWO-CHECK-NEXT: [[TMP71:%.*]] = getelementptr inbounds float, ptr [[TMP26]], i64 -3 ; VF-TWO-CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP57]], align 4 ; VF-TWO-CHECK-NEXT: [[REVERSE:%.*]] = shufflevector <4 x float> [[WIDE_LOAD]], <4 x float> poison, <4 x i32> ; VF-TWO-CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP59]], align 4 @@ -302,13 +302,13 @@ define void @f2(ptr noalias %A, ptr noalias %B, i32 %n) { ; VF-TWO-CHECK-NEXT: [[TMP78:%.*]] = fadd fast <4 x float> [[REVERSE13]], splat (float 1.000000e+00) ; VF-TWO-CHECK-NEXT: [[TMP79:%.*]] = fadd fast <4 x float> [[REVERSE15]], splat (float 1.000000e+00) ; VF-TWO-CHECK-NEXT: [[TMP80:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]] -; VF-TWO-CHECK-NEXT: [[TMP89:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i32 4 -; VF-TWO-CHECK-NEXT: [[TMP90:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i32 8 -; VF-TWO-CHECK-NEXT: [[TMP91:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i32 12 -; VF-TWO-CHECK-NEXT: [[TMP92:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i32 16 -; VF-TWO-CHECK-NEXT: [[TMP93:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i32 20 -; VF-TWO-CHECK-NEXT: [[TMP94:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i32 24 -; VF-TWO-CHECK-NEXT: [[TMP95:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i32 28 
+; VF-TWO-CHECK-NEXT: [[TMP89:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i64 4 +; VF-TWO-CHECK-NEXT: [[TMP90:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i64 8 +; VF-TWO-CHECK-NEXT: [[TMP91:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i64 12 +; VF-TWO-CHECK-NEXT: [[TMP92:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i64 16 +; VF-TWO-CHECK-NEXT: [[TMP93:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i64 20 +; VF-TWO-CHECK-NEXT: [[TMP94:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i64 24 +; VF-TWO-CHECK-NEXT: [[TMP95:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i64 28 ; VF-TWO-CHECK-NEXT: store <4 x float> [[TMP72]], ptr [[TMP80]], align 4 ; VF-TWO-CHECK-NEXT: store <4 x float> [[TMP73]], ptr [[TMP89]], align 4 ; VF-TWO-CHECK-NEXT: store <4 x float> [[TMP74]], ptr [[TMP90]], align 4 @@ -340,8 +340,8 @@ define void @f2(ptr noalias %A, ptr noalias %B, i32 %n) { ; VF-TWO-CHECK-NEXT: [[TMP100:%.*]] = add i32 [[TMP99]], [[N]] ; VF-TWO-CHECK-NEXT: [[TMP101:%.*]] = sext i32 [[TMP100]] to i64 ; VF-TWO-CHECK-NEXT: [[TMP102:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP101]] -; VF-TWO-CHECK-NEXT: [[TMP103:%.*]] = getelementptr inbounds float, ptr [[TMP102]], i32 0 -; VF-TWO-CHECK-NEXT: [[TMP104:%.*]] = getelementptr inbounds float, ptr [[TMP103]], i32 -1 +; VF-TWO-CHECK-NEXT: [[TMP50:%.*]] = getelementptr inbounds float, ptr [[TMP102]], i64 0 +; VF-TWO-CHECK-NEXT: [[TMP104:%.*]] = getelementptr inbounds float, ptr [[TMP50]], i64 -1 ; VF-TWO-CHECK-NEXT: [[WIDE_LOAD23:%.*]] = load <2 x float>, ptr [[TMP104]], align 4 ; VF-TWO-CHECK-NEXT: [[REVERSE24:%.*]] = shufflevector <2 x float> [[WIDE_LOAD23]], <2 x float> poison, <2 x i32> ; VF-TWO-CHECK-NEXT: [[TMP105:%.*]] = fadd fast <2 x float> [[REVERSE24]], splat (float 1.000000e+00) @@ -384,22 +384,22 @@ define void @f2(ptr noalias %A, ptr noalias %B, i32 %n) { ; VF-FOUR-CHECK-NEXT: [[TMP32:%.*]] = add i32 [[TMP24]], [[N]] ; VF-FOUR-CHECK-NEXT: [[TMP40:%.*]] = sext 
i32 [[TMP32]] to i64 ; VF-FOUR-CHECK-NEXT: [[TMP48:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP40]] -; VF-FOUR-CHECK-NEXT: [[TMP56:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 0 -; VF-FOUR-CHECK-NEXT: [[TMP57:%.*]] = getelementptr inbounds float, ptr [[TMP56]], i32 -3 -; VF-FOUR-CHECK-NEXT: [[TMP58:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -4 -; VF-FOUR-CHECK-NEXT: [[TMP59:%.*]] = getelementptr inbounds float, ptr [[TMP58]], i32 -3 -; VF-FOUR-CHECK-NEXT: [[TMP60:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -8 -; VF-FOUR-CHECK-NEXT: [[TMP61:%.*]] = getelementptr inbounds float, ptr [[TMP60]], i32 -3 -; VF-FOUR-CHECK-NEXT: [[TMP62:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -12 -; VF-FOUR-CHECK-NEXT: [[TMP63:%.*]] = getelementptr inbounds float, ptr [[TMP62]], i32 -3 -; VF-FOUR-CHECK-NEXT: [[TMP64:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -16 -; VF-FOUR-CHECK-NEXT: [[TMP65:%.*]] = getelementptr inbounds float, ptr [[TMP64]], i32 -3 -; VF-FOUR-CHECK-NEXT: [[TMP66:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -20 -; VF-FOUR-CHECK-NEXT: [[TMP67:%.*]] = getelementptr inbounds float, ptr [[TMP66]], i32 -3 -; VF-FOUR-CHECK-NEXT: [[TMP68:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -24 -; VF-FOUR-CHECK-NEXT: [[TMP69:%.*]] = getelementptr inbounds float, ptr [[TMP68]], i32 -3 -; VF-FOUR-CHECK-NEXT: [[TMP70:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -28 -; VF-FOUR-CHECK-NEXT: [[TMP71:%.*]] = getelementptr inbounds float, ptr [[TMP70]], i32 -3 +; VF-FOUR-CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 0 +; VF-FOUR-CHECK-NEXT: [[TMP57:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i64 -3 +; VF-FOUR-CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 -4 +; VF-FOUR-CHECK-NEXT: [[TMP59:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 -3 +; VF-FOUR-CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds 
float, ptr [[TMP48]], i64 -8 +; VF-FOUR-CHECK-NEXT: [[TMP61:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 -3 +; VF-FOUR-CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 -12 +; VF-FOUR-CHECK-NEXT: [[TMP63:%.*]] = getelementptr inbounds float, ptr [[TMP18]], i64 -3 +; VF-FOUR-CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 -16 +; VF-FOUR-CHECK-NEXT: [[TMP65:%.*]] = getelementptr inbounds float, ptr [[TMP20]], i64 -3 +; VF-FOUR-CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 -20 +; VF-FOUR-CHECK-NEXT: [[TMP67:%.*]] = getelementptr inbounds float, ptr [[TMP22]], i64 -3 +; VF-FOUR-CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 -24 +; VF-FOUR-CHECK-NEXT: [[TMP69:%.*]] = getelementptr inbounds float, ptr [[TMP25]], i64 -3 +; VF-FOUR-CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i64 -28 +; VF-FOUR-CHECK-NEXT: [[TMP71:%.*]] = getelementptr inbounds float, ptr [[TMP26]], i64 -3 ; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP57]], align 4 ; VF-FOUR-CHECK-NEXT: [[REVERSE:%.*]] = shufflevector <4 x float> [[WIDE_LOAD]], <4 x float> poison, <4 x i32> ; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP59]], align 4 @@ -425,13 +425,13 @@ define void @f2(ptr noalias %A, ptr noalias %B, i32 %n) { ; VF-FOUR-CHECK-NEXT: [[TMP78:%.*]] = fadd fast <4 x float> [[REVERSE13]], splat (float 1.000000e+00) ; VF-FOUR-CHECK-NEXT: [[TMP79:%.*]] = fadd fast <4 x float> [[REVERSE15]], splat (float 1.000000e+00) ; VF-FOUR-CHECK-NEXT: [[TMP80:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]] -; VF-FOUR-CHECK-NEXT: [[TMP89:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i32 4 -; VF-FOUR-CHECK-NEXT: [[TMP90:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i32 8 -; VF-FOUR-CHECK-NEXT: [[TMP91:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i32 12 -; VF-FOUR-CHECK-NEXT: [[TMP92:%.*]] = 
getelementptr inbounds float, ptr [[TMP80]], i32 16 -; VF-FOUR-CHECK-NEXT: [[TMP93:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i32 20 -; VF-FOUR-CHECK-NEXT: [[TMP94:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i32 24 -; VF-FOUR-CHECK-NEXT: [[TMP95:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i32 28 +; VF-FOUR-CHECK-NEXT: [[TMP89:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i64 4 +; VF-FOUR-CHECK-NEXT: [[TMP90:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i64 8 +; VF-FOUR-CHECK-NEXT: [[TMP91:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i64 12 +; VF-FOUR-CHECK-NEXT: [[TMP92:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i64 16 +; VF-FOUR-CHECK-NEXT: [[TMP93:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i64 20 +; VF-FOUR-CHECK-NEXT: [[TMP94:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i64 24 +; VF-FOUR-CHECK-NEXT: [[TMP95:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i64 28 ; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP72]], ptr [[TMP80]], align 4 ; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP73]], ptr [[TMP89]], align 4 ; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP74]], ptr [[TMP90]], align 4 @@ -463,8 +463,8 @@ define void @f2(ptr noalias %A, ptr noalias %B, i32 %n) { ; VF-FOUR-CHECK-NEXT: [[TMP100:%.*]] = add i32 [[TMP99]], [[N]] ; VF-FOUR-CHECK-NEXT: [[TMP101:%.*]] = sext i32 [[TMP100]] to i64 ; VF-FOUR-CHECK-NEXT: [[TMP102:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP101]] -; VF-FOUR-CHECK-NEXT: [[TMP103:%.*]] = getelementptr inbounds float, ptr [[TMP102]], i32 0 -; VF-FOUR-CHECK-NEXT: [[TMP104:%.*]] = getelementptr inbounds float, ptr [[TMP103]], i32 -3 +; VF-FOUR-CHECK-NEXT: [[TMP50:%.*]] = getelementptr inbounds float, ptr [[TMP102]], i64 0 +; VF-FOUR-CHECK-NEXT: [[TMP104:%.*]] = getelementptr inbounds float, ptr [[TMP50]], i64 -3 ; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD23:%.*]] = load <4 x float>, ptr [[TMP104]], align 4 ; VF-FOUR-CHECK-NEXT: [[REVERSE24:%.*]] = shufflevector <4 x 
float> [[WIDE_LOAD23]], <4 x float> poison, <4 x i32> ; VF-FOUR-CHECK-NEXT: [[TMP105:%.*]] = fadd fast <4 x float> [[REVERSE24]], splat (float 1.000000e+00) diff --git a/llvm/test/Transforms/LoopVectorize/PowerPC/small-loop-rdx.ll b/llvm/test/Transforms/LoopVectorize/PowerPC/small-loop-rdx.ll index d82a3cde4639a..dc9c154b3fe05 100644 --- a/llvm/test/Transforms/LoopVectorize/PowerPC/small-loop-rdx.ll +++ b/llvm/test/Transforms/LoopVectorize/PowerPC/small-loop-rdx.ll @@ -34,13 +34,13 @@ define void @test(ptr %arr, i32 %len) { ; CHECK-NEXT: [[VEC_PHI7:%.*]] = phi <2 x double> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP18:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_PHI8:%.*]] = phi <2 x double> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP19:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds double, ptr [[ARR]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds double, ptr [[TMP3]], i32 2 -; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds double, ptr [[TMP3]], i32 4 -; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds double, ptr [[TMP3]], i32 6 -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds double, ptr [[TMP3]], i32 8 -; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds double, ptr [[TMP3]], i32 10 -; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds double, ptr [[TMP3]], i32 12 -; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds double, ptr [[TMP3]], i32 14 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds double, ptr [[TMP3]], i64 2 +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds double, ptr [[TMP3]], i64 4 +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds double, ptr [[TMP3]], i64 6 +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds double, ptr [[TMP3]], i64 8 +; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds double, ptr [[TMP3]], i64 10 +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds double, ptr [[TMP3]], i64 12 +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds double, ptr [[TMP3]], 
i64 14 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x double>, ptr [[TMP3]], align 8 ; CHECK-NEXT: [[WIDE_LOAD9:%.*]] = load <2 x double>, ptr [[TMP5]], align 8 ; CHECK-NEXT: [[WIDE_LOAD10:%.*]] = load <2 x double>, ptr [[TMP6]], align 8 diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll index 9f6f79d9030ed..41d3af2d5e763 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll @@ -69,51 +69,51 @@ exit: define i8 @dead_live_out_due_to_scalar_epilogue_required(ptr %src, ptr %dst) { ; CHECK-LABEL: define i8 @dead_live_out_due_to_scalar_epilogue_required( ; CHECK-SAME: ptr [[SRC:%.*]], ptr [[DST:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[ENTRY:.*]]: -; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.vscale.i32() -; CHECK-NEXT: [[TMP1:%.*]] = shl nuw i32 [[TMP0]], 2 -; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.umax.i32(i32 [[TMP1]], i32 6) -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i32 252, [[TMP2]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: br label %[[VECTOR_MEMCHECK:.*]] ; CHECK: [[VECTOR_MEMCHECK]]: ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[DST]], i64 1005 ; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[SRC]], i64 1005 ; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[DST]], [[SCEVGEP1]] ; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[SRC]], [[SCEVGEP]] ; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]] -; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: -; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vscale.i32() -; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i32 [[TMP3]], 4 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 252, [[TMP4]] -; CHECK-NEXT: 
[[TMP5:%.*]] = icmp eq i32 [[N_MOD_VF]], 0 -; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], i32 [[TMP4]], i32 [[N_MOD_VF]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 252, [[TMP6]] -; CHECK-NEXT: [[IND_END:%.*]] = mul i32 [[N_VEC]], 4 -; CHECK-NEXT: [[TMP9:%.*]] = call @llvm.stepvector.nxv4i32() -; CHECK-NEXT: [[TMP11:%.*]] = mul [[TMP9]], splat (i32 4) -; CHECK-NEXT: [[INDUCTION:%.*]] = add zeroinitializer, [[TMP11]] -; CHECK-NEXT: [[TMP14:%.*]] = mul i32 4, [[TMP4]] -; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement poison, i32 [[TMP14]], i64 0 -; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector [[DOTSPLATINSERT]], poison, zeroinitializer +; CHECK-NEXT: [[TMP0:%.*]] = call @llvm.stepvector.nxv16i32() +; CHECK-NEXT: [[TMP1:%.*]] = mul [[TMP0]], splat (i32 4) +; CHECK-NEXT: [[INDUCTION:%.*]] = add zeroinitializer, [[TMP1]] ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; CHECK-NEXT: [[VEC_IND:%.*]] = phi [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP15:%.*]] = sext [[VEC_IND]] to -; CHECK-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[DST]], [[TMP15]] -; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i8.nxv4p0( zeroinitializer, align 1 [[TMP16]], splat (i1 true)), !alias.scope [[META3:![0-9]+]], !noalias [[META6:![0-9]+]] -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP4]] -; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT]] -; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NEXT: [[VEC_IND:%.*]] = phi [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = phi i32 [ 252, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP2:%.*]] = call i32 
@llvm.experimental.get.vector.length.i32(i32 [[AVL]], i32 16, i1 true) +; CHECK-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement poison, i32 [[TMP2]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT3:%.*]] = shufflevector [[BROADCAST_SPLATINSERT2]], poison, zeroinitializer +; CHECK-NEXT: [[TMP3:%.*]] = mul i32 4, [[TMP2]] +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i32 [[TMP3]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer +; CHECK-NEXT: [[TMP5:%.*]] = icmp uge [[TMP0]], [[BROADCAST_SPLAT3]] +; CHECK-NEXT: [[TMP9:%.*]] = sext [[VEC_IND]] to +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[SRC]], [[TMP9]] +; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call @llvm.vp.gather.nxv16i8.nxv16p0( align 1 [[TMP6]], splat (i1 true), i32 [[TMP2]]), !alias.scope [[META3:![0-9]+]] +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[DST]], [[TMP9]] +; CHECK-NEXT: call void @llvm.vp.scatter.nxv16i8.nxv16p0( zeroinitializer, align 1 [[TMP7]], splat (i1 true), i32 [[TMP2]]), !alias.scope [[META6:![0-9]+]], !noalias [[META3]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i32 [[AVL]], [[TMP2]] +; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] +; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[AVL_NEXT]], 0 +; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[SCALAR_PH]] +; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.nxv16i1( [[TMP5]], i1 false) +; CHECK-NEXT: [[TMP11:%.*]] = sub i64 [[TMP10]], 1 +; CHECK-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP12]], 16 +; CHECK-NEXT: [[TMP17:%.*]] = mul i64 [[TMP13]], 0 +; CHECK-NEXT: [[TMP15:%.*]] = extractelement [[WIDE_MASKED_GATHER]], i64 [[TMP11]] +; CHECK-NEXT: br label %[[EXIT:.*]] ; CHECK: [[SCALAR_PH]]: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 
[[IND_END]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; CHECK-NEXT: br label %[[LOOP:.*]] ; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] ; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[IV]] to i64 ; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[IDXPROM]] ; CHECK-NEXT: [[L:%.*]] = load i8, ptr [[GEP_SRC]], align 1 @@ -121,9 +121,9 @@ define i8 @dead_live_out_due_to_scalar_epilogue_required(ptr %src, ptr %dst) { ; CHECK-NEXT: store i8 0, ptr [[GEP_DST]], align 1 ; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 4 ; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[IV]], 1001 -; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]], !llvm.loop [[LOOP9:![0-9]+]] +; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[R:%.*]] = phi i8 [ [[L]], %[[LOOP]] ] +; CHECK-NEXT: [[R:%.*]] = phi i8 [ [[L]], %[[LOOP]] ], [ [[TMP15]], %[[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i8 [[R]] ; entry: diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/divrem.ll b/llvm/test/Transforms/LoopVectorize/RISCV/divrem.ll index 8e71718061c9b..feaf26e366962 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/divrem.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/divrem.ll @@ -639,73 +639,50 @@ for.end: define i32 @udiv_sdiv_with_invariant_divisors(i8 %x, i16 %y, i1 %c) { ; CHECK-LABEL: @udiv_sdiv_with_invariant_divisors( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.vscale.i32() -; CHECK-NEXT: [[TMP1:%.*]] = shl nuw i32 [[TMP0]], 1 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 12, [[TMP1]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; CHECK-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32() 
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i32 [[TMP2]], 2 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 12, [[TMP3]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 12, [[N_MOD_VF]] -; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i16 [[Y:%.*]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer -; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement poison, i8 [[X:%.*]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector [[BROADCAST_SPLATINSERT1]], poison, zeroinitializer -; CHECK-NEXT: [[DOTCAST:%.*]] = trunc i32 [[N_VEC]] to i16 -; CHECK-NEXT: [[TMP4:%.*]] = add i16 -12, [[DOTCAST]] -; CHECK-NEXT: [[DOTCAST5:%.*]] = trunc i32 [[N_VEC]] to i8 -; CHECK-NEXT: [[TMP5:%.*]] = add i8 -12, [[DOTCAST5]] -; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[C:%.*]], splat (i8 1), [[BROADCAST_SPLAT2]] -; CHECK-NEXT: [[TMP7:%.*]] = select i1 [[C]], splat (i16 1), [[BROADCAST_SPLAT]] -; CHECK-NEXT: [[TMP8:%.*]] = call @llvm.stepvector.nxv2i8() -; CHECK-NEXT: [[TMP9:%.*]] = mul [[TMP8]], splat (i8 1) -; CHECK-NEXT: [[INDUCTION:%.*]] = add splat (i8 -12), [[TMP9]] -; CHECK-NEXT: [[TMP10:%.*]] = trunc i32 [[TMP3]] to i8 -; CHECK-NEXT: [[BROADCAST_SPLATINSERT6:%.*]] = insertelement poison, i8 [[TMP10]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT7:%.*]] = shufflevector [[BROADCAST_SPLATINSERT6]], poison, zeroinitializer +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i1 [[C:%.*]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer +; CHECK-NEXT: [[TMP0:%.*]] = xor [[BROADCAST_SPLAT]], splat (i1 true) +; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement poison, i8 [[X:%.*]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector [[BROADCAST_SPLATINSERT1]], poison, zeroinitializer +; CHECK-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement poison, i16 [[Y:%.*]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT4:%.*]] = 
shufflevector [[BROADCAST_SPLATINSERT3]], poison, zeroinitializer +; CHECK-NEXT: [[TMP1:%.*]] = call @llvm.stepvector.nxv8i8() +; CHECK-NEXT: [[TMP2:%.*]] = mul [[TMP1]], splat (i8 1) +; CHECK-NEXT: [[INDUCTION:%.*]] = add splat (i8 -12), [[TMP2]] ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: -; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[VEC_IND:%.*]] = phi [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP11:%.*]] = udiv [[VEC_IND]], [[TMP6]] -; CHECK-NEXT: [[TMP12:%.*]] = zext [[TMP11]] to -; CHECK-NEXT: [[TMP13:%.*]] = sdiv [[TMP12]], [[TMP7]] -; CHECK-NEXT: [[TMP14:%.*]] = sext [[TMP13]] to -; CHECK-NEXT: [[PREDPHI:%.*]] = select i1 [[C]], zeroinitializer, [[TMP14]] -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP3]] -; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT7]] -; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; CHECK-NEXT: [[VEC_IND:%.*]] = phi [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = phi i32 [ 12, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[AVL]], i32 8, i1 true) +; CHECK-NEXT: [[BROADCAST_SPLATINSERT7:%.*]] = insertelement poison, i32 [[TMP3]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT8:%.*]] = shufflevector [[BROADCAST_SPLATINSERT7]], poison, zeroinitializer +; CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i8 +; CHECK-NEXT: [[BROADCAST_SPLATINSERT5:%.*]] = insertelement poison, i8 [[TMP4]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT6:%.*]] = shufflevector [[BROADCAST_SPLATINSERT5]], poison, zeroinitializer +; CHECK-NEXT: [[TMP5:%.*]] = call @llvm.stepvector.nxv8i32() +; CHECK-NEXT: [[TMP15:%.*]] = 
icmp uge [[TMP5]], [[BROADCAST_SPLAT8]] +; CHECK-NEXT: [[TMP8:%.*]] = call @llvm.vp.merge.nxv8i8( [[TMP0]], [[BROADCAST_SPLAT2]], splat (i8 1), i32 [[TMP3]]) +; CHECK-NEXT: [[TMP9:%.*]] = udiv [[VEC_IND]], [[TMP8]] +; CHECK-NEXT: [[TMP10:%.*]] = zext [[TMP9]] to +; CHECK-NEXT: [[TMP11:%.*]] = call @llvm.vp.merge.nxv8i16( [[TMP0]], [[BROADCAST_SPLAT4]], splat (i16 1), i32 [[TMP3]]) +; CHECK-NEXT: [[TMP12:%.*]] = sdiv [[TMP10]], [[TMP11]] +; CHECK-NEXT: [[TMP13:%.*]] = sext [[TMP12]] to +; CHECK-NEXT: [[PREDPHI:%.*]] = select i1 [[C]], zeroinitializer, [[TMP13]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i32 [[AVL]], [[TMP3]] +; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT6]] +; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i32 [[AVL_NEXT]], 0 +; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[TMP16:%.*]] = call i32 @llvm.vscale.i32() -; CHECK-NEXT: [[TMP17:%.*]] = mul nuw i32 [[TMP16]], 2 -; CHECK-NEXT: [[TMP18:%.*]] = sub i32 [[TMP17]], 1 -; CHECK-NEXT: [[TMP19:%.*]] = extractelement [[PREDPHI]], i32 [[TMP18]] -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 12, [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] -; CHECK: scalar.ph: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i16 [ [[TMP4]], [[MIDDLE_BLOCK]] ], [ -12, [[ENTRY:%.*]] ] -; CHECK-NEXT: [[BC_RESUME_VAL8:%.*]] = phi i8 [ [[TMP5]], [[MIDDLE_BLOCK]] ], [ -12, [[ENTRY]] ] -; CHECK-NEXT: br label [[LOOP_HEADER:%.*]] -; CHECK: loop.header: -; CHECK-NEXT: [[IV:%.*]] = phi i16 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] -; CHECK-NEXT: [[NARROW_IV:%.*]] = phi i8 [ [[BC_RESUME_VAL8]], [[SCALAR_PH]] ], [ [[IV_NEXT_TRUNC:%.*]], [[LOOP_LATCH]] ] -; CHECK-NEXT: br i1 [[C]], label [[LOOP_LATCH]], label [[THEN:%.*]] -; CHECK: then: -; CHECK-NEXT: [[UD:%.*]] = udiv i8 [[NARROW_IV]], [[X]] -; CHECK-NEXT: [[UD_EXT:%.*]] = zext i8 [[UD]] to i16 -; CHECK-NEXT: 
[[SD:%.*]] = sdiv i16 [[UD_EXT]], [[Y]] -; CHECK-NEXT: [[SD_EXT:%.*]] = sext i16 [[SD]] to i32 -; CHECK-NEXT: br label [[LOOP_LATCH]] -; CHECK: loop.latch: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ 0, [[LOOP_HEADER]] ], [ [[SD_EXT]], [[THEN]] ] -; CHECK-NEXT: [[IV_NEXT]] = add nsw i16 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i16 [[IV_NEXT]], 0 -; CHECK-NEXT: [[IV_NEXT_TRUNC]] = trunc i16 [[IV_NEXT]] to i8 -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP_HEADER]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-NEXT: [[TMP16:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.nxv8i1( [[TMP15]], i1 false) +; CHECK-NEXT: [[TMP17:%.*]] = sub i64 [[TMP16]], 1 +; CHECK-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP19:%.*]] = mul nuw i64 [[TMP18]], 8 +; CHECK-NEXT: [[TMP20:%.*]] = mul i64 [[TMP19]], 0 +; CHECK-NEXT: [[MERGE_LCSSA:%.*]] = extractelement [[PREDPHI]], i64 [[TMP17]] +; CHECK-NEXT: br label [[LOOP_LATCH:%.*]] ; CHECK: exit: -; CHECK-NEXT: [[MERGE_LCSSA:%.*]] = phi i32 [ [[MERGE]], [[LOOP_LATCH]] ], [ [[TMP19]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i32 [[MERGE_LCSSA]] ; ; FIXED-LABEL: @udiv_sdiv_with_invariant_divisors( diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/first-order-recurrence-scalable-vf1.ll b/llvm/test/Transforms/LoopVectorize/RISCV/first-order-recurrence-scalable-vf1.ll index 7eb3d7fc5a36d..97c9fd75a42bb 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/first-order-recurrence-scalable-vf1.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/first-order-recurrence-scalable-vf1.ll @@ -7,55 +7,54 @@ target triple = "riscv64-unknown-linux-gnu" define i64 @pr97452_scalable_vf1_for(ptr %src, ptr noalias %dst) #0 { ; CHECK-LABEL: define i64 @pr97452_scalable_vf1_for( ; CHECK-SAME: ptr [[SRC:%.*]], ptr noalias [[DST:%.*]]) #[[ATTR0:[0-9]+]] { -; CHECK-NEXT: [[ENTRY:.*]]: -; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 23, [[TMP0]] -; CHECK-NEXT: br i1 
[[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: br label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: -; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 23, [[TMP1]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 23, [[N_MOD_VF]] +; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 +; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[TMP1]] to i32 ; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vscale.i32() -; CHECK-NEXT: [[TMP4:%.*]] = sub i32 [[TMP3]], 1 -; CHECK-NEXT: [[VECTOR_RECUR_INIT:%.*]] = insertelement poison, i64 0, i32 [[TMP4]] +; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i32 [[TMP3]], 2 +; CHECK-NEXT: [[TMP5:%.*]] = sub i32 [[TMP4]], 1 +; CHECK-NEXT: [[VECTOR_RECUR_INIT:%.*]] = insertelement poison, i64 0, i32 [[TMP5]] ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; CHECK-NEXT: [[VECTOR_RECUR:%.*]] = phi [ [[VECTOR_RECUR_INIT]], %[[VECTOR_PH]] ], [ [[WIDE_LOAD:%.*]], %[[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 [[INDEX]] -; CHECK-NEXT: [[WIDE_LOAD]] = load , ptr [[TMP5]], align 8 -; CHECK-NEXT: [[TMP7:%.*]] = call @llvm.vector.splice.nxv1i64( [[VECTOR_RECUR]], [[WIDE_LOAD]], i32 -1) -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[INDEX]] -; CHECK-NEXT: store [[TMP7]], ptr [[TMP8]], align 8 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]] -; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VECTOR_RECUR:%.*]] = phi [ [[VECTOR_RECUR_INIT]], 
%[[VECTOR_PH]] ], [ [[VP_OP_LOAD:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 23, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[PREV_EVL:%.*]] = phi i32 [ [[TMP2]], %[[VECTOR_PH]] ], [ [[TMP6:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP6]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i32 [[TMP6]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer +; CHECK-NEXT: [[TMP7:%.*]] = call @llvm.stepvector.nxv2i32() +; CHECK-NEXT: [[TMP8:%.*]] = icmp uge [[TMP7]], [[BROADCAST_SPLAT]] +; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 [[EVL_BASED_IV]] +; CHECK-NEXT: [[VP_OP_LOAD]] = call @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP9]], splat (i1 true), i32 [[TMP6]]) +; CHECK-NEXT: [[TMP10:%.*]] = call @llvm.experimental.vp.splice.nxv2i64( [[VECTOR_RECUR]], [[VP_OP_LOAD]], i32 -1, splat (i1 true), i32 [[PREV_EVL]], i32 [[TMP6]]) +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[EVL_BASED_IV]] +; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0( [[TMP10]], ptr align 8 [[TMP11]], splat (i1 true), i32 [[TMP6]]) +; CHECK-NEXT: [[TMP12:%.*]] = zext i32 [[TMP6]] to i64 +; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP12]], [[EVL_BASED_IV]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP12]] +; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 +; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.vscale.i32() -; CHECK-NEXT: [[TMP12:%.*]] = sub i32 [[TMP11]], 1 -; CHECK-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement [[WIDE_LOAD]], i32 [[TMP12]] -; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.vscale.i32() -; CHECK-NEXT: [[TMP15:%.*]] = sub i32 [[TMP14]], 1 -; CHECK-NEXT: 
[[TMP16:%.*]] = extractelement [[TMP7]], i32 [[TMP15]] -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 23, [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] -; CHECK: [[SCALAR_PH]]: -; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i64 [ [[VECTOR_RECUR_EXTRACT]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[FOR:%.*]] = phi i64 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[L:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: [[L]] = load i64, ptr [[GEP_SRC]], align 8 -; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[IV]] -; CHECK-NEXT: store i64 [[FOR]], ptr [[GEP_DST]], align 8 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 22 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.nxv2i1( [[TMP8]], i1 false) +; CHECK-NEXT: [[TMP15:%.*]] = sub i64 [[TMP14]], 1 +; CHECK-NEXT: [[TMP16:%.*]] = sub i64 [[TMP15]], 1 +; CHECK-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP17]], 2 +; CHECK-NEXT: [[TMP19:%.*]] = mul i64 [[TMP18]], 0 +; CHECK-NEXT: [[TMP20:%.*]] = extractelement [[VP_OP_LOAD]], i64 [[TMP16]] +; CHECK-NEXT: [[TMP21:%.*]] = call i32 @llvm.vscale.i32() +; CHECK-NEXT: [[TMP22:%.*]] = mul nuw i32 [[TMP21]], 2 +; CHECK-NEXT: [[TMP23:%.*]] = sub i32 [[TMP22]], 1 +; CHECK-NEXT: [[TMP24:%.*]] = extractelement [[VECTOR_RECUR]], i32 [[TMP23]] +; CHECK-NEXT: [[TMP25:%.*]] = icmp eq i64 [[TMP15]], 0 +; CHECK-NEXT: [[TMP26:%.*]] = select i1 [[TMP25]], i64 [[TMP24]], i64 
[[TMP20]] +; CHECK-NEXT: br label %[[EXIT:.*]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[RES:%.*]] = phi i64 [ [[FOR]], %[[LOOP]] ], [ [[TMP16]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i64 [[RES]] +; CHECK-NEXT: ret i64 [[TMP26]] ; entry: br label %loop @@ -81,5 +80,4 @@ attributes #0 = { "target-features"="+64bit,+v,+zvl128b,+zvl256b" } ; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} ; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} ; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} -; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]} ;. diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/partial-reduce-dot-product.ll b/llvm/test/Transforms/LoopVectorize/RISCV/partial-reduce-dot-product.ll index 8d3026e63748a..1ae1ba6795c01 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/partial-reduce-dot-product.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/partial-reduce-dot-product.ll @@ -87,13 +87,13 @@ define i32 @vqdot(ptr %a, ptr %b) #0 { ; FIXED-V-NEXT: [[VEC_PHI:%.*]] = phi <8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP12:%.*]], [[VECTOR_BODY]] ] ; FIXED-V-NEXT: [[VEC_PHI1:%.*]] = phi <8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP13:%.*]], [[VECTOR_BODY]] ] ; FIXED-V-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; FIXED-V-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i32 8 +; FIXED-V-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i64 8 ; FIXED-V-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i8>, ptr [[TMP0]], align 1 ; FIXED-V-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i8>, ptr [[TMP2]], align 1 ; FIXED-V-NEXT: [[TMP3:%.*]] = sext <8 x i8> [[WIDE_LOAD]] to <8 x i32> ; FIXED-V-NEXT: [[TMP4:%.*]] = sext <8 x i8> [[WIDE_LOAD2]] to <8 x i32> ; FIXED-V-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; FIXED-V-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i32 8 +; FIXED-V-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i64 8 ; FIXED-V-NEXT: 
[[WIDE_LOAD3:%.*]] = load <8 x i8>, ptr [[TMP5]], align 1 ; FIXED-V-NEXT: [[WIDE_LOAD4:%.*]] = load <8 x i8>, ptr [[TMP7]], align 1 ; FIXED-V-NEXT: [[TMP8:%.*]] = sext <8 x i8> [[WIDE_LOAD3]] to <8 x i32> @@ -123,11 +123,11 @@ define i32 @vqdot(ptr %a, ptr %b) #0 { ; FIXED-ZVQDOTQ-NEXT: [[VEC_PHI:%.*]] = phi <2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; FIXED-ZVQDOTQ-NEXT: [[VEC_PHI1:%.*]] = phi <2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE5:%.*]], [[VECTOR_BODY]] ] ; FIXED-ZVQDOTQ-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; FIXED-ZVQDOTQ-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i32 8 +; FIXED-ZVQDOTQ-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i64 8 ; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i8>, ptr [[TMP0]], align 1 ; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i8>, ptr [[TMP2]], align 1 ; FIXED-ZVQDOTQ-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; FIXED-ZVQDOTQ-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i32 8 +; FIXED-ZVQDOTQ-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i64 8 ; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD3:%.*]] = load <8 x i8>, ptr [[TMP5]], align 1 ; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD4:%.*]] = load <8 x i8>, ptr [[TMP7]], align 1 ; FIXED-ZVQDOTQ-NEXT: [[TMP8:%.*]] = sext <8 x i8> [[WIDE_LOAD3]] to <8 x i32> @@ -280,13 +280,13 @@ define i32 @vqdotu(ptr %a, ptr %b) #0 { ; FIXED-V-NEXT: [[VEC_PHI:%.*]] = phi <8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP12:%.*]], [[VECTOR_BODY]] ] ; FIXED-V-NEXT: [[VEC_PHI1:%.*]] = phi <8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP13:%.*]], [[VECTOR_BODY]] ] ; FIXED-V-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; FIXED-V-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i32 8 +; FIXED-V-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i64 8 ; FIXED-V-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i8>, ptr [[TMP0]], align 1 ; 
FIXED-V-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i8>, ptr [[TMP2]], align 1 ; FIXED-V-NEXT: [[TMP3:%.*]] = zext <8 x i8> [[WIDE_LOAD]] to <8 x i32> ; FIXED-V-NEXT: [[TMP4:%.*]] = zext <8 x i8> [[WIDE_LOAD2]] to <8 x i32> ; FIXED-V-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; FIXED-V-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i32 8 +; FIXED-V-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i64 8 ; FIXED-V-NEXT: [[WIDE_LOAD3:%.*]] = load <8 x i8>, ptr [[TMP5]], align 1 ; FIXED-V-NEXT: [[WIDE_LOAD4:%.*]] = load <8 x i8>, ptr [[TMP7]], align 1 ; FIXED-V-NEXT: [[TMP8:%.*]] = zext <8 x i8> [[WIDE_LOAD3]] to <8 x i32> @@ -316,11 +316,11 @@ define i32 @vqdotu(ptr %a, ptr %b) #0 { ; FIXED-ZVQDOTQ-NEXT: [[VEC_PHI:%.*]] = phi <2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; FIXED-ZVQDOTQ-NEXT: [[VEC_PHI1:%.*]] = phi <2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE5:%.*]], [[VECTOR_BODY]] ] ; FIXED-ZVQDOTQ-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; FIXED-ZVQDOTQ-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i32 8 +; FIXED-ZVQDOTQ-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i64 8 ; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i8>, ptr [[TMP0]], align 1 ; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i8>, ptr [[TMP2]], align 1 ; FIXED-ZVQDOTQ-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; FIXED-ZVQDOTQ-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i32 8 +; FIXED-ZVQDOTQ-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i64 8 ; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD3:%.*]] = load <8 x i8>, ptr [[TMP5]], align 1 ; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD4:%.*]] = load <8 x i8>, ptr [[TMP7]], align 1 ; FIXED-ZVQDOTQ-NEXT: [[TMP8:%.*]] = zext <8 x i8> [[WIDE_LOAD3]] to <8 x i32> @@ -473,13 +473,13 @@ define i32 @vqdotsu(ptr %a, ptr %b) #0 { ; FIXED-V-NEXT: [[VEC_PHI:%.*]] = phi <8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ 
[[TMP12:%.*]], [[VECTOR_BODY]] ] ; FIXED-V-NEXT: [[VEC_PHI1:%.*]] = phi <8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP13:%.*]], [[VECTOR_BODY]] ] ; FIXED-V-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; FIXED-V-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i32 8 +; FIXED-V-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i64 8 ; FIXED-V-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i8>, ptr [[TMP0]], align 1 ; FIXED-V-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i8>, ptr [[TMP2]], align 1 ; FIXED-V-NEXT: [[TMP3:%.*]] = zext <8 x i8> [[WIDE_LOAD]] to <8 x i32> ; FIXED-V-NEXT: [[TMP4:%.*]] = zext <8 x i8> [[WIDE_LOAD2]] to <8 x i32> ; FIXED-V-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; FIXED-V-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i32 8 +; FIXED-V-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i64 8 ; FIXED-V-NEXT: [[WIDE_LOAD3:%.*]] = load <8 x i8>, ptr [[TMP5]], align 1 ; FIXED-V-NEXT: [[WIDE_LOAD4:%.*]] = load <8 x i8>, ptr [[TMP7]], align 1 ; FIXED-V-NEXT: [[TMP8:%.*]] = sext <8 x i8> [[WIDE_LOAD3]] to <8 x i32> @@ -509,11 +509,11 @@ define i32 @vqdotsu(ptr %a, ptr %b) #0 { ; FIXED-ZVQDOTQ-NEXT: [[VEC_PHI:%.*]] = phi <2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; FIXED-ZVQDOTQ-NEXT: [[VEC_PHI1:%.*]] = phi <2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE5:%.*]], [[VECTOR_BODY]] ] ; FIXED-ZVQDOTQ-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; FIXED-ZVQDOTQ-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i32 8 +; FIXED-ZVQDOTQ-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i64 8 ; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i8>, ptr [[TMP0]], align 1 ; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i8>, ptr [[TMP2]], align 1 ; FIXED-ZVQDOTQ-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; FIXED-ZVQDOTQ-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i32 8 +; FIXED-ZVQDOTQ-NEXT: 
[[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i64 8 ; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD3:%.*]] = load <8 x i8>, ptr [[TMP5]], align 1 ; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD4:%.*]] = load <8 x i8>, ptr [[TMP7]], align 1 ; FIXED-ZVQDOTQ-NEXT: [[TMP9:%.*]] = sext <8 x i8> [[WIDE_LOAD3]] to <8 x i32> @@ -665,13 +665,13 @@ define i32 @vqdotsu2(ptr %a, ptr %b) #0 { ; FIXED-V-NEXT: [[VEC_PHI:%.*]] = phi <8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP12:%.*]], [[VECTOR_BODY]] ] ; FIXED-V-NEXT: [[VEC_PHI1:%.*]] = phi <8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP13:%.*]], [[VECTOR_BODY]] ] ; FIXED-V-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; FIXED-V-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i32 8 +; FIXED-V-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i64 8 ; FIXED-V-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i8>, ptr [[TMP0]], align 1 ; FIXED-V-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i8>, ptr [[TMP2]], align 1 ; FIXED-V-NEXT: [[TMP3:%.*]] = sext <8 x i8> [[WIDE_LOAD]] to <8 x i32> ; FIXED-V-NEXT: [[TMP4:%.*]] = sext <8 x i8> [[WIDE_LOAD2]] to <8 x i32> ; FIXED-V-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; FIXED-V-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i32 8 +; FIXED-V-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i64 8 ; FIXED-V-NEXT: [[WIDE_LOAD3:%.*]] = load <8 x i8>, ptr [[TMP5]], align 1 ; FIXED-V-NEXT: [[WIDE_LOAD4:%.*]] = load <8 x i8>, ptr [[TMP7]], align 1 ; FIXED-V-NEXT: [[TMP8:%.*]] = zext <8 x i8> [[WIDE_LOAD3]] to <8 x i32> @@ -701,11 +701,11 @@ define i32 @vqdotsu2(ptr %a, ptr %b) #0 { ; FIXED-ZVQDOTQ-NEXT: [[VEC_PHI:%.*]] = phi <2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; FIXED-ZVQDOTQ-NEXT: [[VEC_PHI1:%.*]] = phi <2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE5:%.*]], [[VECTOR_BODY]] ] ; FIXED-ZVQDOTQ-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; FIXED-ZVQDOTQ-NEXT: [[TMP2:%.*]] = 
getelementptr i8, ptr [[TMP0]], i32 8 +; FIXED-ZVQDOTQ-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i64 8 ; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i8>, ptr [[TMP0]], align 1 ; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i8>, ptr [[TMP2]], align 1 ; FIXED-ZVQDOTQ-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; FIXED-ZVQDOTQ-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i32 8 +; FIXED-ZVQDOTQ-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i64 8 ; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD3:%.*]] = load <8 x i8>, ptr [[TMP5]], align 1 ; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD4:%.*]] = load <8 x i8>, ptr [[TMP7]], align 1 ; FIXED-ZVQDOTQ-NEXT: [[TMP9:%.*]] = zext <8 x i8> [[WIDE_LOAD3]] to <8 x i32> diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/reductions.ll b/llvm/test/Transforms/LoopVectorize/RISCV/reductions.ll index 735fb769de8b9..671a929e6fa35 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/reductions.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/reductions.ll @@ -69,7 +69,7 @@ define i32 @sub(ptr %a, i64 %n) { ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP4]], [[INDEX]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP4]] ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[TMP3]]) ; CHECK-NEXT: br label %[[EXIT:.*]] @@ -116,7 +116,7 @@ define i32 @addsub(ptr %a, ptr %b, i64 %n) { ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP6]], [[INDEX]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP6]] ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; 
CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[TMP5]]) ; CHECK-NEXT: br label %[[EXIT:.*]] @@ -166,7 +166,7 @@ define i32 @or(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) { ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[INDEX]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]] ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.or.nxv4i32( [[TMP8]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] @@ -212,7 +212,7 @@ define i32 @and(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) { ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[INDEX]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]] ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.and.nxv4i32( [[TMP8]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] @@ -258,7 +258,7 @@ define i32 @xor(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) { ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[INDEX]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]] ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], 
!llvm.loop [[LOOP7:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.xor.nxv4i32( [[TMP8]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] @@ -305,7 +305,7 @@ define i32 @smin(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) { ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]] ; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP12:%.*]] = call i32 @llvm.vector.reduce.smin.nxv4i32( [[TMP9]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] @@ -353,7 +353,7 @@ define i32 @umax(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) { ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]] ; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP12:%.*]] = call i32 @llvm.vector.reduce.umax.nxv4i32( [[TMP9]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] @@ -400,7 +400,7 @@ define float @fadd_fast(ptr noalias nocapture readonly %a, i64 %n) { ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[INDEX]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]] ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; 
CHECK-NEXT: [[TMP11:%.*]] = call fast float @llvm.vector.reduce.fadd.nxv4f32(float 0.000000e+00, [[TMP8]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] @@ -444,7 +444,7 @@ define half @fadd_fast_half_zvfh(ptr noalias nocapture readonly %a, i64 %n) "tar ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[INDEX]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]] ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP11:%.*]] = call fast half @llvm.vector.reduce.fadd.nxv8f16(half 0xH0000, [[TMP8]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] @@ -483,14 +483,14 @@ define half @fadd_fast_half_zvfhmin(ptr noalias nocapture readonly %a, i64 %n) " ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <16 x half> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP2:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <16 x half> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP3:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds half, ptr [[TMP0]], i32 16 +; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds half, ptr [[TMP0]], i64 16 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x half>, ptr [[TMP0]], align 4 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x half>, ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP2]] = fadd fast <16 x half> [[WIDE_LOAD]], [[VEC_PHI]] ; CHECK-NEXT: [[TMP3]] = fadd fast <16 x half> [[WIDE_LOAD2]], [[VEC_PHI1]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 ; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP4]], label 
%[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd fast <16 x half> [[TMP3]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call fast half @llvm.vector.reduce.fadd.v16f16(half 0xH0000, <16 x half> [[BIN_RDX]]) @@ -508,7 +508,7 @@ define half @fadd_fast_half_zvfhmin(ptr noalias nocapture readonly %a, i64 %n) " ; CHECK-NEXT: [[ADD]] = fadd fast half [[TMP6]], [[SUM_07]] ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK: [[FOR_END]]: ; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi half [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP5]], %[[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret half [[ADD_LCSSA]] @@ -545,14 +545,14 @@ define bfloat @fadd_fast_bfloat(ptr noalias nocapture readonly %a, i64 %n) "targ ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <16 x bfloat> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP2:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <16 x bfloat> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP3:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds bfloat, ptr [[A]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds bfloat, ptr [[TMP0]], i32 16 +; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds bfloat, ptr [[TMP0]], i64 16 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x bfloat>, ptr [[TMP0]], align 4 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x bfloat>, ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP2]] = fadd fast <16 x bfloat> [[WIDE_LOAD]], [[VEC_PHI]] ; CHECK-NEXT: [[TMP3]] = fadd fast <16 x bfloat> [[WIDE_LOAD2]], [[VEC_PHI1]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 ; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: 
br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd fast <16 x bfloat> [[TMP3]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call fast bfloat @llvm.vector.reduce.fadd.v16bf16(bfloat 0xR0000, <16 x bfloat> [[BIN_RDX]]) @@ -570,7 +570,7 @@ define bfloat @fadd_fast_bfloat(ptr noalias nocapture readonly %a, i64 %n) "targ ; CHECK-NEXT: [[ADD]] = fadd fast bfloat [[TMP6]], [[SUM_07]] ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK: [[FOR_END]]: ; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi bfloat [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP5]], %[[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret bfloat [[ADD_LCSSA]] @@ -615,7 +615,7 @@ define float @fmin_fast(ptr noalias nocapture readonly %a, i64 %n) #0 { ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]] ; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP12:%.*]] = call float @llvm.vector.reduce.fmin.nxv4f32( [[TMP9]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] @@ -661,7 +661,7 @@ define half @fmin_fast_half_zvfhmin(ptr noalias nocapture readonly %a, i64 %n) # ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]] ; CHECK-NEXT: 
[[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP12:%.*]] = call half @llvm.vector.reduce.fmin.nxv8f16( [[TMP9]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] @@ -707,7 +707,7 @@ define bfloat @fmin_fast_bfloat_zvfbfmin(ptr noalias nocapture readonly %a, i64 ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]] ; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP12:%.*]] = call bfloat @llvm.vector.reduce.fmin.nxv8bf16( [[TMP9]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] @@ -755,7 +755,7 @@ define float @fmax_fast(ptr noalias nocapture readonly %a, i64 %n) #0 { ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]] ; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP12:%.*]] = call fast float @llvm.vector.reduce.fmax.nxv4f32( [[TMP9]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] @@ -801,7 +801,7 @@ define half @fmax_fast_half_zvfhmin(ptr noalias nocapture readonly %a, i64 %n) # ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]] ; CHECK-NEXT: [[TMP13:%.*]] = 
icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP12:%.*]] = call fast half @llvm.vector.reduce.fmax.nxv8f16( [[TMP9]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] @@ -847,7 +847,7 @@ define bfloat @fmax_fast_bfloat_zvfbfmin(ptr noalias nocapture readonly %a, i64 ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]] ; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP12:%.*]] = call fast bfloat @llvm.vector.reduce.fmax.nxv8bf16( [[TMP9]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] @@ -891,14 +891,14 @@ define i32 @mul(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) { ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <8 x i32> [ , %[[VECTOR_PH]] ], [ [[TMP2:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <8 x i32> [ splat (i32 1), %[[VECTOR_PH]] ], [ [[TMP3:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i32 8 +; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i64 8 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i32>, ptr [[TMP0]], align 4 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i32>, ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP2]] = mul <8 x i32> [[WIDE_LOAD]], [[VEC_PHI]] ; CHECK-NEXT: [[TMP3]] = mul <8 x i32> [[WIDE_LOAD2]], [[VEC_PHI1]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-NEXT: [[TMP4:%.*]] = icmp 
eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[BIN_RDX:%.*]] = mul <8 x i32> [[TMP3]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> [[BIN_RDX]]) @@ -916,7 +916,7 @@ define i32 @mul(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) { ; CHECK-NEXT: [[MUL]] = mul nsw i32 [[TMP6]], [[SUM_07]] ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] ; CHECK: [[FOR_END]]: ; CHECK-NEXT: [[MUL_LCSSA:%.*]] = phi i32 [ [[MUL]], %[[FOR_BODY]] ], [ [[TMP5]], %[[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i32 [[MUL_LCSSA]] @@ -963,7 +963,7 @@ define i32 @memory_dependence(ptr noalias nocapture %a, ptr noalias nocapture re ; CHECK-NEXT: [[TMP5]] = mul <8 x i32> [[WIDE_LOAD1]], [[VEC_PHI]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> [[TMP5]]) ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] @@ -986,7 +986,7 @@ define i32 @memory_dependence(ptr noalias nocapture %a, ptr noalias nocapture re ; CHECK-NEXT: [[MUL]] = mul nsw i32 [[TMP9]], [[SUM]] ; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I]], 1 ; 
CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]] ; CHECK: [[FOR_END]]: ; CHECK-NEXT: [[MUL_LCSSA:%.*]] = phi i32 [ [[MUL]], %[[FOR_BODY]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i32 [[MUL_LCSSA]] @@ -1036,7 +1036,7 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n) { ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]] ; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP16:%.*]] = call reassoc float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, [[TMP9]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] @@ -1084,7 +1084,7 @@ define half @fmuladd_f16_zvfh(ptr %a, ptr %b, i64 %n) "target-features"="+zvfh" ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]] ; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP16:%.*]] = call reassoc half @llvm.vector.reduce.fadd.nxv8f16(half 0xH8000, [[TMP9]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] @@ -1128,18 +1128,18 @@ define half @fmuladd_f16_zvfhmin(ptr %a, ptr %b, i64 %n) "target-features"="+zvf ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <16 x half> [ , %[[VECTOR_PH]] ], [ [[TMP4:%.*]], %[[VECTOR_BODY]] ] ; 
CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <16 x half> [ splat (half 0xH8000), %[[VECTOR_PH]] ], [ [[TMP5:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds half, ptr [[TMP0]], i32 16 +; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds half, ptr [[TMP0]], i64 16 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x half>, ptr [[TMP0]], align 4 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x half>, ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds half, ptr [[B]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds half, ptr [[TMP2]], i32 16 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds half, ptr [[TMP2]], i64 16 ; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x half>, ptr [[TMP2]], align 4 ; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x half>, ptr [[TMP3]], align 4 ; CHECK-NEXT: [[TMP4]] = call reassoc <16 x half> @llvm.fmuladd.v16f16(<16 x half> [[WIDE_LOAD]], <16 x half> [[WIDE_LOAD3]], <16 x half> [[VEC_PHI]]) ; CHECK-NEXT: [[TMP5]] = call reassoc <16 x half> @llvm.fmuladd.v16f16(<16 x half> [[WIDE_LOAD2]], <16 x half> [[WIDE_LOAD4]], <16 x half> [[VEC_PHI1]]) ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd reassoc <16 x half> [[TMP5]], [[TMP4]] ; CHECK-NEXT: [[TMP7:%.*]] = call reassoc half @llvm.vector.reduce.fadd.v16f16(half 0xH8000, <16 x half> [[BIN_RDX]]) @@ -1159,7 +1159,7 @@ define half @fmuladd_f16_zvfhmin(ptr %a, ptr %b, i64 %n) "target-features"="+zvf ; CHECK-NEXT: [[MULADD]] = tail call reassoc half @llvm.fmuladd.f16(half [[TMP8]], half [[TMP9]], half 
[[SUM_07]]) ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]] ; CHECK: [[FOR_END]]: ; CHECK-NEXT: [[MULADD_LCSSA:%.*]] = phi half [ [[MULADD]], %[[FOR_BODY]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret half [[MULADD_LCSSA]] @@ -1198,18 +1198,18 @@ define bfloat @fmuladd_bf16(ptr %a, ptr %b, i64 %n) "target-features"="+zvfbfmin ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <16 x bfloat> [ , %[[VECTOR_PH]] ], [ [[TMP4:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <16 x bfloat> [ splat (bfloat 0xR8000), %[[VECTOR_PH]] ], [ [[TMP5:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds bfloat, ptr [[A]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds bfloat, ptr [[TMP0]], i32 16 +; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds bfloat, ptr [[TMP0]], i64 16 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x bfloat>, ptr [[TMP0]], align 4 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x bfloat>, ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds bfloat, ptr [[B]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds bfloat, ptr [[TMP2]], i32 16 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds bfloat, ptr [[TMP2]], i64 16 ; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x bfloat>, ptr [[TMP2]], align 4 ; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x bfloat>, ptr [[TMP3]], align 4 ; CHECK-NEXT: [[TMP4]] = call reassoc <16 x bfloat> @llvm.fmuladd.v16bf16(<16 x bfloat> [[WIDE_LOAD]], <16 x bfloat> [[WIDE_LOAD3]], <16 x bfloat> [[VEC_PHI]]) ; CHECK-NEXT: [[TMP5]] = call reassoc <16 x bfloat> @llvm.fmuladd.v16bf16(<16 x bfloat> [[WIDE_LOAD2]], <16 x bfloat> [[WIDE_LOAD4]], <16 x bfloat> [[VEC_PHI1]]) ; 
CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP31:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd reassoc <16 x bfloat> [[TMP5]], [[TMP4]] ; CHECK-NEXT: [[TMP7:%.*]] = call reassoc bfloat @llvm.vector.reduce.fadd.v16bf16(bfloat 0xR8000, <16 x bfloat> [[BIN_RDX]]) @@ -1229,7 +1229,7 @@ define bfloat @fmuladd_bf16(ptr %a, ptr %b, i64 %n) "target-features"="+zvfbfmin ; CHECK-NEXT: [[MULADD]] = tail call reassoc bfloat @llvm.fmuladd.bf16(bfloat [[TMP8]], bfloat [[TMP9]], bfloat [[SUM_07]]) ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP31:![0-9]+]] ; CHECK: [[FOR_END]]: ; CHECK-NEXT: [[MULADD_LCSSA:%.*]] = phi bfloat [ [[MULADD]], %[[FOR_BODY]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret bfloat [[MULADD_LCSSA]] diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll b/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll index 3c90908b0a08f..361ff00674758 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll @@ -71,7 +71,7 @@ define void @indexed_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP12]], [[EVL_BASED_IV]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP12]] ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label 
[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.end: @@ -115,7 +115,7 @@ define i64 @indexed_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i64 ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP15]], [[EVL_BASED_IV]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]] ; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64( [[TMP11]]) ; CHECK-NEXT: br label [[FOR_BODY:%.*]] @@ -159,7 +159,7 @@ define void @splat_int(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP7]], [[EVL_BASED_IV]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]] ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.end: @@ -199,7 +199,7 @@ define void @uniform_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP9]], [[EVL_BASED_IV]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]] ; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; 
CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.end: @@ -224,43 +224,37 @@ for.end: define i64 @uniform_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i64 %n) { ; CHECK-LABEL: @uniform_load( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 1 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]] +; CHECK-NEXT: br label [[ENTRY:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1025, [[ENTRY]] ], [ [[AVL_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) +; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement poison, i32 [[TMP0]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT1:%.*]] = shufflevector [[BROADCAST_SPLATINSERT1]], poison, zeroinitializer +; CHECK-NEXT: [[TMP1:%.*]] = call @llvm.stepvector.nxv2i32() +; CHECK-NEXT: [[TMP2:%.*]] = icmp uge [[TMP1]], [[BROADCAST_SPLAT1]] ; CHECK-NEXT: [[V:%.*]] = load i64, ptr [[B:%.*]], align 8 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i64 [[V]], i64 0 ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[IV]] -; CHECK-NEXT: store [[BROADCAST_SPLAT]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], [[TMP3]] -; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[IV_NEXT]], 
[[N_VEC]] -; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0( [[BROADCAST_SPLAT]], ptr align 8 [[ARRAYIDX]], splat (i1 true), i32 [[TMP0]]) +; CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP0]] to i64 +; CHECK-NEXT: [[IV_NEXT]] = add nuw i64 [[TMP5]], [[IV]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP5]] +; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 +; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] -; CHECK: scalar.ph: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1:%.*]] ] +; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.nxv2i1( [[TMP2]], i1 false) +; CHECK-NEXT: [[TMP8:%.*]] = sub i64 [[TMP7]], 1 +; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 2 +; CHECK-NEXT: [[TMP11:%.*]] = mul i64 [[TMP10]], 0 +; CHECK-NEXT: [[TMP12:%.*]] = extractelement [[BROADCAST_SPLAT]], i64 [[TMP8]] ; CHECK-NEXT: br label [[FOR_BODY1:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ] -; CHECK-NEXT: [[V1:%.*]] = load i64, ptr [[B]], align 8 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV1]] -; CHECK-NEXT: store i64 [[V1]], ptr [[ARRAYIDX1]], align 8 -; CHECK-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], 1025 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: for.end: -; CHECK-NEXT: [[V_LCSSA:%.*]] = phi i64 [ [[V1]], [[FOR_BODY1]] ], [ [[V]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: 
ret i64 [[V_LCSSA]] +; CHECK-NEXT: ret i64 [[TMP12]] ; entry: br label %for.body @@ -299,7 +293,7 @@ define void @vector_add_trip1024(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP8]], [[EVL_BASED_IV]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.end: diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll index a3bec999425a3..d6936a4871ed5 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll @@ -400,61 +400,54 @@ for.end: define i32 @FOR_reduction(ptr noalias %A, ptr noalias %B, i64 %TC) { ; IF-EVL-LABEL: define i32 @FOR_reduction( ; IF-EVL-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[TC:%.*]]) #[[ATTR0]] { -; IF-EVL-NEXT: [[ENTRY:.*]]: -; IF-EVL-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() -; IF-EVL-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP9]], 2 -; IF-EVL-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TC]], [[TMP1]] -; IF-EVL-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; IF-EVL-NEXT: [[ENTRY:.*:]] +; IF-EVL-NEXT: br label %[[VECTOR_PH:.*]] ; IF-EVL: [[VECTOR_PH]]: ; IF-EVL-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() ; IF-EVL-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 -; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TC]], [[TMP3]] -; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[TC]], [[N_MOD_VF]] +; IF-EVL-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32 ; 
IF-EVL-NEXT: [[TMP6:%.*]] = call i32 @llvm.vscale.i32() ; IF-EVL-NEXT: [[TMP7:%.*]] = mul nuw i32 [[TMP6]], 4 ; IF-EVL-NEXT: [[TMP8:%.*]] = sub i32 [[TMP7]], 1 ; IF-EVL-NEXT: [[VECTOR_RECUR_INIT:%.*]] = insertelement poison, i32 33, i32 [[TMP8]] ; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]] ; IF-EVL: [[VECTOR_BODY]]: -; IF-EVL-NEXT: [[INDVARS:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; IF-EVL-NEXT: [[INDVARS:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] ; IF-EVL-NEXT: [[VECTOR_RECUR:%.*]] = phi [ [[VECTOR_RECUR_INIT]], %[[VECTOR_PH]] ], [ [[WIDE_LOAD:%.*]], %[[VECTOR_BODY]] ] +; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[TC]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; IF-EVL-NEXT: [[PREV_EVL:%.*]] = phi i32 [ [[TMP4]], %[[VECTOR_PH]] ], [ [[TMP9:%.*]], %[[VECTOR_BODY]] ] +; IF-EVL-NEXT: [[TMP9]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) +; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i32 [[TMP9]], i64 0 +; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer +; IF-EVL-NEXT: [[TMP22:%.*]] = call @llvm.stepvector.nxv4i32() +; IF-EVL-NEXT: [[TMP23:%.*]] = icmp uge [[TMP22]], [[BROADCAST_SPLAT]] ; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[INDVARS]] -; IF-EVL-NEXT: [[WIDE_LOAD]] = load , ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[TMP10:%.*]] = call @llvm.vector.splice.nxv4i32( [[VECTOR_RECUR]], [[WIDE_LOAD]], i32 -1) +; IF-EVL-NEXT: [[WIDE_LOAD]] = call @llvm.vp.load.nxv4i32.p0(ptr align 4 [[ARRAYIDX]], splat (i1 true), i32 [[TMP9]]) +; IF-EVL-NEXT: [[TMP10:%.*]] = call @llvm.experimental.vp.splice.nxv4i32( [[VECTOR_RECUR]], [[WIDE_LOAD]], i32 -1, splat (i1 true), i32 [[PREV_EVL]], i32 [[TMP9]]) ; IF-EVL-NEXT: [[TMP11:%.*]] = add nsw [[TMP10]], [[WIDE_LOAD]] ; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], 
i64 [[INDVARS]] -; IF-EVL-NEXT: store [[TMP11]], ptr [[TMP12]], align 4 -; IF-EVL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDVARS]], [[TMP3]] -; IF-EVL-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0( [[TMP11]], ptr align 4 [[TMP12]], splat (i1 true), i32 [[TMP9]]) +; IF-EVL-NEXT: [[TMP13:%.*]] = zext i32 [[TMP9]] to i64 +; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP13]], [[INDVARS]] +; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]] +; IF-EVL-NEXT: [[TMP24:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 +; IF-EVL-NEXT: br i1 [[TMP24]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; IF-EVL: [[MIDDLE_BLOCK]]: +; IF-EVL-NEXT: [[TMP27:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.nxv4i1( [[TMP23]], i1 false) +; IF-EVL-NEXT: [[TMP28:%.*]] = sub i64 [[TMP27]], 1 +; IF-EVL-NEXT: [[TMP17:%.*]] = sub i64 [[TMP28]], 1 +; IF-EVL-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64() +; IF-EVL-NEXT: [[TMP19:%.*]] = mul nuw i64 [[TMP18]], 4 +; IF-EVL-NEXT: [[TMP20:%.*]] = mul i64 [[TMP19]], 0 +; IF-EVL-NEXT: [[TMP21:%.*]] = extractelement [[WIDE_LOAD]], i64 [[TMP17]] ; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vscale.i32() ; IF-EVL-NEXT: [[TMP15:%.*]] = mul nuw i32 [[TMP14]], 4 ; IF-EVL-NEXT: [[TMP16:%.*]] = sub i32 [[TMP15]], 1 -; IF-EVL-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement [[WIDE_LOAD]], i32 [[TMP16]] -; IF-EVL-NEXT: [[TMP17:%.*]] = call i32 @llvm.vscale.i32() -; IF-EVL-NEXT: [[TMP18:%.*]] = mul nuw i32 [[TMP17]], 4 -; IF-EVL-NEXT: [[TMP19:%.*]] = sub i32 [[TMP18]], 2 -; IF-EVL-NEXT: [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement [[WIDE_LOAD]], i32 [[TMP19]] -; IF-EVL-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TC]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]] -; IF-EVL: [[SCALAR_PH]]: -; IF-EVL-NEXT: 
[[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] -; IF-EVL-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ [[VECTOR_RECUR_EXTRACT]], %[[MIDDLE_BLOCK]] ], [ 33, %[[ENTRY]] ] -; IF-EVL-NEXT: br label %[[FOR_BODY:.*]] -; IF-EVL: [[FOR_BODY]]: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_NEXT:%.*]], %[[FOR_BODY]] ] -; IF-EVL-NEXT: [[FOR1:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[TMP0:%.*]], %[[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP0]] = load i32, ptr [[ARRAYIDX1]], align 4 -; IF-EVL-NEXT: [[ADD:%.*]] = add nsw i32 [[FOR1]], [[TMP0]] -; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[IV]] -; IF-EVL-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX2]], align 4 -; IF-EVL-NEXT: [[INDVARS_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_NEXT]], [[TC]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; IF-EVL-NEXT: [[TMP25:%.*]] = extractelement [[VECTOR_RECUR]], i32 [[TMP16]] +; IF-EVL-NEXT: [[TMP26:%.*]] = icmp eq i64 [[TMP28]], 0 +; IF-EVL-NEXT: [[FOR1_LCSSA:%.*]] = select i1 [[TMP26]], i32 [[TMP25]], i32 [[TMP21]] +; IF-EVL-NEXT: br label %[[FOR_END:.*]] ; IF-EVL: [[FOR_END]]: -; IF-EVL-NEXT: [[FOR1_LCSSA:%.*]] = phi i32 [ [[FOR1]], %[[FOR_BODY]] ], [ [[VECTOR_RECUR_EXTRACT_FOR_PHI]], %[[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret i32 [[FOR1_LCSSA]] ; ; NO-VP-LABEL: define i32 @FOR_reduction( @@ -570,7 +563,7 @@ define void @first_order_recurrence_indvar(ptr noalias %A, i64 %TC) { ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]] ; IF-EVL-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] ; IF-EVL-NEXT: [[TMP22:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop 
[[LOOP7:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; IF-EVL: [[MIDDLE_BLOCK]]: ; IF-EVL-NEXT: br label %[[FOR_END:.*]] ; IF-EVL: [[FOR_END]]: @@ -662,8 +655,7 @@ for.end: ; IF-EVL: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]], [[META2]]} ; IF-EVL: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]} ; IF-EVL: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]]} -; IF-EVL: [[LOOP6]] = distinct !{[[LOOP6]], [[META2]], [[META1]]} -; IF-EVL: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]], [[META2]]} +; IF-EVL: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]} ;. ; NO-VP: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} ; NO-VP: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll index b9a4e97cd9f24..cc1b2380bc532 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll @@ -108,7 +108,7 @@ define i32 @mul(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START:%.*]], [[ENTRY]] ], [ [[MUL:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[VEC_PHI1:%.*]] = phi i32 [ 1, [[ENTRY]] ], [ [[TMP5:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 4 +; IF-EVL-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 4 ; IF-EVL-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4 ; IF-EVL-NEXT: [[WIDE_LOAD3:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4 ; IF-EVL-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[WIDE_LOAD2]]) @@ -117,7 +117,7 @@ define i32 @mul(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[TMP5]] = mul i32 
[[VEC_PHI1]], [[TMP4]] ; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 8 ; IF-EVL-NEXT: [[TMP7:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[BIN_RDX:%.*]] = mul i32 [[TMP5]], [[MUL]] ; IF-EVL-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_RND_UP]], [[N_VEC]] @@ -134,7 +134,7 @@ define i32 @mul(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[MUL1]] = mul nsw i32 [[TMP0]], [[RDX1]] ; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 ; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N_RND_UP]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP5:![0-9]+]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP4:![0-9]+]] ; IF-EVL: for.end: ; IF-EVL-NEXT: [[MUL_LCSSA:%.*]] = phi i32 [ [[MUL1]], [[FOR_BODY1]] ], [ [[BIN_RDX]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret i32 [[MUL_LCSSA]] @@ -152,7 +152,7 @@ define i32 @mul(ptr %a, i64 %n, i32 %start) { ; NO-VP-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START:%.*]], [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ] ; NO-VP-NEXT: [[VEC_PHI1:%.*]] = phi i32 [ 1, [[VECTOR_PH]] ], [ [[TMP9:%.*]], [[VECTOR_BODY]] ] ; NO-VP-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]] -; NO-VP-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 4 +; NO-VP-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 4 ; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4 ; NO-VP-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i32>, ptr [[TMP5]], align 4 ; NO-VP-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[WIDE_LOAD]]) @@ -219,7 +219,7 @@ define i32 @or(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add 
i64 [[TMP9]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]] ; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.end: @@ -303,7 +303,7 @@ define i32 @and(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]] ; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.end: @@ -387,7 +387,7 @@ define i32 @xor(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]] ; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.end: @@ -471,7 +471,7 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP8]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP9]], label 
[[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.end: @@ -557,7 +557,7 @@ define i32 @smax(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP8]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.end: @@ -643,7 +643,7 @@ define i32 @umin(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP8]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.end: @@ -729,7 +729,7 @@ define i32 @umax(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP8]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.end: @@ -815,7 +815,7 @@ define float @fadd(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]] ; 
IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]] ; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.end: @@ -895,7 +895,7 @@ define float @fmul(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START:%.*]], [[ENTRY]] ], [ [[MUL:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[VEC_PHI1:%.*]] = phi float [ 1.000000e+00, [[ENTRY]] ], [ [[TMP5:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 4 +; IF-EVL-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 4 ; IF-EVL-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP2]], align 4 ; IF-EVL-NEXT: [[WIDE_LOAD3:%.*]] = load <4 x float>, ptr [[TMP1]], align 4 ; IF-EVL-NEXT: [[TMP8:%.*]] = call reassoc float @llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[WIDE_LOAD2]]) @@ -904,7 +904,7 @@ define float @fmul(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[TMP5]] = fmul reassoc float [[VEC_PHI1]], [[TMP4]] ; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 8 ; IF-EVL-NEXT: [[TMP7:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[BIN_RDX:%.*]] = fmul reassoc float [[TMP5]], [[MUL]] ; IF-EVL-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_RND_UP]], [[N_VEC]] @@ -921,7 +921,7 @@ define float @fmul(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[MUL1]] = fmul reassoc float [[TMP0]], 
[[RDX1]] ; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 ; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N_RND_UP]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP15:![0-9]+]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP14:![0-9]+]] ; IF-EVL: for.end: ; IF-EVL-NEXT: [[MUL_LCSSA:%.*]] = phi float [ [[MUL1]], [[FOR_BODY1]] ], [ [[BIN_RDX]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret float [[MUL_LCSSA]] @@ -939,7 +939,7 @@ define float @fmul(ptr %a, i64 %n, float %start) { ; NO-VP-NEXT: [[VEC_PHI:%.*]] = phi float [ [[START:%.*]], [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ] ; NO-VP-NEXT: [[VEC_PHI1:%.*]] = phi float [ 1.000000e+00, [[VECTOR_PH]] ], [ [[TMP9:%.*]], [[VECTOR_BODY]] ] ; NO-VP-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]] -; NO-VP-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 4 +; NO-VP-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 4 ; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP2]], align 4 ; NO-VP-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP5]], align 4 ; NO-VP-NEXT: [[TMP6:%.*]] = call reassoc float @llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[WIDE_LOAD]]) @@ -1007,7 +1007,7 @@ define float @fmin(ptr %a, i64 %n, float %start) #0 { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP8]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.end: @@ -1095,7 +1095,7 @@ define float @fmax(ptr %a, i64 %n, float %start) #0 { ; 
IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP8]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.end: @@ -1179,14 +1179,14 @@ define float @fminimum(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[VEC_PHI1:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[ENTRY]] ], [ [[TMP3:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[VEC_PHI2:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[ENTRY]] ], [ [[TMP4:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 8 +; IF-EVL-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 8 ; IF-EVL-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x float>, ptr [[TMP2]], align 4 ; IF-EVL-NEXT: [[WIDE_LOAD3:%.*]] = load <8 x float>, ptr [[TMP1]], align 4 ; IF-EVL-NEXT: [[TMP3]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[VEC_PHI1]], <8 x float> [[WIDE_LOAD2]]) ; IF-EVL-NEXT: [[TMP4]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[VEC_PHI2]], <8 x float> [[WIDE_LOAD3]]) ; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 16 ; IF-EVL-NEXT: [[TMP6:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP5:%.*]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[TMP3]], <8 x float> [[TMP4]]) ; IF-EVL-NEXT: [[TMP7:%.*]] = call float @llvm.vector.reduce.fminimum.v8f32(<8 x 
float> [[TMP5]]) @@ -1204,7 +1204,7 @@ define float @fminimum(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[MIN]] = tail call float @llvm.minimum.f32(float [[RDX]], float [[TMP0]]) ; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 ; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N_RND_UP]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP19:![0-9]+]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP18:![0-9]+]] ; IF-EVL: for.end: ; IF-EVL-NEXT: [[MIN_LCSSA:%.*]] = phi float [ [[MIN]], [[FOR_BODY1]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret float [[MIN_LCSSA]] @@ -1224,7 +1224,7 @@ define float @fminimum(ptr %a, i64 %n, float %start) { ; NO-VP-NEXT: [[VEC_PHI:%.*]] = phi <8 x float> [ [[MINMAX_IDENT_SPLAT]], [[VECTOR_PH]] ], [ [[TMP6:%.*]], [[VECTOR_BODY]] ] ; NO-VP-NEXT: [[VEC_PHI1:%.*]] = phi <8 x float> [ [[MINMAX_IDENT_SPLAT]], [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ] ; NO-VP-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]] -; NO-VP-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 8 +; NO-VP-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 8 ; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <8 x float>, ptr [[TMP2]], align 4 ; NO-VP-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x float>, ptr [[TMP5]], align 4 ; NO-VP-NEXT: [[TMP6]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[VEC_PHI]], <8 x float> [[WIDE_LOAD]]) @@ -1287,14 +1287,14 @@ define float @fmaximum(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[VEC_PHI1:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[ENTRY]] ], [ [[TMP3:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[VEC_PHI2:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[ENTRY]] ], [ [[TMP4:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, 
ptr [[TMP2]], i32 8 +; IF-EVL-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 8 ; IF-EVL-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x float>, ptr [[TMP2]], align 4 ; IF-EVL-NEXT: [[WIDE_LOAD3:%.*]] = load <8 x float>, ptr [[TMP1]], align 4 ; IF-EVL-NEXT: [[TMP3]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[VEC_PHI1]], <8 x float> [[WIDE_LOAD2]]) ; IF-EVL-NEXT: [[TMP4]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[VEC_PHI2]], <8 x float> [[WIDE_LOAD3]]) ; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 16 ; IF-EVL-NEXT: [[TMP6:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP5:%.*]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[TMP3]], <8 x float> [[TMP4]]) ; IF-EVL-NEXT: [[TMP7:%.*]] = call float @llvm.vector.reduce.fmaximum.v8f32(<8 x float> [[TMP5]]) @@ -1312,7 +1312,7 @@ define float @fmaximum(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[MAX]] = tail call float @llvm.maximum.f32(float [[RDX]], float [[TMP0]]) ; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 ; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N_RND_UP]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP21:![0-9]+]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP20:![0-9]+]] ; IF-EVL: for.end: ; IF-EVL-NEXT: [[MAX_LCSSA:%.*]] = phi float [ [[MAX]], [[FOR_BODY1]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret float [[MAX_LCSSA]] @@ -1332,7 +1332,7 @@ define float @fmaximum(ptr %a, i64 %n, float %start) { ; NO-VP-NEXT: [[VEC_PHI:%.*]] = phi <8 x float> [ [[MINMAX_IDENT_SPLAT]], [[VECTOR_PH]] ], [ [[TMP6:%.*]], [[VECTOR_BODY]] ] ; NO-VP-NEXT: [[VEC_PHI1:%.*]] = phi <8 x float> [ 
[[MINMAX_IDENT_SPLAT]], [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ] ; NO-VP-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]] -; NO-VP-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 8 +; NO-VP-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 8 ; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <8 x float>, ptr [[TMP2]], align 4 ; NO-VP-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x float>, ptr [[TMP5]], align 4 ; NO-VP-NEXT: [[TMP6]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[VEC_PHI]], <8 x float> [[WIDE_LOAD]]) @@ -1401,7 +1401,7 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n, float %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP11]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]] ; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.end: @@ -1492,7 +1492,7 @@ define i32 @anyof_icmp(ptr %a, i64 %n, i32 %start, i32 %inv) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]] ; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP19:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1( [[TMP16]]) ; IF-EVL-NEXT: [[TMP20:%.*]] = freeze i1 [[TMP19]] @@ -1584,7 +1584,7 @@ define i32 @anyof_fcmp(ptr %a, i64 %n, i32 %start, i32 %inv) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]] ; 
IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]] ; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP19:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1( [[TMP16]]) ; IF-EVL-NEXT: [[TMP20:%.*]] = freeze i1 [[TMP19]] diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll index 7179e7dc48c8d..d1a2303e35e68 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll @@ -110,14 +110,14 @@ define i32 @mul(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <8 x i32> [ [[TMP9]], [[ENTRY]] ], [ [[TMP5:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[VEC_PHI1:%.*]] = phi <8 x i32> [ splat (i32 1), [[ENTRY]] ], [ [[TMP4:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 8 +; IF-EVL-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i64 8 ; IF-EVL-NEXT: [[WIDE_MASKED_LOAD:%.*]] = load <8 x i32>, ptr [[TMP3]], align 4 ; IF-EVL-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i32>, ptr [[TMP2]], align 4 ; IF-EVL-NEXT: [[TMP5]] = mul <8 x i32> [[WIDE_MASKED_LOAD]], [[VEC_PHI]] ; IF-EVL-NEXT: [[TMP4]] = mul <8 x i32> [[WIDE_LOAD2]], [[VEC_PHI1]] ; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 16 ; IF-EVL-NEXT: [[TMP7:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; 
IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP6:%.*]] = mul <8 x i32> [[TMP4]], [[TMP5]] ; IF-EVL-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> [[TMP6]]) @@ -135,7 +135,7 @@ define i32 @mul(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[MUL]] = mul nsw i32 [[TMP0]], [[RDX]] ; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 ; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N_RND_UP]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP5:![0-9]+]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP4:![0-9]+]] ; IF-EVL: for.end: ; IF-EVL-NEXT: [[MUL_LCSSA:%.*]] = phi i32 [ [[MUL]], [[FOR_BODY1]] ], [ [[TMP8]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret i32 [[MUL_LCSSA]] @@ -154,7 +154,7 @@ define i32 @mul(ptr %a, i64 %n, i32 %start) { ; NO-VP-NEXT: [[VEC_PHI:%.*]] = phi <8 x i32> [ [[TMP0]], [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ] ; NO-VP-NEXT: [[VEC_PHI1:%.*]] = phi <8 x i32> [ splat (i32 1), [[VECTOR_PH]] ], [ [[TMP6:%.*]], [[VECTOR_BODY]] ] ; NO-VP-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]] -; NO-VP-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 8 +; NO-VP-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 8 ; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i32>, ptr [[TMP2]], align 4 ; NO-VP-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i32>, ptr [[TMP4]], align 4 ; NO-VP-NEXT: [[TMP5]] = mul <8 x i32> [[WIDE_LOAD]], [[VEC_PHI]] @@ -221,7 +221,7 @@ define i32 @or(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]] ; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label 
[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.or.nxv4i32( [[TMP14]]) ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] @@ -308,7 +308,7 @@ define i32 @and(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]] ; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.and.nxv4i32( [[TMP14]]) ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] @@ -395,7 +395,7 @@ define i32 @xor(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]] ; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.xor.nxv4i32( [[TMP14]]) ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] @@ -484,7 +484,7 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]] ; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP18:%.*]] = 
call i32 @llvm.vector.reduce.smin.nxv4i32( [[TMP15]]) ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] @@ -577,7 +577,7 @@ define i32 @smax(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]] ; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP18:%.*]] = call i32 @llvm.vector.reduce.smax.nxv4i32( [[TMP15]]) ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] @@ -670,7 +670,7 @@ define i32 @umin(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]] ; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP18:%.*]] = call i32 @llvm.vector.reduce.umin.nxv4i32( [[TMP15]]) ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] @@ -763,7 +763,7 @@ define i32 @umax(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]] ; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP18:%.*]] = call i32 @llvm.vector.reduce.umax.nxv4i32( [[TMP15]]) ; IF-EVL-NEXT: br label 
[[FOR_BODY:%.*]] @@ -854,7 +854,7 @@ define float @fadd(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]] ; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP17:%.*]] = call reassoc float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, [[TMP14]]) ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] @@ -937,14 +937,14 @@ define float @fmul(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <8 x float> [ [[TMP9]], [[ENTRY]] ], [ [[TMP5:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[VEC_PHI1:%.*]] = phi <8 x float> [ splat (float 1.000000e+00), [[ENTRY]] ], [ [[TMP4:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[TMP3]], i32 8 +; IF-EVL-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[TMP3]], i64 8 ; IF-EVL-NEXT: [[WIDE_MASKED_LOAD:%.*]] = load <8 x float>, ptr [[TMP3]], align 4 ; IF-EVL-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x float>, ptr [[TMP2]], align 4 ; IF-EVL-NEXT: [[TMP5]] = fmul reassoc <8 x float> [[WIDE_MASKED_LOAD]], [[VEC_PHI]] ; IF-EVL-NEXT: [[TMP4]] = fmul reassoc <8 x float> [[WIDE_LOAD2]], [[VEC_PHI1]] ; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 16 ; IF-EVL-NEXT: [[TMP7:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP6:%.*]] = fmul reassoc <8 x float> [[TMP4]], [[TMP5]] ; 
IF-EVL-NEXT: [[TMP8:%.*]] = call reassoc float @llvm.vector.reduce.fmul.v8f32(float 1.000000e+00, <8 x float> [[TMP6]]) @@ -962,7 +962,7 @@ define float @fmul(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[MUL]] = fmul reassoc float [[TMP0]], [[RDX]] ; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 ; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N_RND_UP]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP15:![0-9]+]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP14:![0-9]+]] ; IF-EVL: for.end: ; IF-EVL-NEXT: [[MUL_LCSSA:%.*]] = phi float [ [[MUL]], [[FOR_BODY1]] ], [ [[TMP8]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret float [[MUL_LCSSA]] @@ -981,7 +981,7 @@ define float @fmul(ptr %a, i64 %n, float %start) { ; NO-VP-NEXT: [[VEC_PHI:%.*]] = phi <8 x float> [ [[TMP0]], [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ] ; NO-VP-NEXT: [[VEC_PHI1:%.*]] = phi <8 x float> [ splat (float 1.000000e+00), [[VECTOR_PH]] ], [ [[TMP6:%.*]], [[VECTOR_BODY]] ] ; NO-VP-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]] -; NO-VP-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 8 +; NO-VP-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 8 ; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <8 x float>, ptr [[TMP2]], align 4 ; NO-VP-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x float>, ptr [[TMP4]], align 4 ; NO-VP-NEXT: [[TMP5]] = fmul reassoc <8 x float> [[WIDE_LOAD]], [[VEC_PHI]] @@ -1050,7 +1050,7 @@ define float @fmin(ptr %a, i64 %n, float %start) #0 { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]] ; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], 
label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP18:%.*]] = call fast float @llvm.vector.reduce.fmin.nxv4f32( [[TMP15]]) ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] @@ -1143,7 +1143,7 @@ define float @fmax(ptr %a, i64 %n, float %start) #0 { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]] ; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP18:%.*]] = call fast float @llvm.vector.reduce.fmax.nxv4f32( [[TMP15]]) ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] @@ -1230,14 +1230,14 @@ define float @fminimum(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[ENTRY]] ], [ [[TMP4:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[VEC_PHI1:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[ENTRY]] ], [ [[TMP3:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 8 +; IF-EVL-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 8 ; IF-EVL-NEXT: [[WIDE_MASKED_LOAD:%.*]] = load <8 x float>, ptr [[TMP2]], align 4 ; IF-EVL-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x float>, ptr [[TMP1]], align 4 ; IF-EVL-NEXT: [[TMP4]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[VEC_PHI]], <8 x float> [[WIDE_MASKED_LOAD]]) ; IF-EVL-NEXT: [[TMP3]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[VEC_PHI1]], <8 x float> [[WIDE_LOAD2]]) ; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 16 ; IF-EVL-NEXT: [[TMP6:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], 
label [[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP5:%.*]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[TMP4]], <8 x float> [[TMP3]]) ; IF-EVL-NEXT: [[TMP7:%.*]] = call float @llvm.vector.reduce.fminimum.v8f32(<8 x float> [[TMP5]]) @@ -1255,7 +1255,7 @@ define float @fminimum(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[MIN]] = tail call float @llvm.minimum.f32(float [[RDX]], float [[TMP0]]) ; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 ; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N_RND_UP]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP19:![0-9]+]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP18:![0-9]+]] ; IF-EVL: for.end: ; IF-EVL-NEXT: [[MIN_LCSSA:%.*]] = phi float [ [[MIN]], [[FOR_BODY1]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret float [[MIN_LCSSA]] @@ -1275,7 +1275,7 @@ define float @fminimum(ptr %a, i64 %n, float %start) { ; NO-VP-NEXT: [[VEC_PHI:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[VECTOR_PH]] ], [ [[TMP4:%.*]], [[VECTOR_BODY]] ] ; NO-VP-NEXT: [[VEC_PHI1:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ] ; NO-VP-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]] -; NO-VP-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i32 8 +; NO-VP-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i64 8 ; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <8 x float>, ptr [[TMP1]], align 4 ; NO-VP-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x float>, ptr [[TMP3]], align 4 ; NO-VP-NEXT: [[TMP4]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[VEC_PHI]], <8 x float> [[WIDE_LOAD]]) @@ -1338,14 +1338,14 @@ define float @fmaximum(ptr %a, i64 %n, float %start) { ; 
IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[ENTRY]] ], [ [[TMP4:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[VEC_PHI1:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[ENTRY]] ], [ [[TMP3:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 8 +; IF-EVL-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 8 ; IF-EVL-NEXT: [[WIDE_MASKED_LOAD:%.*]] = load <8 x float>, ptr [[TMP2]], align 4 ; IF-EVL-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x float>, ptr [[TMP1]], align 4 ; IF-EVL-NEXT: [[TMP4]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[VEC_PHI]], <8 x float> [[WIDE_MASKED_LOAD]]) ; IF-EVL-NEXT: [[TMP3]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[VEC_PHI1]], <8 x float> [[WIDE_LOAD2]]) ; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 16 ; IF-EVL-NEXT: [[TMP6:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP5:%.*]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[TMP4]], <8 x float> [[TMP3]]) ; IF-EVL-NEXT: [[TMP7:%.*]] = call float @llvm.vector.reduce.fmaximum.v8f32(<8 x float> [[TMP5]]) @@ -1363,7 +1363,7 @@ define float @fmaximum(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[MAX]] = tail call float @llvm.maximum.f32(float [[RDX]], float [[TMP0]]) ; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 ; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N_RND_UP]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP21:![0-9]+]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP20:![0-9]+]] ; IF-EVL: for.end: ; 
IF-EVL-NEXT: [[MAX_LCSSA:%.*]] = phi float [ [[MAX]], [[FOR_BODY1]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret float [[MAX_LCSSA]] @@ -1383,7 +1383,7 @@ define float @fmaximum(ptr %a, i64 %n, float %start) { ; NO-VP-NEXT: [[VEC_PHI:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[VECTOR_PH]] ], [ [[TMP4:%.*]], [[VECTOR_BODY]] ] ; NO-VP-NEXT: [[VEC_PHI1:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ] ; NO-VP-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]] -; NO-VP-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i32 8 +; NO-VP-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i64 8 ; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <8 x float>, ptr [[TMP1]], align 4 ; NO-VP-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x float>, ptr [[TMP3]], align 4 ; NO-VP-NEXT: [[TMP4]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[VEC_PHI]], <8 x float> [[WIDE_LOAD]]) @@ -1452,7 +1452,7 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n, float %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP18]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP18]] ; IF-EVL-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP20:%.*]] = call reassoc float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, [[TMP17]]) ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] @@ -1544,7 +1544,7 @@ define i32 @anyof_icmp(ptr %a, i64 %n, i32 %start, i32 %inv) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]] ; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], 
label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP18:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1( [[TMP15]]) ; IF-EVL-NEXT: [[TMP19:%.*]] = freeze i1 [[TMP18]] @@ -1636,7 +1636,7 @@ define i32 @anyof_fcmp(ptr %a, i64 %n, i32 %start, i32 %inv) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]] ; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP18:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1( [[TMP15]]) ; IF-EVL-NEXT: [[TMP19:%.*]] = freeze i1 [[TMP18]] diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll index 7b0ac78fb365c..13990000585ea 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll @@ -331,20 +331,20 @@ define void @multiple_reverse_vector_pointer(ptr noalias %a, ptr noalias %b, ptr ; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[LOOP]] ] ; NO-VP-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1024, [[INDEX]] ; NO-VP-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[OFFSET_IDX]] -; NO-VP-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[TMP0]], i32 0 -; NO-VP-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP1]], i32 -15 +; NO-VP-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[TMP0]], i64 0 +; NO-VP-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP1]], i64 -15 ; NO-VP-NEXT: 
[[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1 ; NO-VP-NEXT: [[REVERSE:%.*]] = shufflevector <16 x i8> [[WIDE_LOAD]], <16 x i8> poison, <16 x i32> ; NO-VP-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[B:%.*]], <16 x i8> [[REVERSE]] ; NO-VP-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr> align 1 [[TMP3]], <16 x i1> splat (i1 true), <16 x i8> poison) ; NO-VP-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[C:%.*]], i64 [[OFFSET_IDX]] -; NO-VP-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP4]], i32 0 -; NO-VP-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[TMP5]], i32 -15 +; NO-VP-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP4]], i64 0 +; NO-VP-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[TMP5]], i64 -15 ; NO-VP-NEXT: [[REVERSE1:%.*]] = shufflevector <16 x i8> [[WIDE_MASKED_GATHER]], <16 x i8> poison, <16 x i32> ; NO-VP-NEXT: store <16 x i8> [[REVERSE1]], ptr [[TMP6]], align 1 ; NO-VP-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[D:%.*]], i64 [[OFFSET_IDX]] -; NO-VP-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP7]], i32 0 -; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[TMP8]], i32 -15 +; NO-VP-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP7]], i64 0 +; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[TMP8]], i64 -15 ; NO-VP-NEXT: store <16 x i8> [[REVERSE1]], ptr [[TMP9]], align 1 ; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; NO-VP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll index 0375f0a8fd132..9a361e0b48a61 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll @@ -44,7 +44,7 @@ define void @uniform_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i6 ; FIXEDLEN-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[TMP1]], 
i64 0 ; FIXEDLEN-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer ; FIXEDLEN-NEXT: [[TMP2:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] -; FIXEDLEN-NEXT: [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i32 4 +; FIXEDLEN-NEXT: [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i64 4 ; FIXEDLEN-NEXT: store <4 x i64> [[BROADCAST_SPLAT]], ptr [[TMP2]], align 8 ; FIXEDLEN-NEXT: store <4 x i64> [[BROADCAST_SPLAT]], ptr [[TMP4]], align 8 ; FIXEDLEN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 @@ -109,44 +109,38 @@ for.end: define i64 @uniform_load_outside_use(ptr noalias nocapture %a, ptr noalias nocapture %b, i64 %n) { ; SCALABLE-LABEL: define i64 @uniform_load_outside_use( ; SCALABLE-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { -; SCALABLE-NEXT: [[ENTRY:.*]]: -; SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; SCALABLE-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 1 -; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]] -; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; SCALABLE-NEXT: [[ENTRY:.*:]] +; SCALABLE-NEXT: br label %[[VECTOR_PH:.*]] ; SCALABLE: [[VECTOR_PH]]: -; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 -; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]] -; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] ; SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]] ; SCALABLE: [[VECTOR_BODY]]: -; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; SCALABLE-NEXT: [[TMP0:%.*]] = 
call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) +; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement poison, i32 [[TMP0]], i64 0 +; SCALABLE-NEXT: [[BROADCAST_SPLAT1:%.*]] = shufflevector [[BROADCAST_SPLATINSERT1]], poison, zeroinitializer +; SCALABLE-NEXT: [[TMP1:%.*]] = call @llvm.stepvector.nxv2i32() +; SCALABLE-NEXT: [[TMP2:%.*]] = icmp uge [[TMP1]], [[BROADCAST_SPLAT1]] ; SCALABLE-NEXT: [[TMP6:%.*]] = load i64, ptr [[B]], align 8 ; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i64 [[TMP6]], i64 0 ; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer ; SCALABLE-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] -; SCALABLE-NEXT: store [[BROADCAST_SPLAT]], ptr [[TMP8]], align 8 -; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] -; SCALABLE-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0( [[BROADCAST_SPLAT]], ptr align 8 [[TMP8]], splat (i1 true), i32 [[TMP0]]) +; SCALABLE-NEXT: [[TMP5:%.*]] = zext i32 [[TMP0]] to i64 +; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP5]], [[INDEX]] +; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP5]] +; SCALABLE-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; SCALABLE-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; SCALABLE: [[MIDDLE_BLOCK]]: -; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]] -; SCALABLE-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]] -; SCALABLE: [[SCALAR_PH]]: -; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] -; SCALABLE-NEXT: br label %[[FOR_BODY:.*]] -; SCALABLE: [[FOR_BODY]]: -; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; SCALABLE-NEXT: [[V:%.*]] = load i64, ptr 
[[B]], align 8 -; SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 -; SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; SCALABLE-NEXT: [[FIRST_INACTIVE_LANE:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.nxv2i1( [[TMP2]], i1 false) +; SCALABLE-NEXT: [[LAST_ACTIVE_LANE:%.*]] = sub i64 [[FIRST_INACTIVE_LANE]], 1 +; SCALABLE-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() +; SCALABLE-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP7]], 2 +; SCALABLE-NEXT: [[TMP9:%.*]] = mul i64 [[TMP11]], 0 +; SCALABLE-NEXT: [[TMP12:%.*]] = extractelement [[BROADCAST_SPLAT]], i64 [[LAST_ACTIVE_LANE]] +; SCALABLE-NEXT: br label %[[FOR_END:.*]] ; SCALABLE: [[FOR_END]]: -; SCALABLE-NEXT: [[V_LCSSA:%.*]] = phi i64 [ [[V]], %[[FOR_BODY]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ] -; SCALABLE-NEXT: ret i64 [[V_LCSSA]] +; SCALABLE-NEXT: ret i64 [[TMP12]] ; ; FIXEDLEN-LABEL: define i64 @uniform_load_outside_use( ; FIXEDLEN-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { @@ -160,7 +154,7 @@ define i64 @uniform_load_outside_use(ptr noalias nocapture %a, ptr noalias nocap ; FIXEDLEN-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[TMP1]], i64 0 ; FIXEDLEN-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer ; FIXEDLEN-NEXT: [[TMP2:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] -; FIXEDLEN-NEXT: [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i32 4 +; FIXEDLEN-NEXT: [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i64 4 ; FIXEDLEN-NEXT: store <4 x i64> [[BROADCAST_SPLAT]], ptr [[TMP2]], align 8 ; FIXEDLEN-NEXT: store <4 x i64> 
[[BROADCAST_SPLAT]], ptr [[TMP4]], align 8 ; FIXEDLEN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 @@ -184,44 +178,38 @@ define i64 @uniform_load_outside_use(ptr noalias nocapture %a, ptr noalias nocap ; ; TF-SCALABLE-LABEL: define i64 @uniform_load_outside_use( ; TF-SCALABLE-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { -; TF-SCALABLE-NEXT: [[ENTRY:.*]]: -; TF-SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; TF-SCALABLE-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 1 -; TF-SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]] -; TF-SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; TF-SCALABLE-NEXT: [[ENTRY:.*:]] +; TF-SCALABLE-NEXT: br label %[[VECTOR_PH:.*]] ; TF-SCALABLE: [[VECTOR_PH]]: -; TF-SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; TF-SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 -; TF-SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]] -; TF-SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] ; TF-SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]] ; TF-SCALABLE: [[VECTOR_BODY]]: -; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; TF-SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; TF-SCALABLE-NEXT: [[TMP0:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) +; TF-SCALABLE-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement poison, i32 [[TMP0]], i64 0 +; TF-SCALABLE-NEXT: [[BROADCAST_SPLAT1:%.*]] = shufflevector [[BROADCAST_SPLATINSERT1]], poison, zeroinitializer +; TF-SCALABLE-NEXT: [[TMP1:%.*]] = call @llvm.stepvector.nxv2i32() +; TF-SCALABLE-NEXT: [[TMP2:%.*]] = icmp uge [[TMP1]], [[BROADCAST_SPLAT1]] ; TF-SCALABLE-NEXT: [[V:%.*]] = load 
i64, ptr [[B]], align 8 ; TF-SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i64 [[V]], i64 0 ; TF-SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer ; TF-SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; TF-SCALABLE-NEXT: store [[BROADCAST_SPLAT]], ptr [[ARRAYIDX]], align 8 -; TF-SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP3]] -; TF-SCALABLE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; TF-SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; TF-SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0( [[BROADCAST_SPLAT]], ptr align 8 [[ARRAYIDX]], splat (i1 true), i32 [[TMP0]]) +; TF-SCALABLE-NEXT: [[TMP5:%.*]] = zext i32 [[TMP0]] to i64 +; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP5]], [[IV]] +; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP5]] +; TF-SCALABLE-NEXT: [[TMP6:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 +; TF-SCALABLE-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; TF-SCALABLE: [[MIDDLE_BLOCK]]: -; TF-SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]] -; TF-SCALABLE-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]] -; TF-SCALABLE: [[SCALAR_PH]]: -; TF-SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] -; TF-SCALABLE-NEXT: br label %[[FOR_BODY:.*]] -; TF-SCALABLE: [[FOR_BODY]]: -; TF-SCALABLE-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; TF-SCALABLE-NEXT: [[V1:%.*]] = load i64, ptr [[B]], align 8 -; TF-SCALABLE-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV1]] -; TF-SCALABLE-NEXT: store i64 [[V1]], ptr [[ARRAYIDX1]], align 8 -; TF-SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1 -; TF-SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] 
= icmp eq i64 [[IV_NEXT]], 1025 -; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; TF-SCALABLE-NEXT: [[FIRST_INACTIVE_LANE:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.nxv2i1( [[TMP2]], i1 false) +; TF-SCALABLE-NEXT: [[LAST_ACTIVE_LANE:%.*]] = sub i64 [[FIRST_INACTIVE_LANE]], 1 +; TF-SCALABLE-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() +; TF-SCALABLE-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2 +; TF-SCALABLE-NEXT: [[TMP9:%.*]] = mul i64 [[TMP8]], 0 +; TF-SCALABLE-NEXT: [[TMP12:%.*]] = extractelement [[BROADCAST_SPLAT]], i64 [[LAST_ACTIVE_LANE]] +; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]] ; TF-SCALABLE: [[FOR_END]]: -; TF-SCALABLE-NEXT: [[V_LCSSA:%.*]] = phi i64 [ [[V1]], %[[FOR_BODY]] ], [ [[V]], %[[MIDDLE_BLOCK]] ] -; TF-SCALABLE-NEXT: ret i64 [[V_LCSSA]] +; TF-SCALABLE-NEXT: ret i64 [[TMP12]] ; entry: br label %for.body @@ -269,7 +257,7 @@ define void @conditional_uniform_load(ptr noalias nocapture %a, ptr noalias noca ; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; SCALABLE-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT]] ; SCALABLE-NEXT: [[TMP14:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; SCALABLE-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; SCALABLE-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; SCALABLE: [[MIDDLE_BLOCK]]: ; SCALABLE-NEXT: br label %[[FOR_END:.*]] ; SCALABLE: [[FOR_END]]: @@ -294,7 +282,7 @@ define void @conditional_uniform_load(ptr noalias nocapture %a, ptr noalias noca ; FIXEDLEN-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP1]], <4 x i64> [[WIDE_MASKED_GATHER]], <4 x i64> zeroinitializer ; FIXEDLEN-NEXT: [[PREDPHI2:%.*]] = select <4 x i1> [[TMP2]], <4 x i64> [[WIDE_MASKED_GATHER1]], <4 x i64> zeroinitializer ; FIXEDLEN-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] -; FIXEDLEN-NEXT: [[TMP5:%.*]] = 
getelementptr inbounds i64, ptr [[TMP3]], i32 4 +; FIXEDLEN-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[TMP3]], i64 4 ; FIXEDLEN-NEXT: store <4 x i64> [[PREDPHI]], ptr [[TMP3]], align 8 ; FIXEDLEN-NEXT: store <4 x i64> [[PREDPHI2]], ptr [[TMP5]], align 8 ; FIXEDLEN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 @@ -350,7 +338,7 @@ define void @conditional_uniform_load(ptr noalias nocapture %a, ptr noalias noca ; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]] ; TF-SCALABLE-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT]] ; TF-SCALABLE-NEXT: [[TMP17:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; TF-SCALABLE-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; TF-SCALABLE: [[MIDDLE_BLOCK]]: ; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]] ; TF-SCALABLE: [[FOR_END]]: @@ -399,7 +387,7 @@ define void @uniform_load_unaligned(ptr noalias nocapture %a, ptr noalias nocapt ; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP11]], [[INDEX]] ; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]] ; SCALABLE-NEXT: [[TMP7:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; SCALABLE-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; SCALABLE-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; SCALABLE: [[MIDDLE_BLOCK]]: ; SCALABLE-NEXT: br label %[[FOR_END:.*]] ; SCALABLE: [[FOR_END]]: @@ -417,7 +405,7 @@ define void @uniform_load_unaligned(ptr noalias nocapture %a, ptr noalias nocapt ; FIXEDLEN-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[TMP1]], i64 0 ; FIXEDLEN-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer ; FIXEDLEN-NEXT: [[TMP2:%.*]] = getelementptr inbounds i64, ptr 
[[A]], i64 [[INDEX]] -; FIXEDLEN-NEXT: [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i32 4 +; FIXEDLEN-NEXT: [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i64 4 ; FIXEDLEN-NEXT: store <4 x i64> [[BROADCAST_SPLAT]], ptr [[TMP2]], align 8 ; FIXEDLEN-NEXT: store <4 x i64> [[BROADCAST_SPLAT]], ptr [[TMP4]], align 8 ; FIXEDLEN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 @@ -457,7 +445,7 @@ define void @uniform_load_unaligned(ptr noalias nocapture %a, ptr noalias nocapt ; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP8]], [[INDEX]] ; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; TF-SCALABLE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; TF-SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; TF-SCALABLE: [[MIDDLE_BLOCK]]: ; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]] ; TF-SCALABLE: [[FOR_END]]: @@ -499,7 +487,7 @@ define void @uniform_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i ; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP10]], [[INDEX]] ; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]] ; SCALABLE-NEXT: [[TMP6:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; SCALABLE-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; SCALABLE-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; SCALABLE: [[MIDDLE_BLOCK]]: ; SCALABLE-NEXT: br label %[[FOR_END:.*]] ; SCALABLE: [[FOR_END]]: @@ -517,7 +505,7 @@ define void @uniform_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i ; FIXEDLEN-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; FIXEDLEN-NEXT: store i64 [[V]], ptr [[B]], align 8 ; FIXEDLEN-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 
[[INDEX]] -; FIXEDLEN-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i32 4 +; FIXEDLEN-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i64 4 ; FIXEDLEN-NEXT: store <4 x i64> [[BROADCAST_SPLAT]], ptr [[TMP1]], align 8 ; FIXEDLEN-NEXT: store <4 x i64> [[BROADCAST_SPLAT]], ptr [[TMP3]], align 8 ; FIXEDLEN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 @@ -557,7 +545,7 @@ define void @uniform_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i ; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP7]], [[INDEX]] ; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]] ; TF-SCALABLE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; TF-SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; TF-SCALABLE: [[MIDDLE_BLOCK]]: ; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]] ; TF-SCALABLE: [[FOR_END]]: @@ -608,7 +596,7 @@ define void @uniform_store_of_loop_varying(ptr noalias nocapture %a, ptr noalias ; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; SCALABLE-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT]] ; SCALABLE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; SCALABLE: [[MIDDLE_BLOCK]]: ; SCALABLE-NEXT: br label %[[FOR_END:.*]] ; SCALABLE: [[FOR_END]]: @@ -630,7 +618,7 @@ define void @uniform_store_of_loop_varying(ptr noalias nocapture %a, ptr noalias ; FIXEDLEN-NEXT: [[TMP4:%.*]] = add i64 [[INDEX]], 7 ; FIXEDLEN-NEXT: store i64 [[TMP4]], ptr [[B]], align 8 ; FIXEDLEN-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] -; FIXEDLEN-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, 
ptr [[TMP5]], i32 4 +; FIXEDLEN-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[TMP5]], i64 4 ; FIXEDLEN-NEXT: store <4 x i64> [[BROADCAST_SPLAT]], ptr [[TMP5]], align 8 ; FIXEDLEN-NEXT: store <4 x i64> [[BROADCAST_SPLAT]], ptr [[TMP7]], align 8 ; FIXEDLEN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 @@ -679,7 +667,7 @@ define void @uniform_store_of_loop_varying(ptr noalias nocapture %a, ptr noalias ; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]] ; TF-SCALABLE-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT2]] ; TF-SCALABLE-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; TF-SCALABLE-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; TF-SCALABLE: [[MIDDLE_BLOCK]]: ; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]] ; TF-SCALABLE: [[FOR_END]]: @@ -731,7 +719,7 @@ define void @conditional_uniform_store(ptr noalias nocapture %a, ptr noalias noc ; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]] ; SCALABLE-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] ; SCALABLE-NEXT: [[TMP11:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; SCALABLE-NEXT: br i1 [[TMP11]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; SCALABLE-NEXT: br i1 [[TMP11]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; SCALABLE: [[MIDDLE_BLOCK]]: ; SCALABLE-NEXT: br label %[[FOR_END:.*]] ; SCALABLE: [[FOR_END]]: @@ -756,7 +744,7 @@ define void @conditional_uniform_store(ptr noalias nocapture %a, ptr noalias noc ; FIXEDLEN-NEXT: call void @llvm.masked.scatter.v4i64.v4p0(<4 x i64> [[BROADCAST_SPLAT]], <4 x ptr> align 8 [[BROADCAST_SPLAT2]], <4 x i1> [[TMP1]]) ; FIXEDLEN-NEXT: call void @llvm.masked.scatter.v4i64.v4p0(<4 x i64> [[BROADCAST_SPLAT]], <4 x ptr> align 8 [[BROADCAST_SPLAT2]], <4 x i1> [[TMP2]]) ; 
FIXEDLEN-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] -; FIXEDLEN-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[TMP3]], i32 4 +; FIXEDLEN-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[TMP3]], i64 4 ; FIXEDLEN-NEXT: store <4 x i64> [[BROADCAST_SPLAT]], ptr [[TMP3]], align 8 ; FIXEDLEN-NEXT: store <4 x i64> [[BROADCAST_SPLAT]], ptr [[TMP5]], align 8 ; FIXEDLEN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 @@ -812,7 +800,7 @@ define void @conditional_uniform_store(ptr noalias nocapture %a, ptr noalias noc ; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]] ; TF-SCALABLE-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] ; TF-SCALABLE-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; TF-SCALABLE-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; TF-SCALABLE: [[MIDDLE_BLOCK]]: ; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]] ; TF-SCALABLE: [[FOR_END]]: @@ -860,7 +848,7 @@ define void @uniform_store_unaligned(ptr noalias nocapture %a, ptr noalias nocap ; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP10]], [[INDEX]] ; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]] ; SCALABLE-NEXT: [[TMP6:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; SCALABLE-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; SCALABLE-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; SCALABLE: [[MIDDLE_BLOCK]]: ; SCALABLE-NEXT: br label %[[FOR_END:.*]] ; SCALABLE: [[FOR_END]]: @@ -878,7 +866,7 @@ define void @uniform_store_unaligned(ptr noalias nocapture %a, ptr noalias nocap ; FIXEDLEN-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; FIXEDLEN-NEXT: store i64 [[V]], ptr [[B]], align 1 ; 
FIXEDLEN-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] -; FIXEDLEN-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i32 4 +; FIXEDLEN-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i64 4 ; FIXEDLEN-NEXT: store <4 x i64> [[BROADCAST_SPLAT]], ptr [[TMP1]], align 8 ; FIXEDLEN-NEXT: store <4 x i64> [[BROADCAST_SPLAT]], ptr [[TMP3]], align 8 ; FIXEDLEN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 @@ -918,7 +906,7 @@ define void @uniform_store_unaligned(ptr noalias nocapture %a, ptr noalias nocap ; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP7]], [[INDEX]] ; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]] ; TF-SCALABLE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; TF-SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; TF-SCALABLE: [[MIDDLE_BLOCK]]: ; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]] ; TF-SCALABLE: [[FOR_END]]: diff --git a/llvm/test/Transforms/LoopVectorize/X86/conversion-cost.ll b/llvm/test/Transforms/LoopVectorize/X86/conversion-cost.ll index 0287645d9d7f9..94ebf01509ec2 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/conversion-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/conversion-cost.ll @@ -126,9 +126,9 @@ define void @conversion_cost2(i32 %n, ptr nocapture %A, ptr nocapture %B) nounwi ; CHECK-NEXT: [[TMP19:%.*]] = sitofp <2 x i64> [[TMP10]] to <2 x float> ; CHECK-NEXT: [[TMP20:%.*]] = sitofp <2 x i64> [[TMP11]] to <2 x float> ; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i64 [[OFFSET_IDX]] -; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i32 2 -; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i32 4 -; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i32 6 +; 
CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i64 2 +; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i64 4 +; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i64 6 ; CHECK-NEXT: store <2 x float> [[TMP12]], ptr [[TMP13]], align 4 ; CHECK-NEXT: store <2 x float> [[TMP18]], ptr [[TMP15]], align 4 ; CHECK-NEXT: store <2 x float> [[TMP19]], ptr [[TMP16]], align 4 diff --git a/llvm/test/Transforms/LoopVectorize/X86/cost-model.ll b/llvm/test/Transforms/LoopVectorize/X86/cost-model.ll index b3c45a565a8fe..c70a3aa249919 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/cost-model.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/cost-model.ll @@ -385,7 +385,7 @@ define void @multi_exit(ptr %dst, ptr %src.1, ptr %src.2, i64 %A, i64 %B) #0 { ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[LOOP]] ] ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = trunc i64 [[INDEX]] to i32 ; CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds i64, ptr [[SRC_3]], i32 [[OFFSET_IDX]] -; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds i64, ptr [[TMP24]], i32 2 +; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds i64, ptr [[TMP24]], i64 2 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i64>, ptr [[TMP25]], align 8, !alias.scope [[META9:![0-9]+]] ; CHECK-NEXT: [[TMP26:%.*]] = icmp eq <2 x i64> [[WIDE_LOAD]], zeroinitializer ; CHECK-NEXT: [[TMP27:%.*]] = and <2 x i1> [[TMP23]], [[TMP26]] diff --git a/llvm/test/Transforms/LoopVectorize/X86/drop-inbounds-flags-for-reverse-vector-pointer.ll b/llvm/test/Transforms/LoopVectorize/X86/drop-inbounds-flags-for-reverse-vector-pointer.ll index 3165422dcc539..d19ae728cc913 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/drop-inbounds-flags-for-reverse-vector-pointer.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/drop-inbounds-flags-for-reverse-vector-pointer.ll @@ -23,8 +23,8 @@ define i1 @fn(ptr %nno) #0 { ; CHECK-NEXT: [[TMP2:%.*]] = and <4 x i64> 
[[VEC_IND]], splat (i64 1) ; CHECK-NEXT: [[TMP3:%.*]] = icmp eq <4 x i64> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw i32, ptr [[NNO]], i64 [[OFFSET_IDX]] -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i32, ptr [[TMP23]], i32 0 -; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i32, ptr [[TMP5]], i32 -3 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i32, ptr [[TMP23]], i64 0 +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i32, ptr [[TMP4]], i64 -3 ; CHECK-NEXT: [[REVERSE:%.*]] = shufflevector <4 x i1> [[TMP1]], <4 x i1> poison, <4 x i32> ; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP6]], <4 x i1> [[REVERSE]], <4 x i32> poison) ; CHECK-NEXT: [[REVERSE1:%.*]] = shufflevector <4 x i32> [[WIDE_MASKED_LOAD]], <4 x i32> poison, <4 x i32> diff --git a/llvm/test/Transforms/LoopVectorize/X86/epilog-vectorization-inductions.ll b/llvm/test/Transforms/LoopVectorize/X86/epilog-vectorization-inductions.ll index 6e940ee58fabe..a1b92e0658bd3 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/epilog-vectorization-inductions.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/epilog-vectorization-inductions.ll @@ -161,9 +161,9 @@ define void @test_induction_step_needs_expansion(ptr noalias %j, ptr %k, i64 %l, ; CHECK-NEXT: [[TMP6:%.*]] = sub <16 x i16> [[STEP_ADD_2]], [[DOTSPLAT]] ; CHECK-NEXT: [[TMP7:%.*]] = sub <16 x i16> [[STEP_ADD_3]], [[DOTSPLAT]] ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i16, ptr [[K:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i16, ptr [[TMP8]], i32 16 -; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i16, ptr [[TMP8]], i32 32 -; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i16, ptr [[TMP8]], i32 48 +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i16, ptr [[TMP8]], i64 16 +; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i16, ptr [[TMP8]], i64 32 +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i16, ptr [[TMP8]], i64 48 ; 
CHECK-NEXT: store <16 x i16> [[TMP4]], ptr [[TMP8]], align 2 ; CHECK-NEXT: store <16 x i16> [[TMP5]], ptr [[TMP10]], align 2 ; CHECK-NEXT: store <16 x i16> [[TMP6]], ptr [[TMP21]], align 2 diff --git a/llvm/test/Transforms/LoopVectorize/X86/fixed-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/X86/fixed-order-recurrence.ll index 12b8d1e15b523..84579d97b38e2 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/fixed-order-recurrence.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/fixed-order-recurrence.ll @@ -26,7 +26,7 @@ define void @firstorderrec(ptr nocapture noundef readonly %x, ptr noalias nocapt ; CHECK-NEXT: [[VECTOR_RECUR:%.*]] = phi <16 x i8> [ [[VECTOR_RECUR_INIT]], [[VECTOR_PH]] ], [ [[WIDE_LOAD1:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 1, [[INDEX]] ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[X]], i64 [[OFFSET_IDX]] -; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i32 16 +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i64 16 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 ; CHECK-NEXT: [[WIDE_LOAD1]] = load <16 x i8>, ptr [[TMP6]], align 1 ; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <16 x i8> [[VECTOR_RECUR]], <16 x i8> [[WIDE_LOAD]], <16 x i32> @@ -34,7 +34,7 @@ define void @firstorderrec(ptr nocapture noundef readonly %x, ptr noalias nocapt ; CHECK-NEXT: [[TMP9:%.*]] = add <16 x i8> [[WIDE_LOAD]], [[TMP7]] ; CHECK-NEXT: [[TMP10:%.*]] = add <16 x i8> [[WIDE_LOAD1]], [[TMP8]] ; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[Y:%.*]], i64 [[OFFSET_IDX]] -; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i32 16 +; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i64 16 ; CHECK-NEXT: store <16 x i8> [[TMP9]], ptr [[TMP11]], align 1 ; CHECK-NEXT: store <16 x i8> [[TMP10]], ptr [[TMP14]], align 1 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 @@ -119,7 +119,7 @@ define void 
@thirdorderrec(ptr nocapture noundef readonly %x, ptr noalias nocapt ; CHECK-NEXT: [[VECTOR_RECUR4:%.*]] = phi <16 x i8> [ [[VECTOR_RECUR_INIT3]], [[VECTOR_PH]] ], [ [[TMP10:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 3, [[INDEX]] ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[X]], i64 [[OFFSET_IDX]] -; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i32 16 +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i64 16 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 ; CHECK-NEXT: [[WIDE_LOAD5]] = load <16 x i8>, ptr [[TMP6]], align 1 ; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <16 x i8> [[VECTOR_RECUR]], <16 x i8> [[WIDE_LOAD]], <16 x i32> @@ -135,7 +135,7 @@ define void @thirdorderrec(ptr nocapture noundef readonly %x, ptr noalias nocapt ; CHECK-NEXT: [[TMP17:%.*]] = add <16 x i8> [[TMP15]], [[WIDE_LOAD]] ; CHECK-NEXT: [[TMP18:%.*]] = add <16 x i8> [[TMP16]], [[WIDE_LOAD5]] ; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i8, ptr [[Y:%.*]], i64 [[OFFSET_IDX]] -; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[TMP19]], i32 16 +; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[TMP19]], i64 16 ; CHECK-NEXT: store <16 x i8> [[TMP17]], ptr [[TMP19]], align 1 ; CHECK-NEXT: store <16 x i8> [[TMP18]], ptr [[TMP22]], align 1 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 diff --git a/llvm/test/Transforms/LoopVectorize/X86/float-induction-x86.ll b/llvm/test/Transforms/LoopVectorize/X86/float-induction-x86.ll index 39217e51ab117..41249c595f9eb 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/float-induction-x86.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/float-induction-x86.ll @@ -41,9 +41,9 @@ define void @fp_iv_loop1(ptr noalias nocapture %A, i32 %N) #0 { ; AUTO_VEC-NEXT: [[STEP_ADD2:%.*]] = fadd fast <8 x float> [[STEP_ADD]], splat (float 4.000000e+00) ; AUTO_VEC-NEXT: [[STEP_ADD3:%.*]] = fadd fast <8 x float> [[STEP_ADD2]], splat 
(float 4.000000e+00) ; AUTO_VEC-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]] -; AUTO_VEC-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i32 8 -; AUTO_VEC-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i32 16 -; AUTO_VEC-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i32 24 +; AUTO_VEC-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i64 8 +; AUTO_VEC-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i64 16 +; AUTO_VEC-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i64 24 ; AUTO_VEC-NEXT: store <8 x float> [[VEC_IND]], ptr [[TMP1]], align 4 ; AUTO_VEC-NEXT: store <8 x float> [[STEP_ADD]], ptr [[TMP2]], align 4 ; AUTO_VEC-NEXT: store <8 x float> [[STEP_ADD2]], ptr [[TMP3]], align 4 @@ -208,9 +208,9 @@ define double @external_use_with_fast_math(ptr %a, i64 %n) { ; AUTO_VEC-NEXT: [[STEP_ADD_2:%.*]] = fadd fast <4 x double> [[STEP_ADD]], splat (double 1.200000e+01) ; AUTO_VEC-NEXT: [[STEP_ADD_3:%.*]] = fadd fast <4 x double> [[STEP_ADD_2]], splat (double 1.200000e+01) ; AUTO_VEC-NEXT: [[TMP1:%.*]] = getelementptr double, ptr [[A]], i64 [[INDEX]] -; AUTO_VEC-NEXT: [[TMP2:%.*]] = getelementptr double, ptr [[TMP1]], i32 4 -; AUTO_VEC-NEXT: [[TMP3:%.*]] = getelementptr double, ptr [[TMP1]], i32 8 -; AUTO_VEC-NEXT: [[TMP4:%.*]] = getelementptr double, ptr [[TMP1]], i32 12 +; AUTO_VEC-NEXT: [[TMP2:%.*]] = getelementptr double, ptr [[TMP1]], i64 4 +; AUTO_VEC-NEXT: [[TMP3:%.*]] = getelementptr double, ptr [[TMP1]], i64 8 +; AUTO_VEC-NEXT: [[TMP4:%.*]] = getelementptr double, ptr [[TMP1]], i64 12 ; AUTO_VEC-NEXT: store <4 x double> [[VEC_IND]], ptr [[TMP1]], align 8 ; AUTO_VEC-NEXT: store <4 x double> [[STEP_ADD]], ptr [[TMP2]], align 8 ; AUTO_VEC-NEXT: store <4 x double> [[STEP_ADD_2]], ptr [[TMP3]], align 8 @@ -326,9 +326,9 @@ define void @fadd_reassoc_FMF(ptr nocapture %p, i32 %N) { ; AUTO_VEC-NEXT: [[STEP_ADD2:%.*]] = fadd reassoc <8 x float> 
[[STEP_ADD]], splat (float 3.360000e+02) ; AUTO_VEC-NEXT: [[STEP_ADD3:%.*]] = fadd reassoc <8 x float> [[STEP_ADD2]], splat (float 3.360000e+02) ; AUTO_VEC-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[P]], i64 [[INDEX]] -; AUTO_VEC-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 8 -; AUTO_VEC-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 16 -; AUTO_VEC-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 24 +; AUTO_VEC-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 8 +; AUTO_VEC-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 16 +; AUTO_VEC-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 24 ; AUTO_VEC-NEXT: [[WIDE_LOAD:%.*]] = load <8 x float>, ptr [[TMP2]], align 4 ; AUTO_VEC-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x float>, ptr [[TMP3]], align 4 ; AUTO_VEC-NEXT: [[WIDE_LOAD3:%.*]] = load <8 x float>, ptr [[TMP4]], align 4 diff --git a/llvm/test/Transforms/LoopVectorize/X86/fminimumnum.ll b/llvm/test/Transforms/LoopVectorize/X86/fminimumnum.ll index a0637ceb53cf2..137c09b653f2c 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/fminimumnum.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/fminimumnum.ll @@ -22,17 +22,17 @@ define void @fmin32(ptr noundef readonly captures(none) %input1, ptr noundef rea ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[INPUT1]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw float, ptr [[TMP2]], i32 4 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw float, ptr [[TMP2]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP2]], align 4 ; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x float>, ptr [[TMP4]], align 4 ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[INPUT2]], i64 
0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw float, ptr [[TMP5]], i32 4 +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw float, ptr [[TMP5]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x float>, ptr [[TMP5]], align 4 ; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x float>, ptr [[TMP7]], align 4 ; CHECK-NEXT: [[TMP8:%.*]] = call <4 x float> @llvm.minimumnum.v4f32(<4 x float> [[WIDE_LOAD]], <4 x float> [[WIDE_LOAD6]]) ; CHECK-NEXT: [[TMP9:%.*]] = call <4 x float> @llvm.minimumnum.v4f32(<4 x float> [[WIDE_LOAD5]], <4 x float> [[WIDE_LOAD7]]) ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[OUTPUT]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw float, ptr [[TMP10]], i32 4 +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw float, ptr [[TMP10]], i64 4 ; CHECK-NEXT: store <4 x float> [[TMP8]], ptr [[TMP10]], align 4 ; CHECK-NEXT: store <4 x float> [[TMP9]], ptr [[TMP12]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 @@ -99,17 +99,17 @@ define void @fmax32(ptr noundef readonly captures(none) %input1, ptr noundef rea ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[INPUT1]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw float, ptr [[TMP2]], i32 4 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw float, ptr [[TMP2]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP2]], align 4 ; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x float>, ptr [[TMP4]], align 4 ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[INPUT2]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw float, ptr [[TMP5]], i32 4 +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw float, ptr [[TMP5]], i64 4 ; CHECK-NEXT: 
[[WIDE_LOAD6:%.*]] = load <4 x float>, ptr [[TMP5]], align 4 ; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x float>, ptr [[TMP7]], align 4 ; CHECK-NEXT: [[TMP8:%.*]] = call <4 x float> @llvm.maximumnum.v4f32(<4 x float> [[WIDE_LOAD]], <4 x float> [[WIDE_LOAD6]]) ; CHECK-NEXT: [[TMP9:%.*]] = call <4 x float> @llvm.maximumnum.v4f32(<4 x float> [[WIDE_LOAD5]], <4 x float> [[WIDE_LOAD7]]) ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [4096 x float], ptr [[OUTPUT]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw float, ptr [[TMP10]], i32 4 +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw float, ptr [[TMP10]], i64 4 ; CHECK-NEXT: store <4 x float> [[TMP8]], ptr [[TMP10]], align 4 ; CHECK-NEXT: store <4 x float> [[TMP9]], ptr [[TMP12]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 @@ -176,17 +176,17 @@ define void @fmin64(ptr noundef readonly captures(none) %input1, ptr noundef rea ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[INPUT1]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw double, ptr [[TMP2]], i32 2 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw double, ptr [[TMP2]], i64 2 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x double>, ptr [[TMP2]], align 8 ; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <2 x double>, ptr [[TMP4]], align 8 ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[INPUT2]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw double, ptr [[TMP5]], i32 2 +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw double, ptr [[TMP5]], i64 2 ; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <2 x double>, ptr [[TMP5]], align 8 ; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <2 x double>, ptr [[TMP7]], align 8 ; CHECK-NEXT: [[TMP8:%.*]] = call <2 x double> 
@llvm.minimumnum.v2f64(<2 x double> [[WIDE_LOAD]], <2 x double> [[WIDE_LOAD6]]) ; CHECK-NEXT: [[TMP9:%.*]] = call <2 x double> @llvm.minimumnum.v2f64(<2 x double> [[WIDE_LOAD5]], <2 x double> [[WIDE_LOAD7]]) ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[OUTPUT]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw double, ptr [[TMP10]], i32 2 +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw double, ptr [[TMP10]], i64 2 ; CHECK-NEXT: store <2 x double> [[TMP8]], ptr [[TMP10]], align 8 ; CHECK-NEXT: store <2 x double> [[TMP9]], ptr [[TMP12]], align 8 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 @@ -253,17 +253,17 @@ define void @fmax64(ptr noundef readonly captures(none) %input1, ptr noundef rea ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[INPUT1]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw double, ptr [[TMP2]], i32 2 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw double, ptr [[TMP2]], i64 2 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x double>, ptr [[TMP2]], align 8 ; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <2 x double>, ptr [[TMP4]], align 8 ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[INPUT2]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw double, ptr [[TMP5]], i32 2 +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw double, ptr [[TMP5]], i64 2 ; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <2 x double>, ptr [[TMP5]], align 8 ; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <2 x double>, ptr [[TMP7]], align 8 ; CHECK-NEXT: [[TMP8:%.*]] = call <2 x double> @llvm.maximumnum.v2f64(<2 x double> [[WIDE_LOAD]], <2 x double> [[WIDE_LOAD6]]) ; CHECK-NEXT: [[TMP9:%.*]] = call <2 x double> @llvm.maximumnum.v2f64(<2 x double> 
[[WIDE_LOAD5]], <2 x double> [[WIDE_LOAD7]]) ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [4096 x double], ptr [[OUTPUT]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw double, ptr [[TMP10]], i32 2 +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw double, ptr [[TMP10]], i64 2 ; CHECK-NEXT: store <2 x double> [[TMP8]], ptr [[TMP10]], align 8 ; CHECK-NEXT: store <2 x double> [[TMP9]], ptr [[TMP12]], align 8 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 diff --git a/llvm/test/Transforms/LoopVectorize/X86/imprecise-through-phis.ll b/llvm/test/Transforms/LoopVectorize/X86/imprecise-through-phis.ll index 877fcd4d638eb..34a99b07ee93e 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/imprecise-through-phis.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/imprecise-through-phis.ll @@ -75,7 +75,7 @@ define double @sumIfVector(ptr nocapture readonly %arr) { ; SSE-NEXT: [[VEC_PHI:%.*]] = phi <2 x double> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PREDPHI:%.*]], [[VECTOR_BODY]] ] ; SSE-NEXT: [[VEC_PHI1:%.*]] = phi <2 x double> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PREDPHI3:%.*]], [[VECTOR_BODY]] ] ; SSE-NEXT: [[TMP2:%.*]] = getelementptr double, ptr [[ARR:%.*]], i32 [[INDEX]] -; SSE-NEXT: [[TMP5:%.*]] = getelementptr double, ptr [[TMP2]], i32 2 +; SSE-NEXT: [[TMP5:%.*]] = getelementptr double, ptr [[TMP2]], i64 2 ; SSE-NEXT: [[WIDE_LOAD:%.*]] = load <2 x double>, ptr [[TMP2]], align 8 ; SSE-NEXT: [[WIDE_LOAD2:%.*]] = load <2 x double>, ptr [[TMP5]], align 8 ; SSE-NEXT: [[TMP6:%.*]] = fcmp fast une <2 x double> [[WIDE_LOAD]], splat (double 4.200000e+01) @@ -106,9 +106,9 @@ define double @sumIfVector(ptr nocapture readonly %arr) { ; AVX-NEXT: [[VEC_PHI2:%.*]] = phi <4 x double> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PREDPHI8:%.*]], [[VECTOR_BODY]] ] ; AVX-NEXT: [[VEC_PHI3:%.*]] = phi <4 x double> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PREDPHI9:%.*]], [[VECTOR_BODY]] ] ; AVX-NEXT: [[TMP4:%.*]] = getelementptr double, 
ptr [[ARR:%.*]], i32 [[INDEX]] -; AVX-NEXT: [[TMP9:%.*]] = getelementptr double, ptr [[TMP4]], i32 4 -; AVX-NEXT: [[TMP10:%.*]] = getelementptr double, ptr [[TMP4]], i32 8 -; AVX-NEXT: [[TMP11:%.*]] = getelementptr double, ptr [[TMP4]], i32 12 +; AVX-NEXT: [[TMP9:%.*]] = getelementptr double, ptr [[TMP4]], i64 4 +; AVX-NEXT: [[TMP10:%.*]] = getelementptr double, ptr [[TMP4]], i64 8 +; AVX-NEXT: [[TMP11:%.*]] = getelementptr double, ptr [[TMP4]], i64 12 ; AVX-NEXT: [[WIDE_LOAD:%.*]] = load <4 x double>, ptr [[TMP4]], align 8 ; AVX-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x double>, ptr [[TMP9]], align 8 ; AVX-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x double>, ptr [[TMP10]], align 8 diff --git a/llvm/test/Transforms/LoopVectorize/X86/induction-costs.ll b/llvm/test/Transforms/LoopVectorize/X86/induction-costs.ll index 4028dd87e34b3..04bff3c393f62 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/induction-costs.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/induction-costs.ll @@ -125,13 +125,13 @@ define void @multiple_truncated_ivs_with_wide_uses(i1 %c, ptr %A, ptr %B) { ; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[C]], <4 x i16> [[VEC_IND]], <4 x i16> splat (i16 10) ; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[C]], <4 x i16> [[STEP_ADD]], <4 x i16> splat (i16 10) ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i16, ptr [[A]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i16, ptr [[TMP4]], i32 4 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i16, ptr [[TMP4]], i64 4 ; CHECK-NEXT: store <4 x i16> [[TMP1]], ptr [[TMP4]], align 2, !alias.scope [[META6:![0-9]+]], !noalias [[META9:![0-9]+]] -; CHECK-NEXT: store <4 x i16> [[TMP2]], ptr [[TMP7]], align 2, !alias.scope [[META6]], !noalias [[META9]] +; CHECK-NEXT: store <4 x i16> [[TMP2]], ptr [[TMP3]], align 2, !alias.scope [[META6]], !noalias [[META9]] ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i32, ptr [[B]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[TMP8]], i32 4 +; CHECK-NEXT: [[TMP5:%.*]] = 
getelementptr i32, ptr [[TMP8]], i64 4 ; CHECK-NEXT: store <4 x i32> [[VEC_IND3]], ptr [[TMP8]], align 4, !alias.scope [[META9]] -; CHECK-NEXT: store <4 x i32> [[STEP_ADD4]], ptr [[TMP11]], align 4, !alias.scope [[META9]] +; CHECK-NEXT: store <4 x i32> [[STEP_ADD4]], ptr [[TMP5]], align 4, !alias.scope [[META9]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i16> [[STEP_ADD]], splat (i16 4) ; CHECK-NEXT: [[VEC_IND_NEXT6]] = add <4 x i32> [[STEP_ADD4]], splat (i32 4) @@ -192,7 +192,7 @@ define void @truncated_ivs_with_wide_and_scalar_uses(i1 %c, ptr %dst) { ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i16, ptr [[DST]], i32 [[TMP0]] ; CHECK-NEXT: [[TMP5:%.*]] = select i1 [[C]], <8 x i16> [[VEC_IND]], <8 x i16> splat (i16 10) ; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[C]], <8 x i16> [[STEP_ADD]], <8 x i16> splat (i16 10) -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i16, ptr [[TMP3]], i32 8 +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i16, ptr [[TMP3]], i64 8 ; CHECK-NEXT: store <8 x i16> [[TMP5]], ptr [[TMP3]], align 2 ; CHECK-NEXT: store <8 x i16> [[TMP6]], ptr [[TMP8]], align 2 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 @@ -477,9 +477,9 @@ define i32 @test_scalar_predicated_cost(i64 %x, i64 %y, ptr %A) #0 { ; CHECK-NEXT: [[TMP21:%.*]] = trunc <8 x i64> [[TMP13]] to <8 x i32> ; CHECK-NEXT: [[TMP22:%.*]] = trunc <8 x i64> [[TMP14]] to <8 x i32> ; CHECK-NEXT: [[TMP23:%.*]] = trunc <8 x i64> [[TMP15]] to <8 x i32> -; CHECK-NEXT: [[TMP25:%.*]] = getelementptr i32, ptr [[TMP16]], i32 8 -; CHECK-NEXT: [[TMP26:%.*]] = getelementptr i32, ptr [[TMP16]], i32 16 -; CHECK-NEXT: [[TMP27:%.*]] = getelementptr i32, ptr [[TMP16]], i32 24 +; CHECK-NEXT: [[TMP25:%.*]] = getelementptr i32, ptr [[TMP16]], i64 8 +; CHECK-NEXT: [[TMP26:%.*]] = getelementptr i32, ptr [[TMP16]], i64 16 +; CHECK-NEXT: [[TMP27:%.*]] = getelementptr i32, ptr [[TMP16]], i64 24 ; CHECK-NEXT: call void @llvm.masked.store.v8i32.p0(<8 x i32> [[TMP20]], ptr 
align 4 [[TMP16]], <8 x i1> [[TMP8]]) ; CHECK-NEXT: call void @llvm.masked.store.v8i32.p0(<8 x i32> [[TMP21]], ptr align 4 [[TMP25]], <8 x i1> [[TMP9]]) ; CHECK-NEXT: call void @llvm.masked.store.v8i32.p0(<8 x i32> [[TMP22]], ptr align 4 [[TMP26]], <8 x i1> [[TMP10]]) diff --git a/llvm/test/Transforms/LoopVectorize/X86/induction-step.ll b/llvm/test/Transforms/LoopVectorize/X86/induction-step.ll index 61f07eff768c1..d25d9f81de985 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/induction-step.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/induction-step.ll @@ -27,7 +27,7 @@ define i16 @wide_add_induction_step_live_in(ptr %dst, i64 %N, i16 %off) { ; CHECK-NEXT: [[TMP4:%.*]] = add <4 x i16> [[VEC_IND]], [[BROADCAST_SPLAT]] ; CHECK-NEXT: [[TMP9:%.*]] = add <4 x i16> [[STEP_ADD]], [[BROADCAST_SPLAT]] ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i16, ptr [[DST:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i16, ptr [[TMP5]], i32 4 +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i16, ptr [[TMP5]], i64 4 ; CHECK-NEXT: store <4 x i16> [[TMP4]], ptr [[TMP5]], align 2 ; CHECK-NEXT: store <4 x i16> [[TMP9]], ptr [[TMP8]], align 2 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 @@ -99,7 +99,7 @@ define i16 @wide_sub_induction_step_live_in(ptr %dst, i64 %N, i16 %off) { ; CHECK-NEXT: [[TMP5:%.*]] = sub <4 x i16> [[VEC_IND]], [[DOTSPLAT]] ; CHECK-NEXT: [[TMP10:%.*]] = sub <4 x i16> [[STEP_ADD]], [[DOTSPLAT]] ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i16, ptr [[DST:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i16, ptr [[TMP6]], i32 4 +; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i16, ptr [[TMP6]], i64 4 ; CHECK-NEXT: store <4 x i16> [[TMP5]], ptr [[TMP6]], align 2 ; CHECK-NEXT: store <4 x i16> [[TMP10]], ptr [[TMP9]], align 2 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 diff --git a/llvm/test/Transforms/LoopVectorize/X86/intrinsiccost.ll 
b/llvm/test/Transforms/LoopVectorize/X86/intrinsiccost.ll index d75fd0e0023f7..ad6dfb054b726 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/intrinsiccost.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/intrinsiccost.ll @@ -32,9 +32,9 @@ define void @uaddsat(ptr nocapture readonly %pSrc, i16 signext %offset, ptr noca ; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PSRC:%.*]], i64 [[OFFSET_IDX]] ; CHECK-NEXT: [[OFFSET_IDX2:%.*]] = mul i64 [[INDEX]], 2 ; CHECK-NEXT: [[NEXT_GEP3:%.*]] = getelementptr i8, ptr [[PDST:%.*]], i64 [[OFFSET_IDX2]] -; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i16, ptr [[NEXT_GEP]], i32 16 -; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i16, ptr [[NEXT_GEP]], i32 32 -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i16, ptr [[NEXT_GEP]], i32 48 +; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i16, ptr [[NEXT_GEP]], i64 16 +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i16, ptr [[NEXT_GEP]], i64 32 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i16, ptr [[NEXT_GEP]], i64 48 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i16>, ptr [[NEXT_GEP]], align 2 ; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i16>, ptr [[TMP1]], align 2 ; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <16 x i16>, ptr [[TMP2]], align 2 @@ -43,9 +43,9 @@ define void @uaddsat(ptr nocapture readonly %pSrc, i16 signext %offset, ptr noca ; CHECK-NEXT: [[TMP5:%.*]] = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> [[WIDE_LOAD4]], <16 x i16> [[BROADCAST_SPLAT]]) ; CHECK-NEXT: [[TMP6:%.*]] = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> [[WIDE_LOAD5]], <16 x i16> [[BROADCAST_SPLAT]]) ; CHECK-NEXT: [[TMP7:%.*]] = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> [[WIDE_LOAD6]], <16 x i16> [[BROADCAST_SPLAT]]) -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i16, ptr [[NEXT_GEP3]], i32 16 -; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i16, ptr [[NEXT_GEP3]], i32 32 -; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i16, ptr [[NEXT_GEP3]], i32 48 +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i16, ptr [[NEXT_GEP3]], 
i64 16 +; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i16, ptr [[NEXT_GEP3]], i64 32 +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i16, ptr [[NEXT_GEP3]], i64 48 ; CHECK-NEXT: store <16 x i16> [[TMP4]], ptr [[NEXT_GEP3]], align 2 ; CHECK-NEXT: store <16 x i16> [[TMP5]], ptr [[TMP8]], align 2 ; CHECK-NEXT: store <16 x i16> [[TMP6]], ptr [[TMP9]], align 2 @@ -160,9 +160,9 @@ define void @fshl(ptr nocapture readonly %pSrc, i8 signext %offset, ptr nocaptur ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PSRC:%.*]], i64 [[INDEX]] ; CHECK-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, ptr [[PDST:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 32 -; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 64 -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 96 +; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 32 +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 64 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 96 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <32 x i8>, ptr [[NEXT_GEP]], align 2 ; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <32 x i8>, ptr [[TMP1]], align 2 ; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <32 x i8>, ptr [[TMP2]], align 2 @@ -171,9 +171,9 @@ define void @fshl(ptr nocapture readonly %pSrc, i8 signext %offset, ptr nocaptur ; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i8> @llvm.fshl.v32i8(<32 x i8> [[WIDE_LOAD3]], <32 x i8> [[WIDE_LOAD3]], <32 x i8> [[BROADCAST_SPLAT]]) ; CHECK-NEXT: [[TMP6:%.*]] = call <32 x i8> @llvm.fshl.v32i8(<32 x i8> [[WIDE_LOAD4]], <32 x i8> [[WIDE_LOAD4]], <32 x i8> [[BROADCAST_SPLAT]]) ; CHECK-NEXT: [[TMP7:%.*]] = call <32 x i8> @llvm.fshl.v32i8(<32 x i8> [[WIDE_LOAD5]], <32 x i8> [[WIDE_LOAD5]], <32 x i8> [[BROADCAST_SPLAT]]) -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[NEXT_GEP2]], i32 32 -; 
CHECK-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[NEXT_GEP2]], i32 64 -; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[NEXT_GEP2]], i32 96 +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[NEXT_GEP2]], i64 32 +; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[NEXT_GEP2]], i64 64 +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[NEXT_GEP2]], i64 96 ; CHECK-NEXT: store <32 x i8> [[TMP4]], ptr [[NEXT_GEP2]], align 2 ; CHECK-NEXT: store <32 x i8> [[TMP5]], ptr [[TMP8]], align 2 ; CHECK-NEXT: store <32 x i8> [[TMP6]], ptr [[TMP9]], align 2 diff --git a/llvm/test/Transforms/LoopVectorize/X86/invariant-store-vectorization.ll b/llvm/test/Transforms/LoopVectorize/X86/invariant-store-vectorization.ll index b710236c026d2..751e885733f17 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/invariant-store-vectorization.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/invariant-store-vectorization.ll @@ -38,9 +38,9 @@ define i32 @inv_val_store_to_inv_address_with_reduction(ptr %a, i64 %n, ptr %b) ; CHECK-NEXT: [[VEC_PHI5:%.*]] = phi <16 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_PHI6:%.*]] = phi <16 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 16 -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 32 -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 48 +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 16 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 32 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 48 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i32>, ptr [[TMP1]], align 8, !alias.scope [[META0:![0-9]+]] ; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <16 x i32>, ptr [[TMP2]], align 8, 
!alias.scope [[META0]] ; CHECK-NEXT: [[WIDE_LOAD8:%.*]] = load <16 x i32>, ptr [[TMP3]], align 8, !alias.scope [[META0]] diff --git a/llvm/test/Transforms/LoopVectorize/X86/iv-live-outs.ll b/llvm/test/Transforms/LoopVectorize/X86/iv-live-outs.ll index bcb6b5c422343..a247285317a1e 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/iv-live-outs.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/iv-live-outs.ll @@ -18,9 +18,9 @@ define i64 @test_pr98660(ptr %dst, i64 %N) { ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[INDEX]], 1 ; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i32, ptr [[DST]], i64 [[TMP5]] -; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i32, ptr [[TMP9]], i32 8 -; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[TMP9]], i32 16 -; CHECK-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[TMP9]], i32 24 +; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i32, ptr [[TMP9]], i64 8 +; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[TMP9]], i64 16 +; CHECK-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[TMP9]], i64 24 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i32>, ptr [[TMP9]], align 4 ; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <8 x i32>, ptr [[TMP14]], align 4 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i32>, ptr [[TMP15]], align 4 diff --git a/llvm/test/Transforms/LoopVectorize/X86/limit-vf-by-tripcount.ll b/llvm/test/Transforms/LoopVectorize/X86/limit-vf-by-tripcount.ll index 6e3b2a5390948..ea3ec99cf46e1 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/limit-vf-by-tripcount.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/limit-vf-by-tripcount.ll @@ -193,17 +193,17 @@ define void @test_tc_20(ptr noalias %src, ptr noalias %dst) { ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[SRC:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP3:%.*]] = 
getelementptr inbounds i8, ptr [[TMP1]], i32 4 -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 8 -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 12 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 4 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 8 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 12 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP1]], align 64 ; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i8>, ptr [[TMP3]], align 64 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i8>, ptr [[TMP4]], align 64 ; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <4 x i8>, ptr [[TMP5]], align 64 ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[DST:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i32 4 -; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i32 8 -; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i32 12 +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i64 4 +; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i64 8 +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i64 12 ; CHECK-NEXT: store <4 x i8> [[WIDE_LOAD]], ptr [[TMP6]], align 64 ; CHECK-NEXT: store <4 x i8> [[WIDE_LOAD1]], ptr [[TMP8]], align 64 ; CHECK-NEXT: store <4 x i8> [[WIDE_LOAD2]], ptr [[TMP9]], align 64 diff --git a/llvm/test/Transforms/LoopVectorize/X86/load-deref-pred.ll b/llvm/test/Transforms/LoopVectorize/X86/load-deref-pred.ll index 6605338771c47..78363e13595cb 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/load-deref-pred.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/load-deref-pred.ll @@ -39,9 +39,9 @@ define i32 @test_explicit_pred(i64 %len) { ; CHECK-NEXT: [[TMP6:%.*]] = icmp slt <4 x i64> [[STEP_ADD1]], [[BROADCAST_SPLAT]] ; CHECK-NEXT: [[TMP7:%.*]] = icmp slt <4 x i64> [[STEP_ADD2]], 
[[BROADCAST_SPLAT]] ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i32, ptr [[ALLOCA]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i32, ptr [[TMP8]], i32 4 -; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i32, ptr [[TMP8]], i32 8 -; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[TMP8]], i32 12 +; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i32, ptr [[TMP8]], i64 4 +; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i32, ptr [[TMP8]], i64 8 +; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[TMP8]], i64 12 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP8]], align 4 ; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x i32>, ptr [[TMP13]], align 4 ; CHECK-NEXT: [[WIDE_LOAD8:%.*]] = load <4 x i32>, ptr [[TMP14]], align 4 @@ -171,9 +171,9 @@ define i32 @test_explicit_pred_generic(i64 %len, ptr %test_base) { ; CHECK-NEXT: [[TMP62:%.*]] = insertelement <4 x i1> [[TMP61]], i1 [[TMP58]], i32 2 ; CHECK-NEXT: [[TMP63:%.*]] = insertelement <4 x i1> [[TMP62]], i1 [[TMP59]], i32 3 ; CHECK-NEXT: [[TMP64:%.*]] = getelementptr i32, ptr [[ALLOCA]], i64 [[TMP0]] -; CHECK-NEXT: [[TMP69:%.*]] = getelementptr i32, ptr [[TMP64]], i32 4 -; CHECK-NEXT: [[TMP70:%.*]] = getelementptr i32, ptr [[TMP64]], i32 8 -; CHECK-NEXT: [[TMP71:%.*]] = getelementptr i32, ptr [[TMP64]], i32 12 +; CHECK-NEXT: [[TMP69:%.*]] = getelementptr i32, ptr [[TMP64]], i64 4 +; CHECK-NEXT: [[TMP70:%.*]] = getelementptr i32, ptr [[TMP64]], i64 8 +; CHECK-NEXT: [[TMP71:%.*]] = getelementptr i32, ptr [[TMP64]], i64 12 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP64]], align 4 ; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x i32>, ptr [[TMP69]], align 4 ; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x i32>, ptr [[TMP70]], align 4 @@ -718,9 +718,9 @@ define i32 @test_max_trip_count(i64 %len, ptr %test_base, i64 %n) { ; CHECK-NEXT: [[TMP63:%.*]] = insertelement <4 x i1> [[TMP62]], i1 [[TMP59]], i32 2 ; CHECK-NEXT: [[TMP64:%.*]] = insertelement <4 x i1> [[TMP63]], i1 [[TMP60]], i32 3 ; CHECK-NEXT: 
[[TMP65:%.*]] = getelementptr i32, ptr [[ALLOCA]], i64 [[TMP1]] -; CHECK-NEXT: [[TMP70:%.*]] = getelementptr i32, ptr [[TMP65]], i32 4 -; CHECK-NEXT: [[TMP71:%.*]] = getelementptr i32, ptr [[TMP65]], i32 8 -; CHECK-NEXT: [[TMP72:%.*]] = getelementptr i32, ptr [[TMP65]], i32 12 +; CHECK-NEXT: [[TMP70:%.*]] = getelementptr i32, ptr [[TMP65]], i64 4 +; CHECK-NEXT: [[TMP71:%.*]] = getelementptr i32, ptr [[TMP65]], i64 8 +; CHECK-NEXT: [[TMP72:%.*]] = getelementptr i32, ptr [[TMP65]], i64 12 ; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP65]], <4 x i1> [[TMP40]], <4 x i32> poison) ; CHECK-NEXT: [[WIDE_MASKED_LOAD4:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP70]], <4 x i1> [[TMP48]], <4 x i32> poison) ; CHECK-NEXT: [[WIDE_MASKED_LOAD5:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP71]], <4 x i1> [[TMP56]], <4 x i32> poison) @@ -877,9 +877,9 @@ define i32 @test_non_zero_start(i64 %len, ptr %test_base) { ; CHECK-NEXT: [[TMP62:%.*]] = insertelement <4 x i1> [[TMP61]], i1 [[TMP58]], i32 2 ; CHECK-NEXT: [[TMP63:%.*]] = insertelement <4 x i1> [[TMP62]], i1 [[TMP59]], i32 3 ; CHECK-NEXT: [[TMP64:%.*]] = getelementptr i32, ptr [[ALLOCA]], i64 [[TMP0]] -; CHECK-NEXT: [[TMP69:%.*]] = getelementptr i32, ptr [[TMP64]], i32 4 -; CHECK-NEXT: [[TMP70:%.*]] = getelementptr i32, ptr [[TMP64]], i32 8 -; CHECK-NEXT: [[TMP71:%.*]] = getelementptr i32, ptr [[TMP64]], i32 12 +; CHECK-NEXT: [[TMP69:%.*]] = getelementptr i32, ptr [[TMP64]], i64 4 +; CHECK-NEXT: [[TMP70:%.*]] = getelementptr i32, ptr [[TMP64]], i64 8 +; CHECK-NEXT: [[TMP71:%.*]] = getelementptr i32, ptr [[TMP64]], i64 12 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP64]], align 4 ; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x i32>, ptr [[TMP69]], align 4 ; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x i32>, ptr [[TMP70]], align 4 @@ -1231,9 +1231,9 @@ define i32 @neg_off_by_many(i64 %len, ptr %test_base) { ; CHECK-NEXT: 
[[TMP62:%.*]] = insertelement <4 x i1> [[TMP61]], i1 [[TMP58]], i32 2 ; CHECK-NEXT: [[TMP63:%.*]] = insertelement <4 x i1> [[TMP62]], i1 [[TMP59]], i32 3 ; CHECK-NEXT: [[TMP64:%.*]] = getelementptr i32, ptr [[ALLOCA]], i64 [[TMP0]] -; CHECK-NEXT: [[TMP69:%.*]] = getelementptr i32, ptr [[TMP64]], i32 4 -; CHECK-NEXT: [[TMP70:%.*]] = getelementptr i32, ptr [[TMP64]], i32 8 -; CHECK-NEXT: [[TMP71:%.*]] = getelementptr i32, ptr [[TMP64]], i32 12 +; CHECK-NEXT: [[TMP69:%.*]] = getelementptr i32, ptr [[TMP64]], i64 4 +; CHECK-NEXT: [[TMP70:%.*]] = getelementptr i32, ptr [[TMP64]], i64 8 +; CHECK-NEXT: [[TMP71:%.*]] = getelementptr i32, ptr [[TMP64]], i64 12 ; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP64]], <4 x i1> [[TMP39]], <4 x i32> poison) ; CHECK-NEXT: [[WIDE_MASKED_LOAD4:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP69]], <4 x i1> [[TMP47]], <4 x i32> poison) ; CHECK-NEXT: [[WIDE_MASKED_LOAD5:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP70]], <4 x i1> [[TMP55]], <4 x i32> poison) @@ -1362,9 +1362,9 @@ define i32 @neg_off_by_one_iteration(i64 %len, ptr %test_base) { ; CHECK-NEXT: [[TMP62:%.*]] = insertelement <4 x i1> [[TMP61]], i1 [[TMP58]], i32 2 ; CHECK-NEXT: [[TMP63:%.*]] = insertelement <4 x i1> [[TMP62]], i1 [[TMP59]], i32 3 ; CHECK-NEXT: [[TMP64:%.*]] = getelementptr i32, ptr [[ALLOCA]], i64 [[TMP0]] -; CHECK-NEXT: [[TMP69:%.*]] = getelementptr i32, ptr [[TMP64]], i32 4 -; CHECK-NEXT: [[TMP70:%.*]] = getelementptr i32, ptr [[TMP64]], i32 8 -; CHECK-NEXT: [[TMP71:%.*]] = getelementptr i32, ptr [[TMP64]], i32 12 +; CHECK-NEXT: [[TMP69:%.*]] = getelementptr i32, ptr [[TMP64]], i64 4 +; CHECK-NEXT: [[TMP70:%.*]] = getelementptr i32, ptr [[TMP64]], i64 8 +; CHECK-NEXT: [[TMP71:%.*]] = getelementptr i32, ptr [[TMP64]], i64 12 ; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP64]], <4 x i1> [[TMP39]], <4 x i32> poison) 
; CHECK-NEXT: [[WIDE_MASKED_LOAD4:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP69]], <4 x i1> [[TMP47]], <4 x i32> poison) ; CHECK-NEXT: [[WIDE_MASKED_LOAD5:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP70]], <4 x i1> [[TMP55]], <4 x i32> poison) @@ -1493,9 +1493,9 @@ define i32 @neg_off_by_one_byte(i64 %len, ptr %test_base) { ; CHECK-NEXT: [[TMP62:%.*]] = insertelement <4 x i1> [[TMP61]], i1 [[TMP58]], i32 2 ; CHECK-NEXT: [[TMP63:%.*]] = insertelement <4 x i1> [[TMP62]], i1 [[TMP59]], i32 3 ; CHECK-NEXT: [[TMP64:%.*]] = getelementptr i32, ptr [[ALLOCA]], i64 [[TMP0]] -; CHECK-NEXT: [[TMP69:%.*]] = getelementptr i32, ptr [[TMP64]], i32 4 -; CHECK-NEXT: [[TMP70:%.*]] = getelementptr i32, ptr [[TMP64]], i32 8 -; CHECK-NEXT: [[TMP71:%.*]] = getelementptr i32, ptr [[TMP64]], i32 12 +; CHECK-NEXT: [[TMP69:%.*]] = getelementptr i32, ptr [[TMP64]], i64 4 +; CHECK-NEXT: [[TMP70:%.*]] = getelementptr i32, ptr [[TMP64]], i64 8 +; CHECK-NEXT: [[TMP71:%.*]] = getelementptr i32, ptr [[TMP64]], i64 12 ; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP64]], <4 x i1> [[TMP39]], <4 x i32> poison) ; CHECK-NEXT: [[WIDE_MASKED_LOAD4:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP69]], <4 x i1> [[TMP47]], <4 x i32> poison) ; CHECK-NEXT: [[WIDE_MASKED_LOAD5:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP70]], <4 x i1> [[TMP55]], <4 x i32> poison) @@ -1633,9 +1633,9 @@ define i32 @test_constant_max(i64 %len, ptr %test_base) { ; CHECK-NEXT: [[TMP63:%.*]] = insertelement <4 x i1> [[TMP62]], i1 [[TMP59]], i32 2 ; CHECK-NEXT: [[TMP64:%.*]] = insertelement <4 x i1> [[TMP63]], i1 [[TMP60]], i32 3 ; CHECK-NEXT: [[TMP65:%.*]] = getelementptr i32, ptr [[ALLOCA]], i64 [[TMP1]] -; CHECK-NEXT: [[TMP70:%.*]] = getelementptr i32, ptr [[TMP65]], i32 4 -; CHECK-NEXT: [[TMP71:%.*]] = getelementptr i32, ptr [[TMP65]], i32 8 -; CHECK-NEXT: [[TMP72:%.*]] = getelementptr i32, 
ptr [[TMP65]], i32 12 +; CHECK-NEXT: [[TMP70:%.*]] = getelementptr i32, ptr [[TMP65]], i64 4 +; CHECK-NEXT: [[TMP71:%.*]] = getelementptr i32, ptr [[TMP65]], i64 8 +; CHECK-NEXT: [[TMP72:%.*]] = getelementptr i32, ptr [[TMP65]], i64 12 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP65]], align 4 ; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x i32>, ptr [[TMP70]], align 4 ; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x i32>, ptr [[TMP71]], align 4 @@ -1793,9 +1793,9 @@ define i32 @test_allocsize(i64 %len, ptr %test_base) nofree nosync { ; CHECK-NEXT: [[TMP62:%.*]] = insertelement <4 x i1> [[TMP61]], i1 [[TMP58]], i32 2 ; CHECK-NEXT: [[TMP63:%.*]] = insertelement <4 x i1> [[TMP62]], i1 [[TMP59]], i32 3 ; CHECK-NEXT: [[TMP64:%.*]] = getelementptr i32, ptr [[ALLOCATION]], i64 [[TMP0]] -; CHECK-NEXT: [[TMP69:%.*]] = getelementptr i32, ptr [[TMP64]], i32 4 -; CHECK-NEXT: [[TMP70:%.*]] = getelementptr i32, ptr [[TMP64]], i32 8 -; CHECK-NEXT: [[TMP71:%.*]] = getelementptr i32, ptr [[TMP64]], i32 12 +; CHECK-NEXT: [[TMP69:%.*]] = getelementptr i32, ptr [[TMP64]], i64 4 +; CHECK-NEXT: [[TMP70:%.*]] = getelementptr i32, ptr [[TMP64]], i64 8 +; CHECK-NEXT: [[TMP71:%.*]] = getelementptr i32, ptr [[TMP64]], i64 12 ; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP64]], <4 x i1> [[TMP39]], <4 x i32> poison) ; CHECK-NEXT: [[WIDE_MASKED_LOAD4:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP69]], <4 x i1> [[TMP47]], <4 x i32> poison) ; CHECK-NEXT: [[WIDE_MASKED_LOAD5:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP70]], <4 x i1> [[TMP55]], <4 x i32> poison) @@ -1925,9 +1925,9 @@ define i32 @test_allocsize_array(i64 %len, ptr %test_base) nofree nosync { ; CHECK-NEXT: [[TMP62:%.*]] = insertelement <4 x i1> [[TMP61]], i1 [[TMP58]], i32 2 ; CHECK-NEXT: [[TMP63:%.*]] = insertelement <4 x i1> [[TMP62]], i1 [[TMP59]], i32 3 ; CHECK-NEXT: [[TMP64:%.*]] = getelementptr i32, ptr [[ALLOCATION]], i64 
[[TMP0]] -; CHECK-NEXT: [[TMP69:%.*]] = getelementptr i32, ptr [[TMP64]], i32 4 -; CHECK-NEXT: [[TMP70:%.*]] = getelementptr i32, ptr [[TMP64]], i32 8 -; CHECK-NEXT: [[TMP71:%.*]] = getelementptr i32, ptr [[TMP64]], i32 12 +; CHECK-NEXT: [[TMP69:%.*]] = getelementptr i32, ptr [[TMP64]], i64 4 +; CHECK-NEXT: [[TMP70:%.*]] = getelementptr i32, ptr [[TMP64]], i64 8 +; CHECK-NEXT: [[TMP71:%.*]] = getelementptr i32, ptr [[TMP64]], i64 12 ; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP64]], <4 x i1> [[TMP39]], <4 x i32> poison) ; CHECK-NEXT: [[WIDE_MASKED_LOAD4:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP69]], <4 x i1> [[TMP47]], <4 x i32> poison) ; CHECK-NEXT: [[WIDE_MASKED_LOAD5:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP70]], <4 x i1> [[TMP55]], <4 x i32> poison) @@ -2067,9 +2067,9 @@ define i32 @test_allocsize_cond_deref(i1 %allzero, ptr %test_base) { ; CHECK-NEXT: [[TMP62:%.*]] = insertelement <4 x i1> [[TMP61]], i1 [[TMP58]], i32 2 ; CHECK-NEXT: [[TMP63:%.*]] = insertelement <4 x i1> [[TMP62]], i1 [[TMP59]], i32 3 ; CHECK-NEXT: [[TMP64:%.*]] = getelementptr i32, ptr [[ALLOCATION]], i64 [[TMP0]] -; CHECK-NEXT: [[TMP69:%.*]] = getelementptr i32, ptr [[TMP64]], i32 4 -; CHECK-NEXT: [[TMP70:%.*]] = getelementptr i32, ptr [[TMP64]], i32 8 -; CHECK-NEXT: [[TMP71:%.*]] = getelementptr i32, ptr [[TMP64]], i32 12 +; CHECK-NEXT: [[TMP69:%.*]] = getelementptr i32, ptr [[TMP64]], i64 4 +; CHECK-NEXT: [[TMP70:%.*]] = getelementptr i32, ptr [[TMP64]], i64 8 +; CHECK-NEXT: [[TMP71:%.*]] = getelementptr i32, ptr [[TMP64]], i64 12 ; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP64]], <4 x i1> [[TMP39]], <4 x i32> poison) ; CHECK-NEXT: [[WIDE_MASKED_LOAD4:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[TMP69]], <4 x i1> [[TMP47]], <4 x i32> poison) ; CHECK-NEXT: [[WIDE_MASKED_LOAD5:%.*]] = call <4 x i32> 
@llvm.masked.load.v4i32.p0(ptr align 4 [[TMP70]], <4 x i1> [[TMP55]], <4 x i32> poison) diff --git a/llvm/test/Transforms/LoopVectorize/X86/masked-store-cost.ll b/llvm/test/Transforms/LoopVectorize/X86/masked-store-cost.ll index 2c172b2aecd16..1d0906902ad62 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/masked-store-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/masked-store-cost.ll @@ -35,9 +35,9 @@ define i32 @test_scalar_predicated_cost(i64 %x, i64 %y, ptr %A) #0 { ; CHECK-NEXT: [[TMP21:%.*]] = trunc <8 x i64> [[TMP13]] to <8 x i32> ; CHECK-NEXT: [[TMP22:%.*]] = trunc <8 x i64> [[TMP14]] to <8 x i32> ; CHECK-NEXT: [[TMP23:%.*]] = trunc <8 x i64> [[TMP15]] to <8 x i32> -; CHECK-NEXT: [[TMP25:%.*]] = getelementptr i32, ptr [[TMP16]], i32 8 -; CHECK-NEXT: [[TMP26:%.*]] = getelementptr i32, ptr [[TMP16]], i32 16 -; CHECK-NEXT: [[TMP27:%.*]] = getelementptr i32, ptr [[TMP16]], i32 24 +; CHECK-NEXT: [[TMP25:%.*]] = getelementptr i32, ptr [[TMP16]], i64 8 +; CHECK-NEXT: [[TMP26:%.*]] = getelementptr i32, ptr [[TMP16]], i64 16 +; CHECK-NEXT: [[TMP27:%.*]] = getelementptr i32, ptr [[TMP16]], i64 24 ; CHECK-NEXT: call void @llvm.masked.store.v8i32.p0(<8 x i32> [[TMP20]], ptr align 4 [[TMP16]], <8 x i1> [[TMP8]]) ; CHECK-NEXT: call void @llvm.masked.store.v8i32.p0(<8 x i32> [[TMP21]], ptr align 4 [[TMP25]], <8 x i1> [[TMP9]]) ; CHECK-NEXT: call void @llvm.masked.store.v8i32.p0(<8 x i32> [[TMP22]], ptr align 4 [[TMP26]], <8 x i1> [[TMP10]]) @@ -199,7 +199,7 @@ define void @test_scalar_cost_single_store_loop_varying_cond(ptr %dst, ptr noali ; CHECK-NEXT: [[STRIDED_VEC5:%.*]] = shufflevector <16 x i32> [[WIDE_VEC4]], <16 x i32> poison, <4 x i32> ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq <4 x i32> [[STRIDED_VEC]], splat (i32 123) ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq <4 x i32> [[STRIDED_VEC5]], splat (i32 123) -; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[NEXT_GEP]], i32 4 +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[NEXT_GEP]], i64 4 ; CHECK-NEXT: 
call void @llvm.masked.store.v4i32.p0(<4 x i32> zeroinitializer, ptr align 4 [[NEXT_GEP]], <4 x i1> [[TMP8]]) ; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> zeroinitializer, ptr align 4 [[TMP11]], <4 x i1> [[TMP9]]) ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 diff --git a/llvm/test/Transforms/LoopVectorize/X86/masked_load_store.ll b/llvm/test/Transforms/LoopVectorize/X86/masked_load_store.ll index 932153a23bdbd..e4977ee642b09 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/masked_load_store.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/masked_load_store.ll @@ -72,9 +72,9 @@ define void @foo1(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea ; AVX2: [[VECTOR_BODY]]: ; AVX2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; AVX2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TRIGGER]], i64 [[INDEX]] -; AVX2-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 8 -; AVX2-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 16 -; AVX2-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 24 +; AVX2-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i64 8 +; AVX2-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i64 16 +; AVX2-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i64 24 ; AVX2-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i32>, ptr [[TMP3]], align 4 ; AVX2-NEXT: [[WIDE_LOAD5:%.*]] = load <8 x i32>, ptr [[TMP5]], align 4 ; AVX2-NEXT: [[WIDE_LOAD6:%.*]] = load <8 x i32>, ptr [[TMP6]], align 4 @@ -84,9 +84,9 @@ define void @foo1(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea ; AVX2-NEXT: [[TMP10:%.*]] = icmp slt <8 x i32> [[WIDE_LOAD6]], splat (i32 100) ; AVX2-NEXT: [[TMP11:%.*]] = icmp slt <8 x i32> [[WIDE_LOAD7]], splat (i32 100) ; AVX2-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr [[B]], i64 [[INDEX]] -; AVX2-NEXT: [[TMP14:%.*]] = getelementptr i32, ptr [[TMP12]], i32 8 -; 
AVX2-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[TMP12]], i32 16 -; AVX2-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[TMP12]], i32 24 +; AVX2-NEXT: [[TMP14:%.*]] = getelementptr i32, ptr [[TMP12]], i64 8 +; AVX2-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[TMP12]], i64 16 +; AVX2-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[TMP12]], i64 24 ; AVX2-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p0(ptr align 4 [[TMP12]], <8 x i1> [[TMP8]], <8 x i32> poison) ; AVX2-NEXT: [[WIDE_MASKED_LOAD8:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p0(ptr align 4 [[TMP14]], <8 x i1> [[TMP9]], <8 x i32> poison) ; AVX2-NEXT: [[WIDE_MASKED_LOAD9:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p0(ptr align 4 [[TMP15]], <8 x i1> [[TMP10]], <8 x i32> poison) @@ -96,9 +96,9 @@ define void @foo1(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea ; AVX2-NEXT: [[TMP19:%.*]] = add nsw <8 x i32> [[WIDE_MASKED_LOAD9]], [[WIDE_LOAD6]] ; AVX2-NEXT: [[TMP20:%.*]] = add nsw <8 x i32> [[WIDE_MASKED_LOAD10]], [[WIDE_LOAD7]] ; AVX2-NEXT: [[TMP21:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]] -; AVX2-NEXT: [[TMP23:%.*]] = getelementptr i32, ptr [[TMP21]], i32 8 -; AVX2-NEXT: [[TMP24:%.*]] = getelementptr i32, ptr [[TMP21]], i32 16 -; AVX2-NEXT: [[TMP25:%.*]] = getelementptr i32, ptr [[TMP21]], i32 24 +; AVX2-NEXT: [[TMP23:%.*]] = getelementptr i32, ptr [[TMP21]], i64 8 +; AVX2-NEXT: [[TMP24:%.*]] = getelementptr i32, ptr [[TMP21]], i64 16 +; AVX2-NEXT: [[TMP25:%.*]] = getelementptr i32, ptr [[TMP21]], i64 24 ; AVX2-NEXT: call void @llvm.masked.store.v8i32.p0(<8 x i32> [[TMP17]], ptr align 4 [[TMP21]], <8 x i1> [[TMP8]]) ; AVX2-NEXT: call void @llvm.masked.store.v8i32.p0(<8 x i32> [[TMP18]], ptr align 4 [[TMP23]], <8 x i1> [[TMP9]]) ; AVX2-NEXT: call void @llvm.masked.store.v8i32.p0(<8 x i32> [[TMP19]], ptr align 4 [[TMP24]], <8 x i1> [[TMP10]]) @@ -151,9 +151,9 @@ define void @foo1(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea ; 
AVX512: [[VECTOR_BODY]]: ; AVX512-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; AVX512-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TRIGGER]], i64 [[INDEX]] -; AVX512-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 16 -; AVX512-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 32 -; AVX512-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 48 +; AVX512-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i64 16 +; AVX512-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i64 32 +; AVX512-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i64 48 ; AVX512-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i32>, ptr [[TMP3]], align 4 ; AVX512-NEXT: [[WIDE_LOAD5:%.*]] = load <16 x i32>, ptr [[TMP5]], align 4 ; AVX512-NEXT: [[WIDE_LOAD6:%.*]] = load <16 x i32>, ptr [[TMP6]], align 4 @@ -163,9 +163,9 @@ define void @foo1(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea ; AVX512-NEXT: [[TMP10:%.*]] = icmp slt <16 x i32> [[WIDE_LOAD6]], splat (i32 100) ; AVX512-NEXT: [[TMP11:%.*]] = icmp slt <16 x i32> [[WIDE_LOAD7]], splat (i32 100) ; AVX512-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr [[B]], i64 [[INDEX]] -; AVX512-NEXT: [[TMP14:%.*]] = getelementptr i32, ptr [[TMP12]], i32 16 -; AVX512-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[TMP12]], i32 32 -; AVX512-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[TMP12]], i32 48 +; AVX512-NEXT: [[TMP14:%.*]] = getelementptr i32, ptr [[TMP12]], i64 16 +; AVX512-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[TMP12]], i64 32 +; AVX512-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[TMP12]], i64 48 ; AVX512-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <16 x i32> @llvm.masked.load.v16i32.p0(ptr align 4 [[TMP12]], <16 x i1> [[TMP8]], <16 x i32> poison) ; AVX512-NEXT: [[WIDE_MASKED_LOAD8:%.*]] = call <16 x i32> @llvm.masked.load.v16i32.p0(ptr align 4 [[TMP14]], <16 x i1> [[TMP9]], <16 x i32> poison) ; 
AVX512-NEXT: [[WIDE_MASKED_LOAD9:%.*]] = call <16 x i32> @llvm.masked.load.v16i32.p0(ptr align 4 [[TMP15]], <16 x i1> [[TMP10]], <16 x i32> poison) @@ -175,9 +175,9 @@ define void @foo1(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea ; AVX512-NEXT: [[TMP19:%.*]] = add nsw <16 x i32> [[WIDE_MASKED_LOAD9]], [[WIDE_LOAD6]] ; AVX512-NEXT: [[TMP20:%.*]] = add nsw <16 x i32> [[WIDE_MASKED_LOAD10]], [[WIDE_LOAD7]] ; AVX512-NEXT: [[TMP21:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]] -; AVX512-NEXT: [[TMP23:%.*]] = getelementptr i32, ptr [[TMP21]], i32 16 -; AVX512-NEXT: [[TMP24:%.*]] = getelementptr i32, ptr [[TMP21]], i32 32 -; AVX512-NEXT: [[TMP25:%.*]] = getelementptr i32, ptr [[TMP21]], i32 48 +; AVX512-NEXT: [[TMP23:%.*]] = getelementptr i32, ptr [[TMP21]], i64 16 +; AVX512-NEXT: [[TMP24:%.*]] = getelementptr i32, ptr [[TMP21]], i64 32 +; AVX512-NEXT: [[TMP25:%.*]] = getelementptr i32, ptr [[TMP21]], i64 48 ; AVX512-NEXT: call void @llvm.masked.store.v16i32.p0(<16 x i32> [[TMP17]], ptr align 4 [[TMP21]], <16 x i1> [[TMP8]]) ; AVX512-NEXT: call void @llvm.masked.store.v16i32.p0(<16 x i32> [[TMP18]], ptr align 4 [[TMP23]], <16 x i1> [[TMP9]]) ; AVX512-NEXT: call void @llvm.masked.store.v16i32.p0(<16 x i32> [[TMP19]], ptr align 4 [[TMP24]], <16 x i1> [[TMP10]]) @@ -293,9 +293,9 @@ define void @foo1_addrspace1(ptr addrspace(1) nocapture %A, ptr addrspace(1) noc ; AVX2: [[VECTOR_BODY]]: ; AVX2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; AVX2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[TRIGGER]], i64 [[INDEX]] -; AVX2-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[TMP3]], i32 8 -; AVX2-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[TMP3]], i32 16 -; AVX2-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[TMP3]], i32 24 +; AVX2-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[TMP3]], i64 8 +; AVX2-NEXT: 
[[TMP6:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[TMP3]], i64 16 +; AVX2-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[TMP3]], i64 24 ; AVX2-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i32>, ptr addrspace(1) [[TMP3]], align 4 ; AVX2-NEXT: [[WIDE_LOAD5:%.*]] = load <8 x i32>, ptr addrspace(1) [[TMP5]], align 4 ; AVX2-NEXT: [[WIDE_LOAD6:%.*]] = load <8 x i32>, ptr addrspace(1) [[TMP6]], align 4 @@ -305,9 +305,9 @@ define void @foo1_addrspace1(ptr addrspace(1) nocapture %A, ptr addrspace(1) noc ; AVX2-NEXT: [[TMP10:%.*]] = icmp slt <8 x i32> [[WIDE_LOAD6]], splat (i32 100) ; AVX2-NEXT: [[TMP11:%.*]] = icmp slt <8 x i32> [[WIDE_LOAD7]], splat (i32 100) ; AVX2-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr addrspace(1) [[B]], i64 [[INDEX]] -; AVX2-NEXT: [[TMP14:%.*]] = getelementptr i32, ptr addrspace(1) [[TMP12]], i32 8 -; AVX2-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr addrspace(1) [[TMP12]], i32 16 -; AVX2-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr addrspace(1) [[TMP12]], i32 24 +; AVX2-NEXT: [[TMP14:%.*]] = getelementptr i32, ptr addrspace(1) [[TMP12]], i64 8 +; AVX2-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr addrspace(1) [[TMP12]], i64 16 +; AVX2-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr addrspace(1) [[TMP12]], i64 24 ; AVX2-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p1(ptr addrspace(1) align 4 [[TMP12]], <8 x i1> [[TMP8]], <8 x i32> poison) ; AVX2-NEXT: [[WIDE_MASKED_LOAD8:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p1(ptr addrspace(1) align 4 [[TMP14]], <8 x i1> [[TMP9]], <8 x i32> poison) ; AVX2-NEXT: [[WIDE_MASKED_LOAD9:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p1(ptr addrspace(1) align 4 [[TMP15]], <8 x i1> [[TMP10]], <8 x i32> poison) @@ -317,9 +317,9 @@ define void @foo1_addrspace1(ptr addrspace(1) nocapture %A, ptr addrspace(1) noc ; AVX2-NEXT: [[TMP19:%.*]] = add nsw <8 x i32> [[WIDE_MASKED_LOAD9]], [[WIDE_LOAD6]] ; AVX2-NEXT: [[TMP20:%.*]] = add nsw <8 x i32> [[WIDE_MASKED_LOAD10]], 
[[WIDE_LOAD7]] ; AVX2-NEXT: [[TMP21:%.*]] = getelementptr i32, ptr addrspace(1) [[A]], i64 [[INDEX]] -; AVX2-NEXT: [[TMP23:%.*]] = getelementptr i32, ptr addrspace(1) [[TMP21]], i32 8 -; AVX2-NEXT: [[TMP24:%.*]] = getelementptr i32, ptr addrspace(1) [[TMP21]], i32 16 -; AVX2-NEXT: [[TMP25:%.*]] = getelementptr i32, ptr addrspace(1) [[TMP21]], i32 24 +; AVX2-NEXT: [[TMP23:%.*]] = getelementptr i32, ptr addrspace(1) [[TMP21]], i64 8 +; AVX2-NEXT: [[TMP24:%.*]] = getelementptr i32, ptr addrspace(1) [[TMP21]], i64 16 +; AVX2-NEXT: [[TMP25:%.*]] = getelementptr i32, ptr addrspace(1) [[TMP21]], i64 24 ; AVX2-NEXT: call void @llvm.masked.store.v8i32.p1(<8 x i32> [[TMP17]], ptr addrspace(1) align 4 [[TMP21]], <8 x i1> [[TMP8]]) ; AVX2-NEXT: call void @llvm.masked.store.v8i32.p1(<8 x i32> [[TMP18]], ptr addrspace(1) align 4 [[TMP23]], <8 x i1> [[TMP9]]) ; AVX2-NEXT: call void @llvm.masked.store.v8i32.p1(<8 x i32> [[TMP19]], ptr addrspace(1) align 4 [[TMP24]], <8 x i1> [[TMP10]]) @@ -372,9 +372,9 @@ define void @foo1_addrspace1(ptr addrspace(1) nocapture %A, ptr addrspace(1) noc ; AVX512: [[VECTOR_BODY]]: ; AVX512-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; AVX512-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[TRIGGER]], i64 [[INDEX]] -; AVX512-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[TMP3]], i32 16 -; AVX512-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[TMP3]], i32 32 -; AVX512-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[TMP3]], i32 48 +; AVX512-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[TMP3]], i64 16 +; AVX512-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[TMP3]], i64 32 +; AVX512-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[TMP3]], i64 48 ; AVX512-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i32>, ptr addrspace(1) [[TMP3]], align 4 ; AVX512-NEXT: [[WIDE_LOAD5:%.*]] = 
load <16 x i32>, ptr addrspace(1) [[TMP5]], align 4 ; AVX512-NEXT: [[WIDE_LOAD6:%.*]] = load <16 x i32>, ptr addrspace(1) [[TMP6]], align 4 @@ -384,9 +384,9 @@ define void @foo1_addrspace1(ptr addrspace(1) nocapture %A, ptr addrspace(1) noc ; AVX512-NEXT: [[TMP10:%.*]] = icmp slt <16 x i32> [[WIDE_LOAD6]], splat (i32 100) ; AVX512-NEXT: [[TMP11:%.*]] = icmp slt <16 x i32> [[WIDE_LOAD7]], splat (i32 100) ; AVX512-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr addrspace(1) [[B]], i64 [[INDEX]] -; AVX512-NEXT: [[TMP14:%.*]] = getelementptr i32, ptr addrspace(1) [[TMP12]], i32 16 -; AVX512-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr addrspace(1) [[TMP12]], i32 32 -; AVX512-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr addrspace(1) [[TMP12]], i32 48 +; AVX512-NEXT: [[TMP14:%.*]] = getelementptr i32, ptr addrspace(1) [[TMP12]], i64 16 +; AVX512-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr addrspace(1) [[TMP12]], i64 32 +; AVX512-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr addrspace(1) [[TMP12]], i64 48 ; AVX512-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <16 x i32> @llvm.masked.load.v16i32.p1(ptr addrspace(1) align 4 [[TMP12]], <16 x i1> [[TMP8]], <16 x i32> poison) ; AVX512-NEXT: [[WIDE_MASKED_LOAD8:%.*]] = call <16 x i32> @llvm.masked.load.v16i32.p1(ptr addrspace(1) align 4 [[TMP14]], <16 x i1> [[TMP9]], <16 x i32> poison) ; AVX512-NEXT: [[WIDE_MASKED_LOAD9:%.*]] = call <16 x i32> @llvm.masked.load.v16i32.p1(ptr addrspace(1) align 4 [[TMP15]], <16 x i1> [[TMP10]], <16 x i32> poison) @@ -396,9 +396,9 @@ define void @foo1_addrspace1(ptr addrspace(1) nocapture %A, ptr addrspace(1) noc ; AVX512-NEXT: [[TMP19:%.*]] = add nsw <16 x i32> [[WIDE_MASKED_LOAD9]], [[WIDE_LOAD6]] ; AVX512-NEXT: [[TMP20:%.*]] = add nsw <16 x i32> [[WIDE_MASKED_LOAD10]], [[WIDE_LOAD7]] ; AVX512-NEXT: [[TMP21:%.*]] = getelementptr i32, ptr addrspace(1) [[A]], i64 [[INDEX]] -; AVX512-NEXT: [[TMP23:%.*]] = getelementptr i32, ptr addrspace(1) [[TMP21]], i32 16 -; AVX512-NEXT: [[TMP24:%.*]] = getelementptr i32, 
ptr addrspace(1) [[TMP21]], i32 32 -; AVX512-NEXT: [[TMP25:%.*]] = getelementptr i32, ptr addrspace(1) [[TMP21]], i32 48 +; AVX512-NEXT: [[TMP23:%.*]] = getelementptr i32, ptr addrspace(1) [[TMP21]], i64 16 +; AVX512-NEXT: [[TMP24:%.*]] = getelementptr i32, ptr addrspace(1) [[TMP21]], i64 32 +; AVX512-NEXT: [[TMP25:%.*]] = getelementptr i32, ptr addrspace(1) [[TMP21]], i64 48 ; AVX512-NEXT: call void @llvm.masked.store.v16i32.p1(<16 x i32> [[TMP17]], ptr addrspace(1) align 4 [[TMP21]], <16 x i1> [[TMP8]]) ; AVX512-NEXT: call void @llvm.masked.store.v16i32.p1(<16 x i32> [[TMP18]], ptr addrspace(1) align 4 [[TMP23]], <16 x i1> [[TMP9]]) ; AVX512-NEXT: call void @llvm.masked.store.v16i32.p1(<16 x i32> [[TMP19]], ptr addrspace(1) align 4 [[TMP24]], <16 x i1> [[TMP10]]) @@ -524,9 +524,9 @@ define void @foo2(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea ; AVX2: [[VECTOR_BODY]]: ; AVX2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; AVX2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TRIGGER]], i64 [[INDEX]] -; AVX2-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 8 -; AVX2-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 16 -; AVX2-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 24 +; AVX2-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i64 8 +; AVX2-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i64 16 +; AVX2-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i64 24 ; AVX2-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i32>, ptr [[TMP3]], align 4 ; AVX2-NEXT: [[WIDE_LOAD5:%.*]] = load <8 x i32>, ptr [[TMP5]], align 4 ; AVX2-NEXT: [[WIDE_LOAD6:%.*]] = load <8 x i32>, ptr [[TMP6]], align 4 @@ -536,9 +536,9 @@ define void @foo2(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea ; AVX2-NEXT: [[TMP10:%.*]] = icmp slt <8 x i32> [[WIDE_LOAD6]], splat (i32 100) ; AVX2-NEXT: [[TMP11:%.*]] = icmp slt <8 x i32> 
[[WIDE_LOAD7]], splat (i32 100) ; AVX2-NEXT: [[TMP12:%.*]] = getelementptr float, ptr [[B]], i64 [[INDEX]] -; AVX2-NEXT: [[TMP14:%.*]] = getelementptr float, ptr [[TMP12]], i32 8 -; AVX2-NEXT: [[TMP15:%.*]] = getelementptr float, ptr [[TMP12]], i32 16 -; AVX2-NEXT: [[TMP16:%.*]] = getelementptr float, ptr [[TMP12]], i32 24 +; AVX2-NEXT: [[TMP14:%.*]] = getelementptr float, ptr [[TMP12]], i64 8 +; AVX2-NEXT: [[TMP15:%.*]] = getelementptr float, ptr [[TMP12]], i64 16 +; AVX2-NEXT: [[TMP16:%.*]] = getelementptr float, ptr [[TMP12]], i64 24 ; AVX2-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <8 x float> @llvm.masked.load.v8f32.p0(ptr align 4 [[TMP12]], <8 x i1> [[TMP8]], <8 x float> poison) ; AVX2-NEXT: [[WIDE_MASKED_LOAD8:%.*]] = call <8 x float> @llvm.masked.load.v8f32.p0(ptr align 4 [[TMP14]], <8 x i1> [[TMP9]], <8 x float> poison) ; AVX2-NEXT: [[WIDE_MASKED_LOAD9:%.*]] = call <8 x float> @llvm.masked.load.v8f32.p0(ptr align 4 [[TMP15]], <8 x i1> [[TMP10]], <8 x float> poison) @@ -552,9 +552,9 @@ define void @foo2(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea ; AVX2-NEXT: [[TMP23:%.*]] = fadd <8 x float> [[WIDE_MASKED_LOAD9]], [[TMP19]] ; AVX2-NEXT: [[TMP24:%.*]] = fadd <8 x float> [[WIDE_MASKED_LOAD10]], [[TMP20]] ; AVX2-NEXT: [[TMP25:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]] -; AVX2-NEXT: [[TMP27:%.*]] = getelementptr float, ptr [[TMP25]], i32 8 -; AVX2-NEXT: [[TMP28:%.*]] = getelementptr float, ptr [[TMP25]], i32 16 -; AVX2-NEXT: [[TMP29:%.*]] = getelementptr float, ptr [[TMP25]], i32 24 +; AVX2-NEXT: [[TMP27:%.*]] = getelementptr float, ptr [[TMP25]], i64 8 +; AVX2-NEXT: [[TMP28:%.*]] = getelementptr float, ptr [[TMP25]], i64 16 +; AVX2-NEXT: [[TMP29:%.*]] = getelementptr float, ptr [[TMP25]], i64 24 ; AVX2-NEXT: call void @llvm.masked.store.v8f32.p0(<8 x float> [[TMP21]], ptr align 4 [[TMP25]], <8 x i1> [[TMP8]]) ; AVX2-NEXT: call void @llvm.masked.store.v8f32.p0(<8 x float> [[TMP22]], ptr align 4 [[TMP27]], <8 x i1> [[TMP9]]) ; AVX2-NEXT: 
call void @llvm.masked.store.v8f32.p0(<8 x float> [[TMP23]], ptr align 4 [[TMP28]], <8 x i1> [[TMP10]]) @@ -608,9 +608,9 @@ define void @foo2(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea ; AVX512: [[VECTOR_BODY]]: ; AVX512-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; AVX512-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TRIGGER]], i64 [[INDEX]] -; AVX512-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 16 -; AVX512-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 32 -; AVX512-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 48 +; AVX512-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i64 16 +; AVX512-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i64 32 +; AVX512-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i64 48 ; AVX512-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i32>, ptr [[TMP3]], align 4 ; AVX512-NEXT: [[WIDE_LOAD5:%.*]] = load <16 x i32>, ptr [[TMP5]], align 4 ; AVX512-NEXT: [[WIDE_LOAD6:%.*]] = load <16 x i32>, ptr [[TMP6]], align 4 @@ -620,9 +620,9 @@ define void @foo2(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea ; AVX512-NEXT: [[TMP10:%.*]] = icmp slt <16 x i32> [[WIDE_LOAD6]], splat (i32 100) ; AVX512-NEXT: [[TMP11:%.*]] = icmp slt <16 x i32> [[WIDE_LOAD7]], splat (i32 100) ; AVX512-NEXT: [[TMP12:%.*]] = getelementptr float, ptr [[B]], i64 [[INDEX]] -; AVX512-NEXT: [[TMP14:%.*]] = getelementptr float, ptr [[TMP12]], i32 16 -; AVX512-NEXT: [[TMP15:%.*]] = getelementptr float, ptr [[TMP12]], i32 32 -; AVX512-NEXT: [[TMP16:%.*]] = getelementptr float, ptr [[TMP12]], i32 48 +; AVX512-NEXT: [[TMP14:%.*]] = getelementptr float, ptr [[TMP12]], i64 16 +; AVX512-NEXT: [[TMP15:%.*]] = getelementptr float, ptr [[TMP12]], i64 32 +; AVX512-NEXT: [[TMP16:%.*]] = getelementptr float, ptr [[TMP12]], i64 48 ; AVX512-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <16 x float> 
@llvm.masked.load.v16f32.p0(ptr align 4 [[TMP12]], <16 x i1> [[TMP8]], <16 x float> poison) ; AVX512-NEXT: [[WIDE_MASKED_LOAD8:%.*]] = call <16 x float> @llvm.masked.load.v16f32.p0(ptr align 4 [[TMP14]], <16 x i1> [[TMP9]], <16 x float> poison) ; AVX512-NEXT: [[WIDE_MASKED_LOAD9:%.*]] = call <16 x float> @llvm.masked.load.v16f32.p0(ptr align 4 [[TMP15]], <16 x i1> [[TMP10]], <16 x float> poison) @@ -636,9 +636,9 @@ define void @foo2(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea ; AVX512-NEXT: [[TMP23:%.*]] = fadd <16 x float> [[WIDE_MASKED_LOAD9]], [[TMP19]] ; AVX512-NEXT: [[TMP24:%.*]] = fadd <16 x float> [[WIDE_MASKED_LOAD10]], [[TMP20]] ; AVX512-NEXT: [[TMP25:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]] -; AVX512-NEXT: [[TMP27:%.*]] = getelementptr float, ptr [[TMP25]], i32 16 -; AVX512-NEXT: [[TMP28:%.*]] = getelementptr float, ptr [[TMP25]], i32 32 -; AVX512-NEXT: [[TMP29:%.*]] = getelementptr float, ptr [[TMP25]], i32 48 +; AVX512-NEXT: [[TMP27:%.*]] = getelementptr float, ptr [[TMP25]], i64 16 +; AVX512-NEXT: [[TMP28:%.*]] = getelementptr float, ptr [[TMP25]], i64 32 +; AVX512-NEXT: [[TMP29:%.*]] = getelementptr float, ptr [[TMP25]], i64 48 ; AVX512-NEXT: call void @llvm.masked.store.v16f32.p0(<16 x float> [[TMP21]], ptr align 4 [[TMP25]], <16 x i1> [[TMP8]]) ; AVX512-NEXT: call void @llvm.masked.store.v16f32.p0(<16 x float> [[TMP22]], ptr align 4 [[TMP27]], <16 x i1> [[TMP9]]) ; AVX512-NEXT: call void @llvm.masked.store.v16f32.p0(<16 x float> [[TMP23]], ptr align 4 [[TMP28]], <16 x i1> [[TMP10]]) @@ -732,25 +732,25 @@ define void @foo3(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea ; AVX1: [[VECTOR_BODY]]: ; AVX1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; AVX1-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[TRIGGER]], i64 [[INDEX]] -; AVX1-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 4 -; AVX1-NEXT: [[TMP4:%.*]] = getelementptr 
inbounds i32, ptr [[TMP1]], i32 8 -; AVX1-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 12 +; AVX1-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 4 +; AVX1-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 8 +; AVX1-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 12 ; AVX1-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4, !alias.scope [[META8:![0-9]+]] -; AVX1-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x i32>, ptr [[TMP3]], align 4, !alias.scope [[META8]] -; AVX1-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4, !alias.scope [[META8]] -; AVX1-NEXT: [[WIDE_LOAD8:%.*]] = load <4 x i32>, ptr [[TMP5]], align 4, !alias.scope [[META8]] +; AVX1-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4, !alias.scope [[META8]] +; AVX1-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4, !alias.scope [[META8]] +; AVX1-NEXT: [[WIDE_LOAD8:%.*]] = load <4 x i32>, ptr [[TMP3]], align 4, !alias.scope [[META8]] ; AVX1-NEXT: [[TMP6:%.*]] = icmp slt <4 x i32> [[WIDE_LOAD]], splat (i32 100) ; AVX1-NEXT: [[TMP7:%.*]] = icmp slt <4 x i32> [[WIDE_LOAD6]], splat (i32 100) ; AVX1-NEXT: [[TMP8:%.*]] = icmp slt <4 x i32> [[WIDE_LOAD7]], splat (i32 100) ; AVX1-NEXT: [[TMP9:%.*]] = icmp slt <4 x i32> [[WIDE_LOAD8]], splat (i32 100) ; AVX1-NEXT: [[TMP10:%.*]] = getelementptr double, ptr [[B]], i64 [[INDEX]] -; AVX1-NEXT: [[TMP12:%.*]] = getelementptr double, ptr [[TMP10]], i32 4 -; AVX1-NEXT: [[TMP13:%.*]] = getelementptr double, ptr [[TMP10]], i32 8 -; AVX1-NEXT: [[TMP14:%.*]] = getelementptr double, ptr [[TMP10]], i32 12 +; AVX1-NEXT: [[TMP12:%.*]] = getelementptr double, ptr [[TMP10]], i64 4 +; AVX1-NEXT: [[TMP13:%.*]] = getelementptr double, ptr [[TMP10]], i64 8 +; AVX1-NEXT: [[TMP11:%.*]] = getelementptr double, ptr [[TMP10]], i64 12 ; AVX1-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr align 8 [[TMP10]], <4 x i1> [[TMP6]], <4 x double> 
poison), !alias.scope [[META11:![0-9]+]] ; AVX1-NEXT: [[WIDE_MASKED_LOAD9:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr align 8 [[TMP12]], <4 x i1> [[TMP7]], <4 x double> poison), !alias.scope [[META11]] ; AVX1-NEXT: [[WIDE_MASKED_LOAD10:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr align 8 [[TMP13]], <4 x i1> [[TMP8]], <4 x double> poison), !alias.scope [[META11]] -; AVX1-NEXT: [[WIDE_MASKED_LOAD11:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr align 8 [[TMP14]], <4 x i1> [[TMP9]], <4 x double> poison), !alias.scope [[META11]] +; AVX1-NEXT: [[WIDE_MASKED_LOAD11:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr align 8 [[TMP11]], <4 x i1> [[TMP9]], <4 x double> poison), !alias.scope [[META11]] ; AVX1-NEXT: [[TMP15:%.*]] = sitofp <4 x i32> [[WIDE_LOAD]] to <4 x double> ; AVX1-NEXT: [[TMP16:%.*]] = sitofp <4 x i32> [[WIDE_LOAD6]] to <4 x double> ; AVX1-NEXT: [[TMP17:%.*]] = sitofp <4 x i32> [[WIDE_LOAD7]] to <4 x double> @@ -760,13 +760,13 @@ define void @foo3(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea ; AVX1-NEXT: [[TMP21:%.*]] = fadd <4 x double> [[WIDE_MASKED_LOAD10]], [[TMP17]] ; AVX1-NEXT: [[TMP22:%.*]] = fadd <4 x double> [[WIDE_MASKED_LOAD11]], [[TMP18]] ; AVX1-NEXT: [[TMP23:%.*]] = getelementptr double, ptr [[A]], i64 [[INDEX]] -; AVX1-NEXT: [[TMP25:%.*]] = getelementptr double, ptr [[TMP23]], i32 4 -; AVX1-NEXT: [[TMP26:%.*]] = getelementptr double, ptr [[TMP23]], i32 8 -; AVX1-NEXT: [[TMP27:%.*]] = getelementptr double, ptr [[TMP23]], i32 12 +; AVX1-NEXT: [[TMP24:%.*]] = getelementptr double, ptr [[TMP23]], i64 4 +; AVX1-NEXT: [[TMP25:%.*]] = getelementptr double, ptr [[TMP23]], i64 8 +; AVX1-NEXT: [[TMP26:%.*]] = getelementptr double, ptr [[TMP23]], i64 12 ; AVX1-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[TMP19]], ptr align 8 [[TMP23]], <4 x i1> [[TMP6]]), !alias.scope [[META13:![0-9]+]], !noalias [[META15:![0-9]+]] -; AVX1-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> 
[[TMP20]], ptr align 8 [[TMP25]], <4 x i1> [[TMP7]]), !alias.scope [[META13]], !noalias [[META15]] -; AVX1-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[TMP21]], ptr align 8 [[TMP26]], <4 x i1> [[TMP8]]), !alias.scope [[META13]], !noalias [[META15]] -; AVX1-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[TMP22]], ptr align 8 [[TMP27]], <4 x i1> [[TMP9]]), !alias.scope [[META13]], !noalias [[META15]] +; AVX1-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[TMP20]], ptr align 8 [[TMP24]], <4 x i1> [[TMP7]]), !alias.scope [[META13]], !noalias [[META15]] +; AVX1-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[TMP21]], ptr align 8 [[TMP25]], <4 x i1> [[TMP8]]), !alias.scope [[META13]], !noalias [[META15]] +; AVX1-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[TMP22]], ptr align 8 [[TMP26]], <4 x i1> [[TMP9]]), !alias.scope [[META13]], !noalias [[META15]] ; AVX1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; AVX1-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], 10000 ; AVX1-NEXT: br i1 [[TMP28]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] @@ -795,25 +795,25 @@ define void @foo3(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea ; AVX2: [[VECTOR_BODY]]: ; AVX2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; AVX2-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[TRIGGER]], i64 [[INDEX]] -; AVX2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 4 -; AVX2-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 8 -; AVX2-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 12 +; AVX2-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 4 +; AVX2-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 8 +; AVX2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 12 ; AVX2-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1]], align 
4, !alias.scope [[META12:![0-9]+]] -; AVX2-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x i32>, ptr [[TMP3]], align 4, !alias.scope [[META12]] -; AVX2-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4, !alias.scope [[META12]] -; AVX2-NEXT: [[WIDE_LOAD8:%.*]] = load <4 x i32>, ptr [[TMP5]], align 4, !alias.scope [[META12]] +; AVX2-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4, !alias.scope [[META12]] +; AVX2-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4, !alias.scope [[META12]] +; AVX2-NEXT: [[WIDE_LOAD8:%.*]] = load <4 x i32>, ptr [[TMP3]], align 4, !alias.scope [[META12]] ; AVX2-NEXT: [[TMP6:%.*]] = icmp slt <4 x i32> [[WIDE_LOAD]], splat (i32 100) ; AVX2-NEXT: [[TMP7:%.*]] = icmp slt <4 x i32> [[WIDE_LOAD6]], splat (i32 100) ; AVX2-NEXT: [[TMP8:%.*]] = icmp slt <4 x i32> [[WIDE_LOAD7]], splat (i32 100) ; AVX2-NEXT: [[TMP9:%.*]] = icmp slt <4 x i32> [[WIDE_LOAD8]], splat (i32 100) ; AVX2-NEXT: [[TMP10:%.*]] = getelementptr double, ptr [[B]], i64 [[INDEX]] -; AVX2-NEXT: [[TMP12:%.*]] = getelementptr double, ptr [[TMP10]], i32 4 -; AVX2-NEXT: [[TMP13:%.*]] = getelementptr double, ptr [[TMP10]], i32 8 -; AVX2-NEXT: [[TMP14:%.*]] = getelementptr double, ptr [[TMP10]], i32 12 +; AVX2-NEXT: [[TMP12:%.*]] = getelementptr double, ptr [[TMP10]], i64 4 +; AVX2-NEXT: [[TMP13:%.*]] = getelementptr double, ptr [[TMP10]], i64 8 +; AVX2-NEXT: [[TMP11:%.*]] = getelementptr double, ptr [[TMP10]], i64 12 ; AVX2-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr align 8 [[TMP10]], <4 x i1> [[TMP6]], <4 x double> poison), !alias.scope [[META15:![0-9]+]] ; AVX2-NEXT: [[WIDE_MASKED_LOAD9:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr align 8 [[TMP12]], <4 x i1> [[TMP7]], <4 x double> poison), !alias.scope [[META15]] ; AVX2-NEXT: [[WIDE_MASKED_LOAD10:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr align 8 [[TMP13]], <4 x i1> [[TMP8]], <4 x double> poison), !alias.scope [[META15]] -; AVX2-NEXT: 
[[WIDE_MASKED_LOAD11:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr align 8 [[TMP14]], <4 x i1> [[TMP9]], <4 x double> poison), !alias.scope [[META15]] +; AVX2-NEXT: [[WIDE_MASKED_LOAD11:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr align 8 [[TMP11]], <4 x i1> [[TMP9]], <4 x double> poison), !alias.scope [[META15]] ; AVX2-NEXT: [[TMP15:%.*]] = sitofp <4 x i32> [[WIDE_LOAD]] to <4 x double> ; AVX2-NEXT: [[TMP16:%.*]] = sitofp <4 x i32> [[WIDE_LOAD6]] to <4 x double> ; AVX2-NEXT: [[TMP17:%.*]] = sitofp <4 x i32> [[WIDE_LOAD7]] to <4 x double> @@ -823,13 +823,13 @@ define void @foo3(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea ; AVX2-NEXT: [[TMP21:%.*]] = fadd <4 x double> [[WIDE_MASKED_LOAD10]], [[TMP17]] ; AVX2-NEXT: [[TMP22:%.*]] = fadd <4 x double> [[WIDE_MASKED_LOAD11]], [[TMP18]] ; AVX2-NEXT: [[TMP23:%.*]] = getelementptr double, ptr [[A]], i64 [[INDEX]] -; AVX2-NEXT: [[TMP25:%.*]] = getelementptr double, ptr [[TMP23]], i32 4 -; AVX2-NEXT: [[TMP26:%.*]] = getelementptr double, ptr [[TMP23]], i32 8 -; AVX2-NEXT: [[TMP27:%.*]] = getelementptr double, ptr [[TMP23]], i32 12 +; AVX2-NEXT: [[TMP24:%.*]] = getelementptr double, ptr [[TMP23]], i64 4 +; AVX2-NEXT: [[TMP25:%.*]] = getelementptr double, ptr [[TMP23]], i64 8 +; AVX2-NEXT: [[TMP26:%.*]] = getelementptr double, ptr [[TMP23]], i64 12 ; AVX2-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[TMP19]], ptr align 8 [[TMP23]], <4 x i1> [[TMP6]]), !alias.scope [[META17:![0-9]+]], !noalias [[META19:![0-9]+]] -; AVX2-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[TMP20]], ptr align 8 [[TMP25]], <4 x i1> [[TMP7]]), !alias.scope [[META17]], !noalias [[META19]] -; AVX2-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[TMP21]], ptr align 8 [[TMP26]], <4 x i1> [[TMP8]]), !alias.scope [[META17]], !noalias [[META19]] -; AVX2-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[TMP22]], ptr align 8 [[TMP27]], <4 x i1> [[TMP9]]), !alias.scope [[META17]], 
!noalias [[META19]] +; AVX2-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[TMP20]], ptr align 8 [[TMP24]], <4 x i1> [[TMP7]]), !alias.scope [[META17]], !noalias [[META19]] +; AVX2-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[TMP21]], ptr align 8 [[TMP25]], <4 x i1> [[TMP8]]), !alias.scope [[META17]], !noalias [[META19]] +; AVX2-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[TMP22]], ptr align 8 [[TMP26]], <4 x i1> [[TMP9]]), !alias.scope [[META17]], !noalias [[META19]] ; AVX2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; AVX2-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], 10000 ; AVX2-NEXT: br i1 [[TMP28]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] @@ -860,25 +860,25 @@ define void @foo3(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea ; AVX512: [[VECTOR_BODY]]: ; AVX512-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; AVX512-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[TRIGGER]], i64 [[INDEX]] -; AVX512-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 8 -; AVX512-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 16 -; AVX512-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 24 +; AVX512-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 8 +; AVX512-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 16 +; AVX512-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 24 ; AVX512-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i32>, ptr [[TMP1]], align 4, !alias.scope [[META12:![0-9]+]] -; AVX512-NEXT: [[WIDE_LOAD6:%.*]] = load <8 x i32>, ptr [[TMP3]], align 4, !alias.scope [[META12]] -; AVX512-NEXT: [[WIDE_LOAD7:%.*]] = load <8 x i32>, ptr [[TMP4]], align 4, !alias.scope [[META12]] -; AVX512-NEXT: [[WIDE_LOAD8:%.*]] = load <8 x i32>, ptr [[TMP5]], align 4, !alias.scope [[META12]] +; AVX512-NEXT: [[WIDE_LOAD6:%.*]] = load <8 x 
i32>, ptr [[TMP4]], align 4, !alias.scope [[META12]] +; AVX512-NEXT: [[WIDE_LOAD7:%.*]] = load <8 x i32>, ptr [[TMP2]], align 4, !alias.scope [[META12]] +; AVX512-NEXT: [[WIDE_LOAD8:%.*]] = load <8 x i32>, ptr [[TMP3]], align 4, !alias.scope [[META12]] ; AVX512-NEXT: [[TMP6:%.*]] = icmp slt <8 x i32> [[WIDE_LOAD]], splat (i32 100) ; AVX512-NEXT: [[TMP7:%.*]] = icmp slt <8 x i32> [[WIDE_LOAD6]], splat (i32 100) ; AVX512-NEXT: [[TMP8:%.*]] = icmp slt <8 x i32> [[WIDE_LOAD7]], splat (i32 100) ; AVX512-NEXT: [[TMP9:%.*]] = icmp slt <8 x i32> [[WIDE_LOAD8]], splat (i32 100) ; AVX512-NEXT: [[TMP10:%.*]] = getelementptr double, ptr [[B]], i64 [[INDEX]] -; AVX512-NEXT: [[TMP12:%.*]] = getelementptr double, ptr [[TMP10]], i32 8 -; AVX512-NEXT: [[TMP13:%.*]] = getelementptr double, ptr [[TMP10]], i32 16 -; AVX512-NEXT: [[TMP14:%.*]] = getelementptr double, ptr [[TMP10]], i32 24 +; AVX512-NEXT: [[TMP12:%.*]] = getelementptr double, ptr [[TMP10]], i64 8 +; AVX512-NEXT: [[TMP13:%.*]] = getelementptr double, ptr [[TMP10]], i64 16 +; AVX512-NEXT: [[TMP11:%.*]] = getelementptr double, ptr [[TMP10]], i64 24 ; AVX512-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <8 x double> @llvm.masked.load.v8f64.p0(ptr align 8 [[TMP10]], <8 x i1> [[TMP6]], <8 x double> poison), !alias.scope [[META15:![0-9]+]] ; AVX512-NEXT: [[WIDE_MASKED_LOAD9:%.*]] = call <8 x double> @llvm.masked.load.v8f64.p0(ptr align 8 [[TMP12]], <8 x i1> [[TMP7]], <8 x double> poison), !alias.scope [[META15]] ; AVX512-NEXT: [[WIDE_MASKED_LOAD10:%.*]] = call <8 x double> @llvm.masked.load.v8f64.p0(ptr align 8 [[TMP13]], <8 x i1> [[TMP8]], <8 x double> poison), !alias.scope [[META15]] -; AVX512-NEXT: [[WIDE_MASKED_LOAD11:%.*]] = call <8 x double> @llvm.masked.load.v8f64.p0(ptr align 8 [[TMP14]], <8 x i1> [[TMP9]], <8 x double> poison), !alias.scope [[META15]] +; AVX512-NEXT: [[WIDE_MASKED_LOAD11:%.*]] = call <8 x double> @llvm.masked.load.v8f64.p0(ptr align 8 [[TMP11]], <8 x i1> [[TMP9]], <8 x double> poison), !alias.scope [[META15]] 
; AVX512-NEXT: [[TMP15:%.*]] = sitofp <8 x i32> [[WIDE_LOAD]] to <8 x double> ; AVX512-NEXT: [[TMP16:%.*]] = sitofp <8 x i32> [[WIDE_LOAD6]] to <8 x double> ; AVX512-NEXT: [[TMP17:%.*]] = sitofp <8 x i32> [[WIDE_LOAD7]] to <8 x double> @@ -888,13 +888,13 @@ define void @foo3(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea ; AVX512-NEXT: [[TMP21:%.*]] = fadd <8 x double> [[WIDE_MASKED_LOAD10]], [[TMP17]] ; AVX512-NEXT: [[TMP22:%.*]] = fadd <8 x double> [[WIDE_MASKED_LOAD11]], [[TMP18]] ; AVX512-NEXT: [[TMP23:%.*]] = getelementptr double, ptr [[A]], i64 [[INDEX]] -; AVX512-NEXT: [[TMP25:%.*]] = getelementptr double, ptr [[TMP23]], i32 8 -; AVX512-NEXT: [[TMP26:%.*]] = getelementptr double, ptr [[TMP23]], i32 16 -; AVX512-NEXT: [[TMP27:%.*]] = getelementptr double, ptr [[TMP23]], i32 24 +; AVX512-NEXT: [[TMP24:%.*]] = getelementptr double, ptr [[TMP23]], i64 8 +; AVX512-NEXT: [[TMP25:%.*]] = getelementptr double, ptr [[TMP23]], i64 16 +; AVX512-NEXT: [[TMP26:%.*]] = getelementptr double, ptr [[TMP23]], i64 24 ; AVX512-NEXT: call void @llvm.masked.store.v8f64.p0(<8 x double> [[TMP19]], ptr align 8 [[TMP23]], <8 x i1> [[TMP6]]), !alias.scope [[META17:![0-9]+]], !noalias [[META19:![0-9]+]] -; AVX512-NEXT: call void @llvm.masked.store.v8f64.p0(<8 x double> [[TMP20]], ptr align 8 [[TMP25]], <8 x i1> [[TMP7]]), !alias.scope [[META17]], !noalias [[META19]] -; AVX512-NEXT: call void @llvm.masked.store.v8f64.p0(<8 x double> [[TMP21]], ptr align 8 [[TMP26]], <8 x i1> [[TMP8]]), !alias.scope [[META17]], !noalias [[META19]] -; AVX512-NEXT: call void @llvm.masked.store.v8f64.p0(<8 x double> [[TMP22]], ptr align 8 [[TMP27]], <8 x i1> [[TMP9]]), !alias.scope [[META17]], !noalias [[META19]] +; AVX512-NEXT: call void @llvm.masked.store.v8f64.p0(<8 x double> [[TMP20]], ptr align 8 [[TMP24]], <8 x i1> [[TMP7]]), !alias.scope [[META17]], !noalias [[META19]] +; AVX512-NEXT: call void @llvm.masked.store.v8f64.p0(<8 x double> [[TMP21]], ptr align 8 [[TMP25]], <8 x i1> 
[[TMP8]]), !alias.scope [[META17]], !noalias [[META19]] +; AVX512-NEXT: call void @llvm.masked.store.v8f64.p0(<8 x double> [[TMP22]], ptr align 8 [[TMP26]], <8 x i1> [[TMP9]]), !alias.scope [[META17]], !noalias [[META19]] ; AVX512-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 ; AVX512-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], 9984 ; AVX512-NEXT: br i1 [[TMP28]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] @@ -1117,68 +1117,68 @@ define void @foo6(ptr nocapture readonly %in, ptr nocapture %out, i32 %size, ptr ; AVX2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; AVX2-NEXT: [[OFFSET_IDX:%.*]] = sub i64 4095, [[INDEX]] ; AVX2-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[TRIGGER]], i64 [[OFFSET_IDX]] -; AVX2-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 0 -; AVX2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 -3 -; AVX2-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 -4 -; AVX2-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i32 -3 -; AVX2-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 -8 -; AVX2-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[TMP6]], i32 -3 -; AVX2-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 -12 -; AVX2-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i32 -3 -; AVX2-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP3]], align 4, !alias.scope [[META22:![0-9]+]] +; AVX2-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 0 +; AVX2-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP9]], i64 -3 +; AVX2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 -4 +; AVX2-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i64 -3 +; AVX2-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 -8 +; AVX2-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr 
[[TMP5]], i64 -3 +; AVX2-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 -12 +; AVX2-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[TMP7]], i64 -3 +; AVX2-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4, !alias.scope [[META22:![0-9]+]] ; AVX2-NEXT: [[REVERSE:%.*]] = shufflevector <4 x i32> [[WIDE_LOAD]], <4 x i32> poison, <4 x i32> -; AVX2-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x i32>, ptr [[TMP5]], align 4, !alias.scope [[META22]] +; AVX2-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4, !alias.scope [[META22]] ; AVX2-NEXT: [[REVERSE7:%.*]] = shufflevector <4 x i32> [[WIDE_LOAD6]], <4 x i32> poison, <4 x i32> -; AVX2-NEXT: [[WIDE_LOAD8:%.*]] = load <4 x i32>, ptr [[TMP7]], align 4, !alias.scope [[META22]] +; AVX2-NEXT: [[WIDE_LOAD8:%.*]] = load <4 x i32>, ptr [[TMP6]], align 4, !alias.scope [[META22]] ; AVX2-NEXT: [[REVERSE9:%.*]] = shufflevector <4 x i32> [[WIDE_LOAD8]], <4 x i32> poison, <4 x i32> -; AVX2-NEXT: [[WIDE_LOAD10:%.*]] = load <4 x i32>, ptr [[TMP9]], align 4, !alias.scope [[META22]] +; AVX2-NEXT: [[WIDE_LOAD10:%.*]] = load <4 x i32>, ptr [[TMP8]], align 4, !alias.scope [[META22]] ; AVX2-NEXT: [[REVERSE11:%.*]] = shufflevector <4 x i32> [[WIDE_LOAD10]], <4 x i32> poison, <4 x i32> ; AVX2-NEXT: [[TMP10:%.*]] = icmp sgt <4 x i32> [[REVERSE]], zeroinitializer ; AVX2-NEXT: [[TMP11:%.*]] = icmp sgt <4 x i32> [[REVERSE7]], zeroinitializer ; AVX2-NEXT: [[TMP12:%.*]] = icmp sgt <4 x i32> [[REVERSE9]], zeroinitializer ; AVX2-NEXT: [[TMP13:%.*]] = icmp sgt <4 x i32> [[REVERSE11]], zeroinitializer ; AVX2-NEXT: [[TMP14:%.*]] = getelementptr double, ptr [[IN]], i64 [[OFFSET_IDX]] -; AVX2-NEXT: [[TMP15:%.*]] = getelementptr double, ptr [[TMP14]], i32 0 -; AVX2-NEXT: [[TMP16:%.*]] = getelementptr double, ptr [[TMP15]], i32 -3 -; AVX2-NEXT: [[TMP17:%.*]] = getelementptr double, ptr [[TMP14]], i32 -4 -; AVX2-NEXT: [[TMP18:%.*]] = getelementptr double, ptr [[TMP17]], i32 -3 -; AVX2-NEXT: [[TMP19:%.*]] = 
getelementptr double, ptr [[TMP14]], i32 -8 -; AVX2-NEXT: [[TMP20:%.*]] = getelementptr double, ptr [[TMP19]], i32 -3 -; AVX2-NEXT: [[TMP21:%.*]] = getelementptr double, ptr [[TMP14]], i32 -12 -; AVX2-NEXT: [[TMP22:%.*]] = getelementptr double, ptr [[TMP21]], i32 -3 +; AVX2-NEXT: [[TMP22:%.*]] = getelementptr double, ptr [[TMP14]], i64 0 +; AVX2-NEXT: [[TMP15:%.*]] = getelementptr double, ptr [[TMP22]], i64 -3 +; AVX2-NEXT: [[TMP16:%.*]] = getelementptr double, ptr [[TMP14]], i64 -4 +; AVX2-NEXT: [[TMP17:%.*]] = getelementptr double, ptr [[TMP16]], i64 -3 +; AVX2-NEXT: [[TMP18:%.*]] = getelementptr double, ptr [[TMP14]], i64 -8 +; AVX2-NEXT: [[TMP19:%.*]] = getelementptr double, ptr [[TMP18]], i64 -3 +; AVX2-NEXT: [[TMP20:%.*]] = getelementptr double, ptr [[TMP14]], i64 -12 +; AVX2-NEXT: [[TMP21:%.*]] = getelementptr double, ptr [[TMP20]], i64 -3 ; AVX2-NEXT: [[REVERSE12:%.*]] = shufflevector <4 x i1> [[TMP10]], <4 x i1> poison, <4 x i32> -; AVX2-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr align 8 [[TMP16]], <4 x i1> [[REVERSE12]], <4 x double> poison), !alias.scope [[META25:![0-9]+]] +; AVX2-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr align 8 [[TMP15]], <4 x i1> [[REVERSE12]], <4 x double> poison), !alias.scope [[META25:![0-9]+]] ; AVX2-NEXT: [[REVERSE13:%.*]] = shufflevector <4 x double> [[WIDE_MASKED_LOAD]], <4 x double> poison, <4 x i32> ; AVX2-NEXT: [[REVERSE14:%.*]] = shufflevector <4 x i1> [[TMP11]], <4 x i1> poison, <4 x i32> -; AVX2-NEXT: [[WIDE_MASKED_LOAD15:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr align 8 [[TMP18]], <4 x i1> [[REVERSE14]], <4 x double> poison), !alias.scope [[META25]] +; AVX2-NEXT: [[WIDE_MASKED_LOAD15:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr align 8 [[TMP17]], <4 x i1> [[REVERSE14]], <4 x double> poison), !alias.scope [[META25]] ; AVX2-NEXT: [[REVERSE16:%.*]] = shufflevector <4 x double> [[WIDE_MASKED_LOAD15]], <4 x double> 
poison, <4 x i32> ; AVX2-NEXT: [[REVERSE17:%.*]] = shufflevector <4 x i1> [[TMP12]], <4 x i1> poison, <4 x i32> -; AVX2-NEXT: [[WIDE_MASKED_LOAD18:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr align 8 [[TMP20]], <4 x i1> [[REVERSE17]], <4 x double> poison), !alias.scope [[META25]] +; AVX2-NEXT: [[WIDE_MASKED_LOAD18:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr align 8 [[TMP19]], <4 x i1> [[REVERSE17]], <4 x double> poison), !alias.scope [[META25]] ; AVX2-NEXT: [[REVERSE19:%.*]] = shufflevector <4 x double> [[WIDE_MASKED_LOAD18]], <4 x double> poison, <4 x i32> ; AVX2-NEXT: [[REVERSE20:%.*]] = shufflevector <4 x i1> [[TMP13]], <4 x i1> poison, <4 x i32> -; AVX2-NEXT: [[WIDE_MASKED_LOAD21:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr align 8 [[TMP22]], <4 x i1> [[REVERSE20]], <4 x double> poison), !alias.scope [[META25]] +; AVX2-NEXT: [[WIDE_MASKED_LOAD21:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr align 8 [[TMP21]], <4 x i1> [[REVERSE20]], <4 x double> poison), !alias.scope [[META25]] ; AVX2-NEXT: [[REVERSE22:%.*]] = shufflevector <4 x double> [[WIDE_MASKED_LOAD21]], <4 x double> poison, <4 x i32> ; AVX2-NEXT: [[TMP23:%.*]] = fadd <4 x double> [[REVERSE13]], splat (double 5.000000e-01) ; AVX2-NEXT: [[TMP24:%.*]] = fadd <4 x double> [[REVERSE16]], splat (double 5.000000e-01) ; AVX2-NEXT: [[TMP25:%.*]] = fadd <4 x double> [[REVERSE19]], splat (double 5.000000e-01) ; AVX2-NEXT: [[TMP26:%.*]] = fadd <4 x double> [[REVERSE22]], splat (double 5.000000e-01) ; AVX2-NEXT: [[TMP27:%.*]] = getelementptr double, ptr [[OUT]], i64 [[OFFSET_IDX]] -; AVX2-NEXT: [[TMP28:%.*]] = getelementptr double, ptr [[TMP27]], i32 0 -; AVX2-NEXT: [[TMP29:%.*]] = getelementptr double, ptr [[TMP28]], i32 -3 -; AVX2-NEXT: [[TMP30:%.*]] = getelementptr double, ptr [[TMP27]], i32 -4 -; AVX2-NEXT: [[TMP31:%.*]] = getelementptr double, ptr [[TMP30]], i32 -3 -; AVX2-NEXT: [[TMP32:%.*]] = getelementptr double, ptr [[TMP27]], i32 -8 -; AVX2-NEXT: [[TMP33:%.*]] 
= getelementptr double, ptr [[TMP32]], i32 -3 -; AVX2-NEXT: [[TMP34:%.*]] = getelementptr double, ptr [[TMP27]], i32 -12 -; AVX2-NEXT: [[TMP35:%.*]] = getelementptr double, ptr [[TMP34]], i32 -3 +; AVX2-NEXT: [[TMP35:%.*]] = getelementptr double, ptr [[TMP27]], i64 0 +; AVX2-NEXT: [[TMP28:%.*]] = getelementptr double, ptr [[TMP35]], i64 -3 +; AVX2-NEXT: [[TMP29:%.*]] = getelementptr double, ptr [[TMP27]], i64 -4 +; AVX2-NEXT: [[TMP30:%.*]] = getelementptr double, ptr [[TMP29]], i64 -3 +; AVX2-NEXT: [[TMP31:%.*]] = getelementptr double, ptr [[TMP27]], i64 -8 +; AVX2-NEXT: [[TMP32:%.*]] = getelementptr double, ptr [[TMP31]], i64 -3 +; AVX2-NEXT: [[TMP33:%.*]] = getelementptr double, ptr [[TMP27]], i64 -12 +; AVX2-NEXT: [[TMP34:%.*]] = getelementptr double, ptr [[TMP33]], i64 -3 ; AVX2-NEXT: [[REVERSE24:%.*]] = shufflevector <4 x double> [[TMP23]], <4 x double> poison, <4 x i32> -; AVX2-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[REVERSE24]], ptr align 8 [[TMP29]], <4 x i1> [[REVERSE12]]), !alias.scope [[META27:![0-9]+]], !noalias [[META29:![0-9]+]] +; AVX2-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[REVERSE24]], ptr align 8 [[TMP28]], <4 x i1> [[REVERSE12]]), !alias.scope [[META27:![0-9]+]], !noalias [[META29:![0-9]+]] ; AVX2-NEXT: [[REVERSE26:%.*]] = shufflevector <4 x double> [[TMP24]], <4 x double> poison, <4 x i32> -; AVX2-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[REVERSE26]], ptr align 8 [[TMP31]], <4 x i1> [[REVERSE14]]), !alias.scope [[META27]], !noalias [[META29]] +; AVX2-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[REVERSE26]], ptr align 8 [[TMP30]], <4 x i1> [[REVERSE14]]), !alias.scope [[META27]], !noalias [[META29]] ; AVX2-NEXT: [[REVERSE28:%.*]] = shufflevector <4 x double> [[TMP25]], <4 x double> poison, <4 x i32> -; AVX2-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[REVERSE28]], ptr align 8 [[TMP33]], <4 x i1> [[REVERSE17]]), !alias.scope [[META27]], !noalias [[META29]] +; 
AVX2-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[REVERSE28]], ptr align 8 [[TMP32]], <4 x i1> [[REVERSE17]]), !alias.scope [[META27]], !noalias [[META29]] ; AVX2-NEXT: [[REVERSE30:%.*]] = shufflevector <4 x double> [[TMP26]], <4 x double> poison, <4 x i32> -; AVX2-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[REVERSE30]], ptr align 8 [[TMP35]], <4 x i1> [[REVERSE20]]), !alias.scope [[META27]], !noalias [[META29]] +; AVX2-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[REVERSE30]], ptr align 8 [[TMP34]], <4 x i1> [[REVERSE20]]), !alias.scope [[META27]], !noalias [[META29]] ; AVX2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; AVX2-NEXT: [[TMP36:%.*]] = icmp eq i64 [[INDEX_NEXT]], 4096 ; AVX2-NEXT: br i1 [[TMP36]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]] @@ -1208,68 +1208,68 @@ define void @foo6(ptr nocapture readonly %in, ptr nocapture %out, i32 %size, ptr ; AVX512-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; AVX512-NEXT: [[OFFSET_IDX:%.*]] = sub i64 4095, [[INDEX]] ; AVX512-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[TRIGGER]], i64 [[OFFSET_IDX]] -; AVX512-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 0 -; AVX512-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 -7 -; AVX512-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 -8 -; AVX512-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i32 -7 -; AVX512-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 -16 -; AVX512-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[TMP6]], i32 -7 -; AVX512-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 -24 -; AVX512-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i32 -7 -; AVX512-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i32>, ptr [[TMP3]], align 4, !alias.scope [[META34:![0-9]+]] +; AVX512-NEXT: [[TMP9:%.*]] = 
getelementptr inbounds i32, ptr [[TMP1]], i64 0 +; AVX512-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP9]], i64 -7 +; AVX512-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 -8 +; AVX512-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i64 -7 +; AVX512-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 -16 +; AVX512-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[TMP5]], i64 -7 +; AVX512-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 -24 +; AVX512-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[TMP7]], i64 -7 +; AVX512-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i32>, ptr [[TMP2]], align 4, !alias.scope [[META34:![0-9]+]] ; AVX512-NEXT: [[REVERSE:%.*]] = shufflevector <8 x i32> [[WIDE_LOAD]], <8 x i32> poison, <8 x i32> -; AVX512-NEXT: [[WIDE_LOAD6:%.*]] = load <8 x i32>, ptr [[TMP5]], align 4, !alias.scope [[META34]] +; AVX512-NEXT: [[WIDE_LOAD6:%.*]] = load <8 x i32>, ptr [[TMP4]], align 4, !alias.scope [[META34]] ; AVX512-NEXT: [[REVERSE7:%.*]] = shufflevector <8 x i32> [[WIDE_LOAD6]], <8 x i32> poison, <8 x i32> -; AVX512-NEXT: [[WIDE_LOAD8:%.*]] = load <8 x i32>, ptr [[TMP7]], align 4, !alias.scope [[META34]] +; AVX512-NEXT: [[WIDE_LOAD8:%.*]] = load <8 x i32>, ptr [[TMP6]], align 4, !alias.scope [[META34]] ; AVX512-NEXT: [[REVERSE9:%.*]] = shufflevector <8 x i32> [[WIDE_LOAD8]], <8 x i32> poison, <8 x i32> -; AVX512-NEXT: [[WIDE_LOAD10:%.*]] = load <8 x i32>, ptr [[TMP9]], align 4, !alias.scope [[META34]] +; AVX512-NEXT: [[WIDE_LOAD10:%.*]] = load <8 x i32>, ptr [[TMP8]], align 4, !alias.scope [[META34]] ; AVX512-NEXT: [[REVERSE11:%.*]] = shufflevector <8 x i32> [[WIDE_LOAD10]], <8 x i32> poison, <8 x i32> ; AVX512-NEXT: [[TMP10:%.*]] = icmp sgt <8 x i32> [[REVERSE]], zeroinitializer ; AVX512-NEXT: [[TMP11:%.*]] = icmp sgt <8 x i32> [[REVERSE7]], zeroinitializer ; AVX512-NEXT: [[TMP12:%.*]] = icmp sgt <8 x i32> [[REVERSE9]], zeroinitializer ; AVX512-NEXT: [[TMP13:%.*]] = 
icmp sgt <8 x i32> [[REVERSE11]], zeroinitializer ; AVX512-NEXT: [[TMP14:%.*]] = getelementptr double, ptr [[IN]], i64 [[OFFSET_IDX]] -; AVX512-NEXT: [[TMP15:%.*]] = getelementptr double, ptr [[TMP14]], i32 0 -; AVX512-NEXT: [[TMP16:%.*]] = getelementptr double, ptr [[TMP15]], i32 -7 -; AVX512-NEXT: [[TMP17:%.*]] = getelementptr double, ptr [[TMP14]], i32 -8 -; AVX512-NEXT: [[TMP18:%.*]] = getelementptr double, ptr [[TMP17]], i32 -7 -; AVX512-NEXT: [[TMP19:%.*]] = getelementptr double, ptr [[TMP14]], i32 -16 -; AVX512-NEXT: [[TMP20:%.*]] = getelementptr double, ptr [[TMP19]], i32 -7 -; AVX512-NEXT: [[TMP21:%.*]] = getelementptr double, ptr [[TMP14]], i32 -24 -; AVX512-NEXT: [[TMP22:%.*]] = getelementptr double, ptr [[TMP21]], i32 -7 +; AVX512-NEXT: [[TMP22:%.*]] = getelementptr double, ptr [[TMP14]], i64 0 +; AVX512-NEXT: [[TMP15:%.*]] = getelementptr double, ptr [[TMP22]], i64 -7 +; AVX512-NEXT: [[TMP16:%.*]] = getelementptr double, ptr [[TMP14]], i64 -8 +; AVX512-NEXT: [[TMP17:%.*]] = getelementptr double, ptr [[TMP16]], i64 -7 +; AVX512-NEXT: [[TMP18:%.*]] = getelementptr double, ptr [[TMP14]], i64 -16 +; AVX512-NEXT: [[TMP19:%.*]] = getelementptr double, ptr [[TMP18]], i64 -7 +; AVX512-NEXT: [[TMP20:%.*]] = getelementptr double, ptr [[TMP14]], i64 -24 +; AVX512-NEXT: [[TMP21:%.*]] = getelementptr double, ptr [[TMP20]], i64 -7 ; AVX512-NEXT: [[REVERSE12:%.*]] = shufflevector <8 x i1> [[TMP10]], <8 x i1> poison, <8 x i32> -; AVX512-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <8 x double> @llvm.masked.load.v8f64.p0(ptr align 8 [[TMP16]], <8 x i1> [[REVERSE12]], <8 x double> poison), !alias.scope [[META37:![0-9]+]] +; AVX512-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <8 x double> @llvm.masked.load.v8f64.p0(ptr align 8 [[TMP15]], <8 x i1> [[REVERSE12]], <8 x double> poison), !alias.scope [[META37:![0-9]+]] ; AVX512-NEXT: [[REVERSE13:%.*]] = shufflevector <8 x double> [[WIDE_MASKED_LOAD]], <8 x double> poison, <8 x i32> ; AVX512-NEXT: [[REVERSE14:%.*]] = shufflevector <8 x i1> 
[[TMP11]], <8 x i1> poison, <8 x i32> -; AVX512-NEXT: [[WIDE_MASKED_LOAD15:%.*]] = call <8 x double> @llvm.masked.load.v8f64.p0(ptr align 8 [[TMP18]], <8 x i1> [[REVERSE14]], <8 x double> poison), !alias.scope [[META37]] +; AVX512-NEXT: [[WIDE_MASKED_LOAD15:%.*]] = call <8 x double> @llvm.masked.load.v8f64.p0(ptr align 8 [[TMP17]], <8 x i1> [[REVERSE14]], <8 x double> poison), !alias.scope [[META37]] ; AVX512-NEXT: [[REVERSE16:%.*]] = shufflevector <8 x double> [[WIDE_MASKED_LOAD15]], <8 x double> poison, <8 x i32> ; AVX512-NEXT: [[REVERSE17:%.*]] = shufflevector <8 x i1> [[TMP12]], <8 x i1> poison, <8 x i32> -; AVX512-NEXT: [[WIDE_MASKED_LOAD18:%.*]] = call <8 x double> @llvm.masked.load.v8f64.p0(ptr align 8 [[TMP20]], <8 x i1> [[REVERSE17]], <8 x double> poison), !alias.scope [[META37]] +; AVX512-NEXT: [[WIDE_MASKED_LOAD18:%.*]] = call <8 x double> @llvm.masked.load.v8f64.p0(ptr align 8 [[TMP19]], <8 x i1> [[REVERSE17]], <8 x double> poison), !alias.scope [[META37]] ; AVX512-NEXT: [[REVERSE19:%.*]] = shufflevector <8 x double> [[WIDE_MASKED_LOAD18]], <8 x double> poison, <8 x i32> ; AVX512-NEXT: [[REVERSE20:%.*]] = shufflevector <8 x i1> [[TMP13]], <8 x i1> poison, <8 x i32> -; AVX512-NEXT: [[WIDE_MASKED_LOAD21:%.*]] = call <8 x double> @llvm.masked.load.v8f64.p0(ptr align 8 [[TMP22]], <8 x i1> [[REVERSE20]], <8 x double> poison), !alias.scope [[META37]] +; AVX512-NEXT: [[WIDE_MASKED_LOAD21:%.*]] = call <8 x double> @llvm.masked.load.v8f64.p0(ptr align 8 [[TMP21]], <8 x i1> [[REVERSE20]], <8 x double> poison), !alias.scope [[META37]] ; AVX512-NEXT: [[REVERSE22:%.*]] = shufflevector <8 x double> [[WIDE_MASKED_LOAD21]], <8 x double> poison, <8 x i32> ; AVX512-NEXT: [[TMP23:%.*]] = fadd <8 x double> [[REVERSE13]], splat (double 5.000000e-01) ; AVX512-NEXT: [[TMP24:%.*]] = fadd <8 x double> [[REVERSE16]], splat (double 5.000000e-01) ; AVX512-NEXT: [[TMP25:%.*]] = fadd <8 x double> [[REVERSE19]], splat (double 5.000000e-01) ; AVX512-NEXT: [[TMP26:%.*]] = fadd <8 x 
double> [[REVERSE22]], splat (double 5.000000e-01) ; AVX512-NEXT: [[TMP27:%.*]] = getelementptr double, ptr [[OUT]], i64 [[OFFSET_IDX]] -; AVX512-NEXT: [[TMP28:%.*]] = getelementptr double, ptr [[TMP27]], i32 0 -; AVX512-NEXT: [[TMP29:%.*]] = getelementptr double, ptr [[TMP28]], i32 -7 -; AVX512-NEXT: [[TMP30:%.*]] = getelementptr double, ptr [[TMP27]], i32 -8 -; AVX512-NEXT: [[TMP31:%.*]] = getelementptr double, ptr [[TMP30]], i32 -7 -; AVX512-NEXT: [[TMP32:%.*]] = getelementptr double, ptr [[TMP27]], i32 -16 -; AVX512-NEXT: [[TMP33:%.*]] = getelementptr double, ptr [[TMP32]], i32 -7 -; AVX512-NEXT: [[TMP34:%.*]] = getelementptr double, ptr [[TMP27]], i32 -24 -; AVX512-NEXT: [[TMP35:%.*]] = getelementptr double, ptr [[TMP34]], i32 -7 +; AVX512-NEXT: [[TMP35:%.*]] = getelementptr double, ptr [[TMP27]], i64 0 +; AVX512-NEXT: [[TMP28:%.*]] = getelementptr double, ptr [[TMP35]], i64 -7 +; AVX512-NEXT: [[TMP29:%.*]] = getelementptr double, ptr [[TMP27]], i64 -8 +; AVX512-NEXT: [[TMP30:%.*]] = getelementptr double, ptr [[TMP29]], i64 -7 +; AVX512-NEXT: [[TMP31:%.*]] = getelementptr double, ptr [[TMP27]], i64 -16 +; AVX512-NEXT: [[TMP32:%.*]] = getelementptr double, ptr [[TMP31]], i64 -7 +; AVX512-NEXT: [[TMP33:%.*]] = getelementptr double, ptr [[TMP27]], i64 -24 +; AVX512-NEXT: [[TMP34:%.*]] = getelementptr double, ptr [[TMP33]], i64 -7 ; AVX512-NEXT: [[REVERSE24:%.*]] = shufflevector <8 x double> [[TMP23]], <8 x double> poison, <8 x i32> -; AVX512-NEXT: call void @llvm.masked.store.v8f64.p0(<8 x double> [[REVERSE24]], ptr align 8 [[TMP29]], <8 x i1> [[REVERSE12]]), !alias.scope [[META39:![0-9]+]], !noalias [[META41:![0-9]+]] +; AVX512-NEXT: call void @llvm.masked.store.v8f64.p0(<8 x double> [[REVERSE24]], ptr align 8 [[TMP28]], <8 x i1> [[REVERSE12]]), !alias.scope [[META39:![0-9]+]], !noalias [[META41:![0-9]+]] ; AVX512-NEXT: [[REVERSE26:%.*]] = shufflevector <8 x double> [[TMP24]], <8 x double> poison, <8 x i32> -; AVX512-NEXT: call void 
@llvm.masked.store.v8f64.p0(<8 x double> [[REVERSE26]], ptr align 8 [[TMP31]], <8 x i1> [[REVERSE14]]), !alias.scope [[META39]], !noalias [[META41]] +; AVX512-NEXT: call void @llvm.masked.store.v8f64.p0(<8 x double> [[REVERSE26]], ptr align 8 [[TMP30]], <8 x i1> [[REVERSE14]]), !alias.scope [[META39]], !noalias [[META41]] ; AVX512-NEXT: [[REVERSE28:%.*]] = shufflevector <8 x double> [[TMP25]], <8 x double> poison, <8 x i32> -; AVX512-NEXT: call void @llvm.masked.store.v8f64.p0(<8 x double> [[REVERSE28]], ptr align 8 [[TMP33]], <8 x i1> [[REVERSE17]]), !alias.scope [[META39]], !noalias [[META41]] +; AVX512-NEXT: call void @llvm.masked.store.v8f64.p0(<8 x double> [[REVERSE28]], ptr align 8 [[TMP32]], <8 x i1> [[REVERSE17]]), !alias.scope [[META39]], !noalias [[META41]] ; AVX512-NEXT: [[REVERSE30:%.*]] = shufflevector <8 x double> [[TMP26]], <8 x double> poison, <8 x i32> -; AVX512-NEXT: call void @llvm.masked.store.v8f64.p0(<8 x double> [[REVERSE30]], ptr align 8 [[TMP35]], <8 x i1> [[REVERSE20]]), !alias.scope [[META39]], !noalias [[META41]] +; AVX512-NEXT: call void @llvm.masked.store.v8f64.p0(<8 x double> [[REVERSE30]], ptr align 8 [[TMP34]], <8 x i1> [[REVERSE20]]), !alias.scope [[META39]], !noalias [[META41]] ; AVX512-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 ; AVX512-NEXT: [[TMP36:%.*]] = icmp eq i64 [[INDEX_NEXT]], 4096 ; AVX512-NEXT: br i1 [[TMP36]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP42:![0-9]+]] @@ -1332,9 +1332,9 @@ define void @foo7(ptr noalias nocapture %out, ptr noalias nocapture readonly %in ; AVX1: [[VECTOR_BODY]]: ; AVX1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; AVX1-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[TRIGGER]], i64 [[INDEX]] -; AVX1-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 4 -; AVX1-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 8 -; AVX1-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr 
[[TMP1]], i32 12 +; AVX1-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 4 +; AVX1-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 8 +; AVX1-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 12 ; AVX1-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP1]], align 1 ; AVX1-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i8>, ptr [[TMP3]], align 1 ; AVX1-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i8>, ptr [[TMP4]], align 1 @@ -1348,9 +1348,9 @@ define void @foo7(ptr noalias nocapture %out, ptr noalias nocapture readonly %in ; AVX1-NEXT: [[TMP16:%.*]] = icmp ne <4 x i8> [[TMP8]], zeroinitializer ; AVX1-NEXT: [[TMP17:%.*]] = icmp ne <4 x i8> [[TMP9]], zeroinitializer ; AVX1-NEXT: [[TMP13:%.*]] = getelementptr ptr, ptr [[IN]], i64 [[INDEX]] -; AVX1-NEXT: [[TMP20:%.*]] = getelementptr ptr, ptr [[TMP13]], i32 4 -; AVX1-NEXT: [[TMP21:%.*]] = getelementptr ptr, ptr [[TMP13]], i32 8 -; AVX1-NEXT: [[TMP22:%.*]] = getelementptr ptr, ptr [[TMP13]], i32 12 +; AVX1-NEXT: [[TMP20:%.*]] = getelementptr ptr, ptr [[TMP13]], i64 4 +; AVX1-NEXT: [[TMP21:%.*]] = getelementptr ptr, ptr [[TMP13]], i64 8 +; AVX1-NEXT: [[TMP22:%.*]] = getelementptr ptr, ptr [[TMP13]], i64 12 ; AVX1-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x ptr> @llvm.masked.load.v4p0.p0(ptr align 8 [[TMP13]], <4 x i1> [[TMP14]], <4 x ptr> poison) ; AVX1-NEXT: [[WIDE_MASKED_LOAD4:%.*]] = call <4 x ptr> @llvm.masked.load.v4p0.p0(ptr align 8 [[TMP20]], <4 x i1> [[TMP15]], <4 x ptr> poison) ; AVX1-NEXT: [[WIDE_MASKED_LOAD5:%.*]] = call <4 x ptr> @llvm.masked.load.v4p0.p0(ptr align 8 [[TMP21]], <4 x i1> [[TMP16]], <4 x ptr> poison) @@ -1364,9 +1364,9 @@ define void @foo7(ptr noalias nocapture %out, ptr noalias nocapture readonly %in ; AVX1-NEXT: [[TMP33:%.*]] = select <4 x i1> [[TMP16]], <4 x i1> [[TMP29]], <4 x i1> zeroinitializer ; AVX1-NEXT: [[TMP34:%.*]] = select <4 x i1> [[TMP17]], <4 x i1> [[TMP30]], <4 x i1> zeroinitializer ; AVX1-NEXT: [[TMP35:%.*]] = getelementptr double, ptr [[OUT]], 
i64 [[INDEX]] -; AVX1-NEXT: [[TMP37:%.*]] = getelementptr double, ptr [[TMP35]], i32 4 -; AVX1-NEXT: [[TMP38:%.*]] = getelementptr double, ptr [[TMP35]], i32 8 -; AVX1-NEXT: [[TMP39:%.*]] = getelementptr double, ptr [[TMP35]], i32 12 +; AVX1-NEXT: [[TMP37:%.*]] = getelementptr double, ptr [[TMP35]], i64 4 +; AVX1-NEXT: [[TMP38:%.*]] = getelementptr double, ptr [[TMP35]], i64 8 +; AVX1-NEXT: [[TMP39:%.*]] = getelementptr double, ptr [[TMP35]], i64 12 ; AVX1-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> splat (double 5.000000e-01), ptr align 8 [[TMP35]], <4 x i1> [[TMP31]]) ; AVX1-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> splat (double 5.000000e-01), ptr align 8 [[TMP37]], <4 x i1> [[TMP32]]) ; AVX1-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> splat (double 5.000000e-01), ptr align 8 [[TMP38]], <4 x i1> [[TMP33]]) @@ -1424,9 +1424,9 @@ define void @foo7(ptr noalias nocapture %out, ptr noalias nocapture readonly %in ; AVX2: [[VECTOR_BODY]]: ; AVX2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; AVX2-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[TRIGGER]], i64 [[INDEX]] -; AVX2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 4 -; AVX2-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 8 -; AVX2-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 12 +; AVX2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 4 +; AVX2-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 8 +; AVX2-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 12 ; AVX2-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP1]], align 1 ; AVX2-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i8>, ptr [[TMP3]], align 1 ; AVX2-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i8>, ptr [[TMP4]], align 1 @@ -1440,9 +1440,9 @@ define void @foo7(ptr noalias nocapture %out, ptr noalias nocapture readonly %in ; AVX2-NEXT: [[TMP16:%.*]] = icmp ne <4 x i8> 
[[TMP8]], zeroinitializer ; AVX2-NEXT: [[TMP17:%.*]] = icmp ne <4 x i8> [[TMP9]], zeroinitializer ; AVX2-NEXT: [[TMP18:%.*]] = getelementptr ptr, ptr [[IN]], i64 [[INDEX]] -; AVX2-NEXT: [[TMP20:%.*]] = getelementptr ptr, ptr [[TMP18]], i32 4 -; AVX2-NEXT: [[TMP21:%.*]] = getelementptr ptr, ptr [[TMP18]], i32 8 -; AVX2-NEXT: [[TMP22:%.*]] = getelementptr ptr, ptr [[TMP18]], i32 12 +; AVX2-NEXT: [[TMP20:%.*]] = getelementptr ptr, ptr [[TMP18]], i64 4 +; AVX2-NEXT: [[TMP21:%.*]] = getelementptr ptr, ptr [[TMP18]], i64 8 +; AVX2-NEXT: [[TMP22:%.*]] = getelementptr ptr, ptr [[TMP18]], i64 12 ; AVX2-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x ptr> @llvm.masked.load.v4p0.p0(ptr align 8 [[TMP18]], <4 x i1> [[TMP14]], <4 x ptr> poison) ; AVX2-NEXT: [[WIDE_MASKED_LOAD4:%.*]] = call <4 x ptr> @llvm.masked.load.v4p0.p0(ptr align 8 [[TMP20]], <4 x i1> [[TMP15]], <4 x ptr> poison) ; AVX2-NEXT: [[WIDE_MASKED_LOAD5:%.*]] = call <4 x ptr> @llvm.masked.load.v4p0.p0(ptr align 8 [[TMP21]], <4 x i1> [[TMP16]], <4 x ptr> poison) @@ -1456,9 +1456,9 @@ define void @foo7(ptr noalias nocapture %out, ptr noalias nocapture readonly %in ; AVX2-NEXT: [[TMP33:%.*]] = select <4 x i1> [[TMP16]], <4 x i1> [[TMP29]], <4 x i1> zeroinitializer ; AVX2-NEXT: [[TMP34:%.*]] = select <4 x i1> [[TMP17]], <4 x i1> [[TMP30]], <4 x i1> zeroinitializer ; AVX2-NEXT: [[TMP35:%.*]] = getelementptr double, ptr [[OUT]], i64 [[INDEX]] -; AVX2-NEXT: [[TMP37:%.*]] = getelementptr double, ptr [[TMP35]], i32 4 -; AVX2-NEXT: [[TMP38:%.*]] = getelementptr double, ptr [[TMP35]], i32 8 -; AVX2-NEXT: [[TMP39:%.*]] = getelementptr double, ptr [[TMP35]], i32 12 +; AVX2-NEXT: [[TMP37:%.*]] = getelementptr double, ptr [[TMP35]], i64 4 +; AVX2-NEXT: [[TMP38:%.*]] = getelementptr double, ptr [[TMP35]], i64 8 +; AVX2-NEXT: [[TMP39:%.*]] = getelementptr double, ptr [[TMP35]], i64 12 ; AVX2-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> splat (double 5.000000e-01), ptr align 8 [[TMP35]], <4 x i1> [[TMP31]]) ; AVX2-NEXT: call 
void @llvm.masked.store.v4f64.p0(<4 x double> splat (double 5.000000e-01), ptr align 8 [[TMP37]], <4 x i1> [[TMP32]]) ; AVX2-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> splat (double 5.000000e-01), ptr align 8 [[TMP38]], <4 x i1> [[TMP33]]) @@ -1516,9 +1516,9 @@ define void @foo7(ptr noalias nocapture %out, ptr noalias nocapture readonly %in ; AVX512: [[VECTOR_BODY]]: ; AVX512-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; AVX512-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[TRIGGER]], i64 [[INDEX]] -; AVX512-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 8 -; AVX512-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 16 -; AVX512-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 24 +; AVX512-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 8 +; AVX512-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 16 +; AVX512-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 24 ; AVX512-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i8>, ptr [[TMP1]], align 1 ; AVX512-NEXT: [[WIDE_LOAD1:%.*]] = load <8 x i8>, ptr [[TMP3]], align 1 ; AVX512-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i8>, ptr [[TMP4]], align 1 @@ -1532,9 +1532,9 @@ define void @foo7(ptr noalias nocapture %out, ptr noalias nocapture readonly %in ; AVX512-NEXT: [[TMP16:%.*]] = icmp ne <8 x i8> [[TMP8]], zeroinitializer ; AVX512-NEXT: [[TMP17:%.*]] = icmp ne <8 x i8> [[TMP9]], zeroinitializer ; AVX512-NEXT: [[TMP18:%.*]] = getelementptr ptr, ptr [[IN]], i64 [[INDEX]] -; AVX512-NEXT: [[TMP20:%.*]] = getelementptr ptr, ptr [[TMP18]], i32 8 -; AVX512-NEXT: [[TMP21:%.*]] = getelementptr ptr, ptr [[TMP18]], i32 16 -; AVX512-NEXT: [[TMP22:%.*]] = getelementptr ptr, ptr [[TMP18]], i32 24 +; AVX512-NEXT: [[TMP20:%.*]] = getelementptr ptr, ptr [[TMP18]], i64 8 +; AVX512-NEXT: [[TMP21:%.*]] = getelementptr ptr, ptr [[TMP18]], i64 16 +; AVX512-NEXT: [[TMP22:%.*]] = 
getelementptr ptr, ptr [[TMP18]], i64 24 ; AVX512-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <8 x ptr> @llvm.masked.load.v8p0.p0(ptr align 8 [[TMP18]], <8 x i1> [[TMP14]], <8 x ptr> poison) ; AVX512-NEXT: [[WIDE_MASKED_LOAD4:%.*]] = call <8 x ptr> @llvm.masked.load.v8p0.p0(ptr align 8 [[TMP20]], <8 x i1> [[TMP15]], <8 x ptr> poison) ; AVX512-NEXT: [[WIDE_MASKED_LOAD5:%.*]] = call <8 x ptr> @llvm.masked.load.v8p0.p0(ptr align 8 [[TMP21]], <8 x i1> [[TMP16]], <8 x ptr> poison) @@ -1548,9 +1548,9 @@ define void @foo7(ptr noalias nocapture %out, ptr noalias nocapture readonly %in ; AVX512-NEXT: [[TMP33:%.*]] = select <8 x i1> [[TMP16]], <8 x i1> [[TMP29]], <8 x i1> zeroinitializer ; AVX512-NEXT: [[TMP34:%.*]] = select <8 x i1> [[TMP17]], <8 x i1> [[TMP30]], <8 x i1> zeroinitializer ; AVX512-NEXT: [[TMP35:%.*]] = getelementptr double, ptr [[OUT]], i64 [[INDEX]] -; AVX512-NEXT: [[TMP37:%.*]] = getelementptr double, ptr [[TMP35]], i32 8 -; AVX512-NEXT: [[TMP38:%.*]] = getelementptr double, ptr [[TMP35]], i32 16 -; AVX512-NEXT: [[TMP39:%.*]] = getelementptr double, ptr [[TMP35]], i32 24 +; AVX512-NEXT: [[TMP37:%.*]] = getelementptr double, ptr [[TMP35]], i64 8 +; AVX512-NEXT: [[TMP38:%.*]] = getelementptr double, ptr [[TMP35]], i64 16 +; AVX512-NEXT: [[TMP39:%.*]] = getelementptr double, ptr [[TMP35]], i64 24 ; AVX512-NEXT: call void @llvm.masked.store.v8f64.p0(<8 x double> splat (double 5.000000e-01), ptr align 8 [[TMP35]], <8 x i1> [[TMP31]]) ; AVX512-NEXT: call void @llvm.masked.store.v8f64.p0(<8 x double> splat (double 5.000000e-01), ptr align 8 [[TMP37]], <8 x i1> [[TMP32]]) ; AVX512-NEXT: call void @llvm.masked.store.v8f64.p0(<8 x double> splat (double 5.000000e-01), ptr align 8 [[TMP38]], <8 x i1> [[TMP33]]) @@ -1653,9 +1653,9 @@ define void @foo8(ptr noalias nocapture %out, ptr noalias nocapture readonly %in ; AVX1: [[VECTOR_BODY]]: ; AVX1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; AVX1-NEXT: [[TMP1:%.*]] = 
getelementptr inbounds i8, ptr [[TRIGGER]], i64 [[INDEX]] -; AVX1-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 4 -; AVX1-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 8 -; AVX1-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 12 +; AVX1-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 4 +; AVX1-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 8 +; AVX1-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 12 ; AVX1-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP1]], align 1 ; AVX1-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i8>, ptr [[TMP3]], align 1 ; AVX1-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i8>, ptr [[TMP4]], align 1 @@ -1669,9 +1669,9 @@ define void @foo8(ptr noalias nocapture %out, ptr noalias nocapture readonly %in ; AVX1-NEXT: [[TMP16:%.*]] = icmp ne <4 x i8> [[TMP8]], zeroinitializer ; AVX1-NEXT: [[TMP17:%.*]] = icmp ne <4 x i8> [[TMP9]], zeroinitializer ; AVX1-NEXT: [[TMP18:%.*]] = getelementptr ptr, ptr [[IN]], i64 [[INDEX]] -; AVX1-NEXT: [[TMP20:%.*]] = getelementptr ptr, ptr [[TMP18]], i32 4 -; AVX1-NEXT: [[TMP21:%.*]] = getelementptr ptr, ptr [[TMP18]], i32 8 -; AVX1-NEXT: [[TMP22:%.*]] = getelementptr ptr, ptr [[TMP18]], i32 12 +; AVX1-NEXT: [[TMP20:%.*]] = getelementptr ptr, ptr [[TMP18]], i64 4 +; AVX1-NEXT: [[TMP21:%.*]] = getelementptr ptr, ptr [[TMP18]], i64 8 +; AVX1-NEXT: [[TMP22:%.*]] = getelementptr ptr, ptr [[TMP18]], i64 12 ; AVX1-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x ptr> @llvm.masked.load.v4p0.p0(ptr align 8 [[TMP18]], <4 x i1> [[TMP14]], <4 x ptr> poison) ; AVX1-NEXT: [[WIDE_MASKED_LOAD4:%.*]] = call <4 x ptr> @llvm.masked.load.v4p0.p0(ptr align 8 [[TMP20]], <4 x i1> [[TMP15]], <4 x ptr> poison) ; AVX1-NEXT: [[WIDE_MASKED_LOAD5:%.*]] = call <4 x ptr> @llvm.masked.load.v4p0.p0(ptr align 8 [[TMP21]], <4 x i1> [[TMP16]], <4 x ptr> poison) @@ -1685,9 +1685,9 @@ define void @foo8(ptr noalias nocapture %out, ptr noalias nocapture readonly 
%in ; AVX1-NEXT: [[TMP33:%.*]] = select <4 x i1> [[TMP16]], <4 x i1> [[TMP29]], <4 x i1> zeroinitializer ; AVX1-NEXT: [[TMP34:%.*]] = select <4 x i1> [[TMP17]], <4 x i1> [[TMP30]], <4 x i1> zeroinitializer ; AVX1-NEXT: [[TMP35:%.*]] = getelementptr double, ptr [[OUT]], i64 [[INDEX]] -; AVX1-NEXT: [[TMP37:%.*]] = getelementptr double, ptr [[TMP35]], i32 4 -; AVX1-NEXT: [[TMP38:%.*]] = getelementptr double, ptr [[TMP35]], i32 8 -; AVX1-NEXT: [[TMP39:%.*]] = getelementptr double, ptr [[TMP35]], i32 12 +; AVX1-NEXT: [[TMP37:%.*]] = getelementptr double, ptr [[TMP35]], i64 4 +; AVX1-NEXT: [[TMP38:%.*]] = getelementptr double, ptr [[TMP35]], i64 8 +; AVX1-NEXT: [[TMP39:%.*]] = getelementptr double, ptr [[TMP35]], i64 12 ; AVX1-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> splat (double 5.000000e-01), ptr align 8 [[TMP35]], <4 x i1> [[TMP31]]) ; AVX1-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> splat (double 5.000000e-01), ptr align 8 [[TMP37]], <4 x i1> [[TMP32]]) ; AVX1-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> splat (double 5.000000e-01), ptr align 8 [[TMP38]], <4 x i1> [[TMP33]]) @@ -1745,9 +1745,9 @@ define void @foo8(ptr noalias nocapture %out, ptr noalias nocapture readonly %in ; AVX2: [[VECTOR_BODY]]: ; AVX2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; AVX2-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[TRIGGER]], i64 [[INDEX]] -; AVX2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 4 -; AVX2-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 8 -; AVX2-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 12 +; AVX2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 4 +; AVX2-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 8 +; AVX2-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 12 ; AVX2-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP1]], align 1 ; AVX2-NEXT: 
[[WIDE_LOAD1:%.*]] = load <4 x i8>, ptr [[TMP3]], align 1 ; AVX2-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i8>, ptr [[TMP4]], align 1 @@ -1761,9 +1761,9 @@ define void @foo8(ptr noalias nocapture %out, ptr noalias nocapture readonly %in ; AVX2-NEXT: [[TMP16:%.*]] = icmp ne <4 x i8> [[TMP8]], zeroinitializer ; AVX2-NEXT: [[TMP12:%.*]] = icmp ne <4 x i8> [[TMP9]], zeroinitializer ; AVX2-NEXT: [[TMP13:%.*]] = getelementptr ptr, ptr [[IN]], i64 [[INDEX]] -; AVX2-NEXT: [[TMP19:%.*]] = getelementptr ptr, ptr [[TMP13]], i32 4 -; AVX2-NEXT: [[TMP20:%.*]] = getelementptr ptr, ptr [[TMP13]], i32 8 -; AVX2-NEXT: [[TMP23:%.*]] = getelementptr ptr, ptr [[TMP13]], i32 12 +; AVX2-NEXT: [[TMP19:%.*]] = getelementptr ptr, ptr [[TMP13]], i64 4 +; AVX2-NEXT: [[TMP20:%.*]] = getelementptr ptr, ptr [[TMP13]], i64 8 +; AVX2-NEXT: [[TMP23:%.*]] = getelementptr ptr, ptr [[TMP13]], i64 12 ; AVX2-NEXT: [[WIDE_MASKED_LOAD6:%.*]] = call <4 x ptr> @llvm.masked.load.v4p0.p0(ptr align 8 [[TMP13]], <4 x i1> [[TMP17]], <4 x ptr> poison) ; AVX2-NEXT: [[WIDE_MASKED_LOAD4:%.*]] = call <4 x ptr> @llvm.masked.load.v4p0.p0(ptr align 8 [[TMP19]], <4 x i1> [[TMP15]], <4 x ptr> poison) ; AVX2-NEXT: [[WIDE_MASKED_LOAD5:%.*]] = call <4 x ptr> @llvm.masked.load.v4p0.p0(ptr align 8 [[TMP20]], <4 x i1> [[TMP16]], <4 x ptr> poison) @@ -1777,9 +1777,9 @@ define void @foo8(ptr noalias nocapture %out, ptr noalias nocapture readonly %in ; AVX2-NEXT: [[TMP33:%.*]] = select <4 x i1> [[TMP16]], <4 x i1> [[TMP29]], <4 x i1> zeroinitializer ; AVX2-NEXT: [[TMP34:%.*]] = select <4 x i1> [[TMP12]], <4 x i1> [[TMP21]], <4 x i1> zeroinitializer ; AVX2-NEXT: [[TMP35:%.*]] = getelementptr double, ptr [[OUT]], i64 [[INDEX]] -; AVX2-NEXT: [[TMP37:%.*]] = getelementptr double, ptr [[TMP35]], i32 4 -; AVX2-NEXT: [[TMP38:%.*]] = getelementptr double, ptr [[TMP35]], i32 8 -; AVX2-NEXT: [[TMP39:%.*]] = getelementptr double, ptr [[TMP35]], i32 12 +; AVX2-NEXT: [[TMP37:%.*]] = getelementptr double, ptr [[TMP35]], i64 4 +; AVX2-NEXT: 
[[TMP38:%.*]] = getelementptr double, ptr [[TMP35]], i64 8 +; AVX2-NEXT: [[TMP39:%.*]] = getelementptr double, ptr [[TMP35]], i64 12 ; AVX2-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> splat (double 5.000000e-01), ptr align 8 [[TMP35]], <4 x i1> [[TMP31]]) ; AVX2-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> splat (double 5.000000e-01), ptr align 8 [[TMP37]], <4 x i1> [[TMP32]]) ; AVX2-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> splat (double 5.000000e-01), ptr align 8 [[TMP38]], <4 x i1> [[TMP33]]) @@ -1837,9 +1837,9 @@ define void @foo8(ptr noalias nocapture %out, ptr noalias nocapture readonly %in ; AVX512: [[VECTOR_BODY]]: ; AVX512-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; AVX512-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[TRIGGER]], i64 [[INDEX]] -; AVX512-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 8 -; AVX512-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 16 -; AVX512-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 24 +; AVX512-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 8 +; AVX512-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 16 +; AVX512-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 24 ; AVX512-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i8>, ptr [[TMP1]], align 1 ; AVX512-NEXT: [[WIDE_LOAD1:%.*]] = load <8 x i8>, ptr [[TMP3]], align 1 ; AVX512-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i8>, ptr [[TMP4]], align 1 @@ -1853,9 +1853,9 @@ define void @foo8(ptr noalias nocapture %out, ptr noalias nocapture readonly %in ; AVX512-NEXT: [[TMP16:%.*]] = icmp ne <8 x i8> [[TMP8]], zeroinitializer ; AVX512-NEXT: [[TMP17:%.*]] = icmp ne <8 x i8> [[TMP9]], zeroinitializer ; AVX512-NEXT: [[TMP18:%.*]] = getelementptr ptr, ptr [[IN]], i64 [[INDEX]] -; AVX512-NEXT: [[TMP20:%.*]] = getelementptr ptr, ptr [[TMP18]], i32 8 -; AVX512-NEXT: [[TMP21:%.*]] = getelementptr 
ptr, ptr [[TMP18]], i32 16 -; AVX512-NEXT: [[TMP22:%.*]] = getelementptr ptr, ptr [[TMP18]], i32 24 +; AVX512-NEXT: [[TMP20:%.*]] = getelementptr ptr, ptr [[TMP18]], i64 8 +; AVX512-NEXT: [[TMP21:%.*]] = getelementptr ptr, ptr [[TMP18]], i64 16 +; AVX512-NEXT: [[TMP22:%.*]] = getelementptr ptr, ptr [[TMP18]], i64 24 ; AVX512-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <8 x ptr> @llvm.masked.load.v8p0.p0(ptr align 8 [[TMP18]], <8 x i1> [[TMP14]], <8 x ptr> poison) ; AVX512-NEXT: [[WIDE_MASKED_LOAD4:%.*]] = call <8 x ptr> @llvm.masked.load.v8p0.p0(ptr align 8 [[TMP20]], <8 x i1> [[TMP15]], <8 x ptr> poison) ; AVX512-NEXT: [[WIDE_MASKED_LOAD5:%.*]] = call <8 x ptr> @llvm.masked.load.v8p0.p0(ptr align 8 [[TMP21]], <8 x i1> [[TMP16]], <8 x ptr> poison) @@ -1869,9 +1869,9 @@ define void @foo8(ptr noalias nocapture %out, ptr noalias nocapture readonly %in ; AVX512-NEXT: [[TMP33:%.*]] = select <8 x i1> [[TMP16]], <8 x i1> [[TMP29]], <8 x i1> zeroinitializer ; AVX512-NEXT: [[TMP34:%.*]] = select <8 x i1> [[TMP17]], <8 x i1> [[TMP30]], <8 x i1> zeroinitializer ; AVX512-NEXT: [[TMP35:%.*]] = getelementptr double, ptr [[OUT]], i64 [[INDEX]] -; AVX512-NEXT: [[TMP37:%.*]] = getelementptr double, ptr [[TMP35]], i32 8 -; AVX512-NEXT: [[TMP38:%.*]] = getelementptr double, ptr [[TMP35]], i32 16 -; AVX512-NEXT: [[TMP39:%.*]] = getelementptr double, ptr [[TMP35]], i32 24 +; AVX512-NEXT: [[TMP37:%.*]] = getelementptr double, ptr [[TMP35]], i64 8 +; AVX512-NEXT: [[TMP38:%.*]] = getelementptr double, ptr [[TMP35]], i64 16 +; AVX512-NEXT: [[TMP39:%.*]] = getelementptr double, ptr [[TMP35]], i64 24 ; AVX512-NEXT: call void @llvm.masked.store.v8f64.p0(<8 x double> splat (double 5.000000e-01), ptr align 8 [[TMP35]], <8 x i1> [[TMP31]]) ; AVX512-NEXT: call void @llvm.masked.store.v8f64.p0(<8 x double> splat (double 5.000000e-01), ptr align 8 [[TMP37]], <8 x i1> [[TMP32]]) ; AVX512-NEXT: call void @llvm.masked.store.v8f64.p0(<8 x double> splat (double 5.000000e-01), ptr align 8 [[TMP38]], <8 x i1> 
[[TMP33]]) diff --git a/llvm/test/Transforms/LoopVectorize/X86/metadata-enable.ll b/llvm/test/Transforms/LoopVectorize/X86/metadata-enable.ll index e23f8a9b63ef0..d514ab6bc72b7 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/metadata-enable.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/metadata-enable.ll @@ -1186,13 +1186,13 @@ define i32 @nopragma(ptr noalias nocapture %a, ptr noalias nocapture readonly %b ; O1VEC2: vector.body: ; O1VEC2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; O1VEC2-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i32, ptr [[B:%.*]], i64 [[INDEX]] -; O1VEC2-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP1]], i32 4 +; O1VEC2-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP1]], i64 4 ; O1VEC2-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4 ; O1VEC2-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP3]], align 4 ; O1VEC2-NEXT: [[TMP4:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] ; O1VEC2-NEXT: [[TMP5:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[BROADCAST_SPLAT]] ; O1VEC2-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i32, ptr [[A:%.*]], i64 [[INDEX]] -; O1VEC2-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP6]], i32 4 +; O1VEC2-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP6]], i64 4 ; O1VEC2-NEXT: store <4 x i32> [[TMP4]], ptr [[TMP6]], align 4 ; O1VEC2-NEXT: store <4 x i32> [[TMP5]], ptr [[TMP8]], align 4 ; O1VEC2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 @@ -1214,13 +1214,13 @@ define i32 @nopragma(ptr noalias nocapture %a, ptr noalias nocapture readonly %b ; OzVEC2: vector.body: ; OzVEC2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; OzVEC2-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i32, ptr [[B:%.*]], i64 [[INDEX]] -; OzVEC2-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP1]], i32 4 +; OzVEC2-NEXT: [[TMP3:%.*]] = getelementptr 
inbounds nuw i32, ptr [[TMP1]], i64 4 ; OzVEC2-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4 ; OzVEC2-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP3]], align 4 ; OzVEC2-NEXT: [[TMP4:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] ; OzVEC2-NEXT: [[TMP5:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[BROADCAST_SPLAT]] ; OzVEC2-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i32, ptr [[A:%.*]], i64 [[INDEX]] -; OzVEC2-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP6]], i32 4 +; OzVEC2-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP6]], i64 4 ; OzVEC2-NEXT: store <4 x i32> [[TMP4]], ptr [[TMP6]], align 4 ; OzVEC2-NEXT: store <4 x i32> [[TMP5]], ptr [[TMP8]], align 4 ; OzVEC2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 diff --git a/llvm/test/Transforms/LoopVectorize/X86/multi-exit-cost.ll b/llvm/test/Transforms/LoopVectorize/X86/multi-exit-cost.ll index de6418066dea0..2809a77b36f1a 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/multi-exit-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/multi-exit-cost.ll @@ -30,8 +30,8 @@ define i64 @test_value_in_exit_compare_chain_used_outside(ptr %src, i64 %x, i64 ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <8 x i8> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP29:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP18:%.*]] = and i64 [[TMP10]], 1 ; CHECK-NEXT: [[TMP26:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP18]] -; CHECK-NEXT: [[TMP27:%.*]] = getelementptr i8, ptr [[TMP26]], i32 0 -; CHECK-NEXT: [[TMP28:%.*]] = getelementptr i8, ptr [[TMP27]], i32 -7 +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[TMP26]], i64 0 +; CHECK-NEXT: [[TMP28:%.*]] = getelementptr i8, ptr [[TMP12]], i64 -7 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i8>, ptr [[TMP28]], align 1 ; CHECK-NEXT: [[REVERSE:%.*]] = shufflevector <8 x i8> [[WIDE_LOAD]], <8 x i8> poison, <8 x i32> ; CHECK-NEXT: [[TMP29]] = xor <8 x i8> [[REVERSE]], [[VEC_PHI]] diff --git 
a/llvm/test/Transforms/LoopVectorize/X86/pr23997.ll b/llvm/test/Transforms/LoopVectorize/X86/pr23997.ll index 31269b1b8c221..85d77eaadc632 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/pr23997.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/pr23997.ll @@ -35,17 +35,17 @@ define void @foo(ptr addrspace(1) align 8 dereferenceable_or_null(16), ptr addrs ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds ptr addrspace(1), ptr addrspace(1) [[DOT12]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds ptr addrspace(1), ptr addrspace(1) [[TMP5]], i32 4 -; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds ptr addrspace(1), ptr addrspace(1) [[TMP5]], i32 8 -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds ptr addrspace(1), ptr addrspace(1) [[TMP5]], i32 12 +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds ptr addrspace(1), ptr addrspace(1) [[TMP5]], i64 4 +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds ptr addrspace(1), ptr addrspace(1) [[TMP5]], i64 8 +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds ptr addrspace(1), ptr addrspace(1) [[TMP5]], i64 12 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x ptr addrspace(1)>, ptr addrspace(1) [[TMP5]], align 8, !alias.scope [[META0:![0-9]+]] ; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x ptr addrspace(1)>, ptr addrspace(1) [[TMP6]], align 8, !alias.scope [[META0]] ; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x ptr addrspace(1)>, ptr addrspace(1) [[TMP7]], align 8, !alias.scope [[META0]] ; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x ptr addrspace(1)>, ptr addrspace(1) [[TMP8]], align 8, !alias.scope [[META0]] ; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds ptr addrspace(1), ptr addrspace(1) [[DOT10]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds ptr addrspace(1), ptr addrspace(1) [[TMP9]], i32 4 -; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds ptr 
addrspace(1), ptr addrspace(1) [[TMP9]], i32 8 -; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds ptr addrspace(1), ptr addrspace(1) [[TMP9]], i32 12 +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds ptr addrspace(1), ptr addrspace(1) [[TMP9]], i64 4 +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds ptr addrspace(1), ptr addrspace(1) [[TMP9]], i64 8 +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds ptr addrspace(1), ptr addrspace(1) [[TMP9]], i64 12 ; CHECK-NEXT: store <4 x ptr addrspace(1)> [[WIDE_LOAD]], ptr addrspace(1) [[TMP9]], align 8, !alias.scope [[META3:![0-9]+]], !noalias [[META0]] ; CHECK-NEXT: store <4 x ptr addrspace(1)> [[WIDE_LOAD4]], ptr addrspace(1) [[TMP10]], align 8, !alias.scope [[META3]], !noalias [[META0]] ; CHECK-NEXT: store <4 x ptr addrspace(1)> [[WIDE_LOAD5]], ptr addrspace(1) [[TMP11]], align 8, !alias.scope [[META3]], !noalias [[META0]] diff --git a/llvm/test/Transforms/LoopVectorize/X86/pr35432.ll b/llvm/test/Transforms/LoopVectorize/X86/pr35432.ll index 3c618d71fc974..9217c905945ac 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/pr35432.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/pr35432.ll @@ -67,7 +67,7 @@ define i32 @main(ptr %ptr) { ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i32 [[DOTPROMOTED]], [[INDEX]] ; CHECK-NEXT: [[TMP20:%.*]] = add i32 [[OFFSET_IDX]], 1 ; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, ptr [[PTR:%.*]], i32 [[TMP20]] -; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds i32, ptr [[TMP22]], i32 4 +; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds i32, ptr [[TMP22]], i64 4 ; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr [[TMP22]], align 4 ; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr [[TMP25]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8 diff --git a/llvm/test/Transforms/LoopVectorize/X86/pr39160.ll b/llvm/test/Transforms/LoopVectorize/X86/pr39160.ll index 878d288b918e4..4f8975f3d5e84 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/pr39160.ll +++ 
b/llvm/test/Transforms/LoopVectorize/X86/pr39160.ll @@ -1,75 +1,71 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 6 ; RUN: opt -passes=loop-vectorize -S < %s 2>&1 | FileCheck %s target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128-ni:1" target triple = "x86_64-unknown-linux-gnu" ; Make sure that we can compile the test without crash. -define void @barney(ptr %dst, i1 %arg) { - -; CHECK-LABEL: @barney( -; CHECK: middle.block: - -bb: - br label %bb2 - -bb2: ; preds = %bb2, %bb - %tmp4 = icmp slt i32 undef, 0 - br i1 %tmp4, label %bb2, label %bb5 - -bb5: ; preds = %bb2 - br label %bb19 - -bb18: ; preds = %bb33 - ret void - -bb19: ; preds = %bb36, %bb5 - %tmp21 = phi i64 [ undef, %bb36 ], [ 2, %bb5 ] - %tmp22 = phi i32 [ %tmp65, %bb36 ], [ undef, %bb5 ] - br label %bb50 - -bb33: ; preds = %bb62 - br i1 %arg, label %bb18, label %bb36 - -bb36: ; preds = %bb33 - br label %bb19 - -bb46: ; preds = %bb50 - br i1 %arg, label %bb48, label %bb59 - -bb48: ; preds = %bb46 - %tmp49 = add i32 %tmp52, 14 - ret void - -bb50: ; preds = %bb50, %bb19 - %tmp52 = phi i32 [ %tmp55, %bb50 ], [ %tmp22, %bb19 ] - %tmp53 = phi i64 [ %tmp56, %bb50 ], [ 1, %bb19 ] - %gep = getelementptr inbounds i8, ptr %dst, i64 %tmp53 - store i8 1, ptr %gep - %tmp54 = add i32 %tmp52, 12 - %tmp55 = add i32 %tmp52, 13 - %tmp56 = add nuw nsw i64 %tmp53, 1 - %tmp58 = icmp ult i64 %tmp53, undef - br i1 %tmp58, label %bb50, label %bb46 - -bb59: ; preds = %bb46 - br label %bb62 - -bb62: ; preds = %bb68, %bb59 - %tmp63 = phi i32 [ %tmp65, %bb68 ], [ %tmp55, %bb59 ] - %tmp64 = phi i64 [ %tmp66, %bb68 ], [ %tmp56, %bb59 ] - %tmp65 = add i32 %tmp63, 13 - %tmp66 = add nuw nsw i64 %tmp64, 1 - %tmp67 = icmp ult i64 %tmp66, %tmp21 - br i1 %tmp67, label %bb68, label %bb33 - -bb68: ; preds = %bb62 - br label %bb62 -} define i32 @foo(ptr addrspace(1) %p) { - -; CHECK-LABEL: foo -; CHECK: middle.block: +; CHECK-LABEL: define i32 @foo( +; CHECK-SAME: 
ptr addrspace(1) [[P:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[OUTER:.*]] +; CHECK: [[OUTER]]: +; CHECK-NEXT: [[INDVAR:%.*]] = phi i32 [ [[INDVAR_NEXT:%.*]], %[[OUTER_LATCH:.*]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 2, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[OUTER_LATCH]] ] +; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[INDVAR]], 1 +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP0]], 8 +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP0]], 8 +; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP0]], [[N_MOD_VF]] +; CHECK-NEXT: [[TMP1:%.*]] = add i32 1, [[N_VEC]] +; CHECK-NEXT: [[TMP2:%.*]] = mul i32 [[N_VEC]], 2 +; CHECK-NEXT: [[TMP3:%.*]] = add i32 6, [[TMP2]] +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP4:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP5:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ , %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[STEP_ADD:%.*]] = add <4 x i32> [[VEC_IND]], splat (i32 8) +; CHECK-NEXT: [[TMP4]] = or <4 x i32> [[VEC_PHI]], [[VEC_IND]] +; CHECK-NEXT: [[TMP5]] = or <4 x i32> [[VEC_PHI1]], [[STEP_ADD]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8 +; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[STEP_ADD]], splat (i32 8) +; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[BIN_RDX:%.*]] = or <4 x i32> [[TMP5]], [[TMP4]] +; CHECK-NEXT: [[TMP7:%.*]] = call i32 
@llvm.vector.reduce.or.v4i32(<4 x i32> [[BIN_RDX]]) +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP0]], [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label %[[OUTER_LATCH]], label %[[SCALAR_PH]] +; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP7]], %[[MIDDLE_BLOCK]] ], [ 0, %[[OUTER]] ] +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[TMP1]], %[[MIDDLE_BLOCK]] ], [ 1, %[[OUTER]] ] +; CHECK-NEXT: [[BC_RESUME_VAL2:%.*]] = phi i32 [ [[TMP3]], %[[MIDDLE_BLOCK]] ], [ 6, %[[OUTER]] ] +; CHECK-NEXT: br label %[[INNER:.*]] +; CHECK: [[INNER]]: +; CHECK-NEXT: [[TMP8:%.*]] = phi i32 [ [[TMP10:%.*]], %[[INNER]] ], [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ] +; CHECK-NEXT: [[A:%.*]] = phi i32 [ [[TMP11:%.*]], %[[INNER]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] +; CHECK-NEXT: [[B:%.*]] = phi i32 [ [[TMP9:%.*]], %[[INNER]] ], [ [[BC_RESUME_VAL2]], %[[SCALAR_PH]] ] +; CHECK-NEXT: [[TMP9]] = add i32 [[B]], 2 +; CHECK-NEXT: [[TMP10]] = or i32 [[TMP8]], [[B]] +; CHECK-NEXT: [[TMP11]] = add nuw nsw i32 [[A]], 1 +; CHECK-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64 +; CHECK-NEXT: [[TMP13:%.*]] = icmp ugt i64 [[IV]], [[TMP12]] +; CHECK-NEXT: br i1 [[TMP13]], label %[[INNER]], label %[[OUTER_LATCH]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK: [[OUTER_LATCH]]: +; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[TMP10]], %[[INNER]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ] +; CHECK-NEXT: store atomic i32 [[DOTLCSSA]], ptr addrspace(1) [[P]] unordered, align 4 +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[TMP14:%.*]] = icmp ugt i64 [[IV]], 63 +; CHECK-NEXT: [[INDVAR_NEXT]] = add i32 [[INDVAR]], 1 +; CHECK-NEXT: br i1 [[TMP14]], label %[[EXIT:.*]], label %[[OUTER]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret i32 0 +; entry: br label %outer diff --git a/llvm/test/Transforms/LoopVectorize/X86/pr47437.ll b/llvm/test/Transforms/LoopVectorize/X86/pr47437.ll index 737bcf35fbd2c..38db41271d1f6 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/pr47437.ll +++ 
b/llvm/test/Transforms/LoopVectorize/X86/pr47437.ll @@ -124,7 +124,7 @@ define void @test_muladd(ptr noalias nocapture %d1, ptr noalias nocapture readon ; SSE41-NEXT: [[TMP24:%.*]] = add nsw <4 x i32> [[TMP22]], [[TMP16]] ; SSE41-NEXT: [[TMP25:%.*]] = add nsw <4 x i32> [[TMP23]], [[TMP17]] ; SSE41-NEXT: [[TMP26:%.*]] = getelementptr inbounds i32, ptr [[D1:%.*]], i64 [[INDEX]] -; SSE41-NEXT: [[TMP29:%.*]] = getelementptr inbounds i32, ptr [[TMP26]], i32 4 +; SSE41-NEXT: [[TMP29:%.*]] = getelementptr inbounds i32, ptr [[TMP26]], i64 4 ; SSE41-NEXT: store <4 x i32> [[TMP24]], ptr [[TMP26]], align 4 ; SSE41-NEXT: store <4 x i32> [[TMP25]], ptr [[TMP29]], align 4 ; SSE41-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 @@ -250,9 +250,9 @@ define void @test_muladd(ptr noalias nocapture %d1, ptr noalias nocapture readon ; AVX1-NEXT: [[TMP69:%.*]] = add nsw <4 x i32> [[TMP67]], [[TMP46]] ; AVX1-NEXT: [[TMP70:%.*]] = add nsw <4 x i32> [[TMP68]], [[TMP47]] ; AVX1-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, ptr [[D1:%.*]], i64 [[INDEX]] -; AVX1-NEXT: [[TMP26:%.*]] = getelementptr inbounds i32, ptr [[TMP21]], i32 4 -; AVX1-NEXT: [[TMP71:%.*]] = getelementptr inbounds i32, ptr [[TMP21]], i32 8 -; AVX1-NEXT: [[TMP72:%.*]] = getelementptr inbounds i32, ptr [[TMP21]], i32 12 +; AVX1-NEXT: [[TMP26:%.*]] = getelementptr inbounds i32, ptr [[TMP21]], i64 4 +; AVX1-NEXT: [[TMP71:%.*]] = getelementptr inbounds i32, ptr [[TMP21]], i64 8 +; AVX1-NEXT: [[TMP72:%.*]] = getelementptr inbounds i32, ptr [[TMP21]], i64 12 ; AVX1-NEXT: store <4 x i32> [[TMP19]], ptr [[TMP21]], align 4 ; AVX1-NEXT: store <4 x i32> [[TMP20]], ptr [[TMP26]], align 4 ; AVX1-NEXT: store <4 x i32> [[TMP69]], ptr [[TMP71]], align 4 diff --git a/llvm/test/Transforms/LoopVectorize/X86/pr81872.ll b/llvm/test/Transforms/LoopVectorize/X86/pr81872.ll index 08855fe9ecba5..c756a54ec6d2b 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/pr81872.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/pr81872.ll @@ -30,8 +30,8 @@ 
define void @test(ptr noundef align 8 dereferenceable_or_null(16) %arr) #0 { ; CHECK-NEXT: [[TMP4:%.*]] = select <4 x i1> [[TMP1]], <4 x i1> [[TMP3]], <4 x i1> zeroinitializer ; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[OFFSET_IDX]], 1 ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[ARR]], i64 [[TMP5]] -; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[TMP6]], i32 0 -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[TMP7]], i32 -3 +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[TMP6]], i64 0 +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[TMP7]], i64 -3 ; CHECK-NEXT: [[REVERSE:%.*]] = shufflevector <4 x i1> [[TMP4]], <4 x i1> poison, <4 x i32> ; CHECK-NEXT: call void @llvm.masked.store.v4i64.p0(<4 x i64> splat (i64 1), ptr align 8 [[TMP8]], <4 x i1> [[REVERSE]]) ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 diff --git a/llvm/test/Transforms/LoopVectorize/X86/predicate-switch.ll b/llvm/test/Transforms/LoopVectorize/X86/predicate-switch.ll index 2aceb279d47db..5a396f88b1a64 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/predicate-switch.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/predicate-switch.ll @@ -76,7 +76,7 @@ define void @switch_default_to_latch_common_dest(ptr %start, ptr %end) { ; FORCED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; FORCED-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 8 ; FORCED-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[OFFSET_IDX]] -; FORCED-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[NEXT_GEP]], i32 4 +; FORCED-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[NEXT_GEP]], i64 4 ; FORCED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[NEXT_GEP]], align 1 ; FORCED-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x i64>, ptr [[TMP8]], align 1 ; FORCED-NEXT: [[TMP9:%.*]] = icmp eq <4 x i64> [[WIDE_LOAD]], splat (i64 -12) @@ -214,7 +214,7 @@ define void @switch_default_to_latch_common_dest_using_branches(ptr %start, ptr ; FORCED-NEXT: 
[[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; FORCED-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 8 ; FORCED-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[OFFSET_IDX]] -; FORCED-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[NEXT_GEP]], i32 4 +; FORCED-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[NEXT_GEP]], i64 4 ; FORCED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[NEXT_GEP]], align 1 ; FORCED-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x i64>, ptr [[TMP8]], align 1 ; FORCED-NEXT: [[TMP9:%.*]] = icmp eq <4 x i64> [[WIDE_LOAD]], splat (i64 -12) @@ -337,7 +337,7 @@ define void @switch_all_dests_distinct(ptr %start, ptr %end) { ; FORCED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; FORCED-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 8 ; FORCED-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[OFFSET_IDX]] -; FORCED-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[NEXT_GEP]], i32 4 +; FORCED-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[NEXT_GEP]], i64 4 ; FORCED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[NEXT_GEP]], align 1 ; FORCED-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x i64>, ptr [[TMP8]], align 1 ; FORCED-NEXT: [[TMP9:%.*]] = icmp eq <4 x i64> [[WIDE_LOAD]], splat (i64 -12) @@ -527,7 +527,7 @@ define void @switch_all_dests_distinct_variant_using_branches(ptr %start, ptr %e ; FORCED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; FORCED-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 8 ; FORCED-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[OFFSET_IDX]] -; FORCED-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[NEXT_GEP]], i32 4 +; FORCED-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[NEXT_GEP]], i64 4 ; FORCED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[NEXT_GEP]], align 1 ; FORCED-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x i64>, ptr [[TMP8]], align 1 ; FORCED-NEXT: [[TMP9:%.*]] = icmp eq 
<4 x i64> [[WIDE_LOAD]], splat (i64 -12) @@ -687,7 +687,7 @@ define void @switch_multiple_common_dests(ptr %start, ptr %end) { ; FORCED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; FORCED-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 8 ; FORCED-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[OFFSET_IDX]] -; FORCED-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[NEXT_GEP]], i32 4 +; FORCED-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[NEXT_GEP]], i64 4 ; FORCED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[NEXT_GEP]], align 1 ; FORCED-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x i64>, ptr [[TMP8]], align 1 ; FORCED-NEXT: [[TMP23:%.*]] = icmp eq <4 x i64> [[WIDE_LOAD]], splat (i64 -12) @@ -836,7 +836,7 @@ define void @switch4_default_common_dest_with_case(ptr %start, ptr %end) { ; FORCED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; FORCED-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 8 ; FORCED-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[OFFSET_IDX]] -; FORCED-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[NEXT_GEP]], i32 4 +; FORCED-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[NEXT_GEP]], i64 4 ; FORCED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[NEXT_GEP]], align 1 ; FORCED-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x i64>, ptr [[TMP8]], align 1 ; FORCED-NEXT: [[TMP15:%.*]] = icmp eq <4 x i64> [[WIDE_LOAD]], splat (i64 -12) @@ -1014,7 +1014,7 @@ define void @switch_under_br_default_common_dest_with_case(ptr %start, ptr %end, ; FORCED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; FORCED-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 8 ; FORCED-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[OFFSET_IDX]] -; FORCED-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[NEXT_GEP]], i32 4 +; FORCED-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[NEXT_GEP]], i64 4 ; FORCED-NEXT: 
[[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[NEXT_GEP]], align 1 ; FORCED-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x i64>, ptr [[TMP8]], align 1 ; FORCED-NEXT: [[TMP9:%.*]] = icmp ule <4 x i64> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] @@ -1167,7 +1167,7 @@ define void @br_under_switch_default_common_dest_with_case(ptr %start, ptr %end, ; FORCED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; FORCED-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 8 ; FORCED-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[OFFSET_IDX]] -; FORCED-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[NEXT_GEP]], i32 4 +; FORCED-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[NEXT_GEP]], i64 4 ; FORCED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[NEXT_GEP]], align 1 ; FORCED-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x i64>, ptr [[TMP8]], align 1 ; FORCED-NEXT: [[TMP9:%.*]] = icmp eq <4 x i64> [[WIDE_LOAD]], splat (i64 -12) @@ -1319,7 +1319,7 @@ define void @large_number_of_cases(ptr %start, ptr %end) { ; FORCED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; FORCED-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 8 ; FORCED-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[OFFSET_IDX]] -; FORCED-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[NEXT_GEP]], i32 4 +; FORCED-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[NEXT_GEP]], i64 4 ; FORCED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[NEXT_GEP]], align 1 ; FORCED-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x i64>, ptr [[TMP8]], align 1 ; FORCED-NEXT: [[TMP9:%.*]] = icmp eq <4 x i64> [[WIDE_LOAD]], splat (i64 1) diff --git a/llvm/test/Transforms/LoopVectorize/X86/reduction-fastmath.ll b/llvm/test/Transforms/LoopVectorize/X86/reduction-fastmath.ll index 52e90e4475208..3afdf947081b6 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/reduction-fastmath.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/reduction-fastmath.ll @@ -60,7 +60,7 @@ define float 
@reduction_sum_float_fastmath(i32 %n, ptr %array) { ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x float> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP6:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x float> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr float, ptr [[ARRAY:%.*]], i32 [[INDEX]] -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr float, ptr [[TMP2]], i32 4 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr float, ptr [[TMP2]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP2]], align 4 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP5]], align 4 ; CHECK-NEXT: [[TMP6]] = fadd fast <4 x float> [[VEC_PHI]], [[WIDE_LOAD]] @@ -111,7 +111,7 @@ define float @reduction_sum_float_only_reassoc(i32 %n, ptr %array) { ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x float> [ splat (float -0.000000e+00), [[VECTOR_PH]] ], [ [[TMP6:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x float> [ splat (float -0.000000e+00), [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr float, ptr [[ARRAY:%.*]], i32 [[INDEX]] -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr float, ptr [[TMP2]], i32 4 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr float, ptr [[TMP2]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP2]], align 4 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP5]], align 4 ; CHECK-NEXT: [[TMP6]] = fadd reassoc <4 x float> [[VEC_PHI]], [[WIDE_LOAD]] @@ -162,7 +162,7 @@ define float @reduction_sum_float_only_reassoc_and_contract(i32 %n, ptr %array) ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x float> [ splat (float -0.000000e+00), [[VECTOR_PH]] ], [ [[TMP6:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x float> [ splat (float -0.000000e+00), [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr float, ptr [[ARRAY:%.*]], i32 [[INDEX]] -; CHECK-NEXT: 
[[TMP5:%.*]] = getelementptr float, ptr [[TMP2]], i32 4 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr float, ptr [[TMP2]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP2]], align 4 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP5]], align 4 ; CHECK-NEXT: [[TMP6]] = fadd reassoc contract <4 x float> [[VEC_PHI]], [[WIDE_LOAD]] @@ -220,7 +220,7 @@ define float @PR35538(ptr nocapture readonly %a, i32 %N) #0 { ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x float> [ splat (float -1.000000e+00), [[VECTOR_PH]] ], [ [[TMP8:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x float> [ splat (float -1.000000e+00), [[VECTOR_PH]] ], [ [[TMP9:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 4 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP2]], align 4 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP5]], align 4 ; CHECK-NEXT: [[TMP6:%.*]] = fcmp nnan ninf nsz oge <4 x float> [[WIDE_LOAD]], [[VEC_PHI]] @@ -301,7 +301,7 @@ define float @PR35538_more_FMF(ptr nocapture readonly %a, i32 %N) #0 { ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x float> [ splat (float -1.000000e+00), [[VECTOR_PH]] ], [ [[TMP8:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x float> [ splat (float -1.000000e+00), [[VECTOR_PH]] ], [ [[TMP9:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 4 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP2]], align 4 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP5]], align 4 ; CHECK-NEXT: [[TMP6:%.*]] = fcmp nnan ninf oge <4 x 
float> [[WIDE_LOAD]], [[VEC_PHI]] diff --git a/llvm/test/Transforms/LoopVectorize/X86/small-size.ll b/llvm/test/Transforms/LoopVectorize/X86/small-size.ll index e99ffda9e4043..93cf59c019d5f 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/small-size.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/small-size.ll @@ -524,22 +524,78 @@ define void @example23c(ptr noalias nocapture %src, ptr noalias nocapture %dst) ; induction is used outside the loop. define i64 @example23d(ptr noalias nocapture %src, ptr noalias nocapture %dst) optsize { ; CHECK-LABEL: @example23d( +; CHECK-NEXT: br label [[VECTOR_PH:%.*]] +; CHECK: vector.ph: ; CHECK-NEXT: br label [[TMP1:%.*]] -; CHECK: 1: -; CHECK-NEXT: [[DOT04:%.*]] = phi ptr [ [[SRC:%.*]], [[TMP0:%.*]] ], [ [[TMP2:%.*]], [[TMP1]] ] -; CHECK-NEXT: [[DOT013:%.*]] = phi ptr [ [[DST:%.*]], [[TMP0]] ], [ [[TMP6:%.*]], [[TMP1]] ] -; CHECK-NEXT: [[I_02:%.*]] = phi i64 [ 0, [[TMP0]] ], [ [[TMP7:%.*]], [[TMP1]] ] -; CHECK-NEXT: [[TMP2]] = getelementptr inbounds nuw i8, ptr [[DOT04]], i64 2 +; CHECK: vector.body: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE14:%.*]] ] +; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ , [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[PRED_STORE_CONTINUE14]] ] +; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 1 +; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[SRC:%.*]], i64 [[OFFSET_IDX]] +; CHECK-NEXT: [[NEXT_GEP1:%.*]] = getelementptr i8, ptr [[TMP9]], i64 2 +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[OFFSET_IDX]] +; CHECK-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, ptr [[TMP2]], i64 4 +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[OFFSET_IDX]] +; CHECK-NEXT: [[NEXT_GEP3:%.*]] = getelementptr i8, ptr [[TMP10]], i64 6 +; CHECK-NEXT: [[OFFSET_IDX4:%.*]] = shl i64 [[INDEX]], 2 +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[DST:%.*]], i64 [[OFFSET_IDX4]] +; CHECK-NEXT: [[NEXT_GEP6:%.*]] = 
getelementptr i8, ptr [[TMP11]], i64 4 +; CHECK-NEXT: [[TMP32:%.*]] = getelementptr i8, ptr [[DST]], i64 [[OFFSET_IDX4]] +; CHECK-NEXT: [[NEXT_GEP7:%.*]] = getelementptr i8, ptr [[TMP32]], i64 8 +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[DST]], i64 [[OFFSET_IDX4]] +; CHECK-NEXT: [[NEXT_GEP8:%.*]] = getelementptr i8, ptr [[TMP6]], i64 12 +; CHECK-NEXT: [[TMP33:%.*]] = icmp ult <4 x i64> [[VEC_IND]], splat (i64 257) +; CHECK-NEXT: [[TMP8:%.*]] = extractelement <4 x i1> [[TMP33]], i64 0 +; CHECK-NEXT: br i1 [[TMP8]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]] +; CHECK: pred.store.if: +; CHECK-NEXT: [[DOT013:%.*]] = getelementptr i8, ptr [[DST]], i64 [[OFFSET_IDX4]] +; CHECK-NEXT: [[DOT04:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[OFFSET_IDX]] ; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr [[DOT04]], align 2 ; CHECK-NEXT: [[TMP4:%.*]] = zext i16 [[TMP3]] to i32 ; CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i32 [[TMP4]], 7 -; CHECK-NEXT: [[TMP6]] = getelementptr inbounds nuw i8, ptr [[DOT013]], i64 4 ; CHECK-NEXT: store i32 [[TMP5]], ptr [[DOT013]], align 4 -; CHECK-NEXT: [[TMP7]] = add nuw nsw i64 [[I_02]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP7]], 257 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[TMP8:%.*]], label [[TMP1]] -; CHECK: 8: -; CHECK-NEXT: ret i64 [[TMP7]] +; CHECK-NEXT: br label [[PRED_STORE_CONTINUE]] +; CHECK: pred.store.continue: +; CHECK-NEXT: [[TMP12:%.*]] = extractelement <4 x i1> [[TMP33]], i64 1 +; CHECK-NEXT: br i1 [[TMP12]], label [[PRED_STORE_IF9:%.*]], label [[PRED_STORE_CONTINUE10:%.*]] +; CHECK: pred.store.if9: +; CHECK-NEXT: [[TMP13:%.*]] = load i16, ptr [[NEXT_GEP1]], align 2 +; CHECK-NEXT: [[TMP14:%.*]] = zext i16 [[TMP13]] to i32 +; CHECK-NEXT: [[TMP15:%.*]] = shl nuw nsw i32 [[TMP14]], 7 +; CHECK-NEXT: store i32 [[TMP15]], ptr [[NEXT_GEP6]], align 4 +; CHECK-NEXT: br label [[PRED_STORE_CONTINUE10]] +; CHECK: pred.store.continue10: +; CHECK-NEXT: [[TMP16:%.*]] = extractelement <4 x i1> [[TMP33]], i64 2 
+; CHECK-NEXT: br i1 [[TMP16]], label [[PRED_STORE_IF11:%.*]], label [[PRED_STORE_CONTINUE12:%.*]] +; CHECK: pred.store.if11: +; CHECK-NEXT: [[TMP17:%.*]] = load i16, ptr [[NEXT_GEP2]], align 2 +; CHECK-NEXT: [[TMP18:%.*]] = zext i16 [[TMP17]] to i32 +; CHECK-NEXT: [[TMP19:%.*]] = shl nuw nsw i32 [[TMP18]], 7 +; CHECK-NEXT: store i32 [[TMP19]], ptr [[NEXT_GEP7]], align 4 +; CHECK-NEXT: br label [[PRED_STORE_CONTINUE12]] +; CHECK: pred.store.continue12: +; CHECK-NEXT: [[TMP20:%.*]] = extractelement <4 x i1> [[TMP33]], i64 3 +; CHECK-NEXT: br i1 [[TMP20]], label [[PRED_STORE_IF13:%.*]], label [[PRED_STORE_CONTINUE14]] +; CHECK: pred.store.if13: +; CHECK-NEXT: [[TMP21:%.*]] = load i16, ptr [[NEXT_GEP3]], align 2 +; CHECK-NEXT: [[TMP22:%.*]] = zext i16 [[TMP21]] to i32 +; CHECK-NEXT: [[TMP23:%.*]] = shl nuw nsw i32 [[TMP22]], 7 +; CHECK-NEXT: store i32 [[TMP23]], ptr [[NEXT_GEP8]], align 4 +; CHECK-NEXT: br label [[PRED_STORE_CONTINUE14]] +; CHECK: pred.store.continue14: +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4) +; CHECK-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260 +; CHECK-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[TMP1]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK: middle.block: +; CHECK-NEXT: br label [[TMP30:%.*]] +; CHECK: 25: +; CHECK-NEXT: [[TMP25:%.*]] = xor <4 x i1> [[TMP33]], splat (i1 true) +; CHECK-NEXT: [[TMP26:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v4i1(<4 x i1> [[TMP25]], i1 false) +; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[TMP26]], -1 +; CHECK-NEXT: [[TMP28:%.*]] = extractelement <4 x i64> [[VEC_IND]], i64 [[TMP27]] +; CHECK-NEXT: [[TMP29:%.*]] = add nsw i64 [[TMP28]], 1 +; CHECK-NEXT: ret i64 [[TMP29]] ; br label %1 diff --git a/llvm/test/Transforms/LoopVectorize/X86/strided_load_cost.ll b/llvm/test/Transforms/LoopVectorize/X86/strided_load_cost.ll index 602a3921eb34c..da48f984cb329 100644 --- 
a/llvm/test/Transforms/LoopVectorize/X86/strided_load_cost.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/strided_load_cost.ll @@ -59,13 +59,13 @@ define i32 @matrix_row_col(ptr nocapture readonly %data, i32 %i, i32 %j) local_u ; CHECK-NEXT: [[TMP30:%.*]] = add i64 [[INDEX]], 30 ; CHECK-NEXT: [[TMP31:%.*]] = add i64 [[INDEX]], 31 ; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[IDXPROM]], i64 [[TMP0]] -; CHECK-NEXT: [[TMP37:%.*]] = getelementptr inbounds i32, ptr [[TMP32]], i32 8 -; CHECK-NEXT: [[TMP38:%.*]] = getelementptr inbounds i32, ptr [[TMP32]], i32 16 -; CHECK-NEXT: [[TMP39:%.*]] = getelementptr inbounds i32, ptr [[TMP32]], i32 24 +; CHECK-NEXT: [[TMP33:%.*]] = getelementptr inbounds i32, ptr [[TMP32]], i64 8 +; CHECK-NEXT: [[TMP34:%.*]] = getelementptr inbounds i32, ptr [[TMP32]], i64 16 +; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds i32, ptr [[TMP32]], i64 24 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i32>, ptr [[TMP32]], align 4, !tbaa [[INT_TBAA1:![0-9]+]] -; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <8 x i32>, ptr [[TMP37]], align 4, !tbaa [[INT_TBAA1]] -; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <8 x i32>, ptr [[TMP38]], align 4, !tbaa [[INT_TBAA1]] -; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <8 x i32>, ptr [[TMP39]], align 4, !tbaa [[INT_TBAA1]] +; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <8 x i32>, ptr [[TMP33]], align 4, !tbaa [[INT_TBAA1]] +; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <8 x i32>, ptr [[TMP34]], align 4, !tbaa [[INT_TBAA1]] +; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <8 x i32>, ptr [[TMP35]], align 4, !tbaa [[INT_TBAA1]] ; CHECK-NEXT: [[TMP40:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP0]], i64 [[IDXPROM5]] ; CHECK-NEXT: [[TMP41:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP1]], i64 [[IDXPROM5]] ; CHECK-NEXT: [[TMP42:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP2]], i64 [[IDXPROM5]] @@ -290,13 +290,13 @@ define i32 @matrix_row_col(ptr 
nocapture readonly %data, i32 %i, i32 %j) local_u ; MAX-BW-NEXT: [[TMP30:%.*]] = add i64 [[INDEX]], 30 ; MAX-BW-NEXT: [[TMP31:%.*]] = add i64 [[INDEX]], 31 ; MAX-BW-NEXT: [[TMP32:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[IDXPROM]], i64 [[TMP0]] -; MAX-BW-NEXT: [[TMP37:%.*]] = getelementptr inbounds i32, ptr [[TMP32]], i32 8 -; MAX-BW-NEXT: [[TMP38:%.*]] = getelementptr inbounds i32, ptr [[TMP32]], i32 16 -; MAX-BW-NEXT: [[TMP39:%.*]] = getelementptr inbounds i32, ptr [[TMP32]], i32 24 +; MAX-BW-NEXT: [[TMP33:%.*]] = getelementptr inbounds i32, ptr [[TMP32]], i64 8 +; MAX-BW-NEXT: [[TMP34:%.*]] = getelementptr inbounds i32, ptr [[TMP32]], i64 16 +; MAX-BW-NEXT: [[TMP35:%.*]] = getelementptr inbounds i32, ptr [[TMP32]], i64 24 ; MAX-BW-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i32>, ptr [[TMP32]], align 4, !tbaa [[INT_TBAA1:![0-9]+]] -; MAX-BW-NEXT: [[WIDE_LOAD4:%.*]] = load <8 x i32>, ptr [[TMP37]], align 4, !tbaa [[INT_TBAA1]] -; MAX-BW-NEXT: [[WIDE_LOAD5:%.*]] = load <8 x i32>, ptr [[TMP38]], align 4, !tbaa [[INT_TBAA1]] -; MAX-BW-NEXT: [[WIDE_LOAD6:%.*]] = load <8 x i32>, ptr [[TMP39]], align 4, !tbaa [[INT_TBAA1]] +; MAX-BW-NEXT: [[WIDE_LOAD4:%.*]] = load <8 x i32>, ptr [[TMP33]], align 4, !tbaa [[INT_TBAA1]] +; MAX-BW-NEXT: [[WIDE_LOAD5:%.*]] = load <8 x i32>, ptr [[TMP34]], align 4, !tbaa [[INT_TBAA1]] +; MAX-BW-NEXT: [[WIDE_LOAD6:%.*]] = load <8 x i32>, ptr [[TMP35]], align 4, !tbaa [[INT_TBAA1]] ; MAX-BW-NEXT: [[TMP40:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP0]], i64 [[IDXPROM5]] ; MAX-BW-NEXT: [[TMP41:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP1]], i64 [[IDXPROM5]] ; MAX-BW-NEXT: [[TMP42:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP2]], i64 [[IDXPROM5]] diff --git a/llvm/test/Transforms/LoopVectorize/X86/uniform_load.ll b/llvm/test/Transforms/LoopVectorize/X86/uniform_load.ll index 8081c0e17f865..692ab3db0aa42 100644 --- 
a/llvm/test/Transforms/LoopVectorize/X86/uniform_load.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/uniform_load.ll @@ -25,9 +25,9 @@ define void @foo(ptr nocapture noalias %A, i64 %N) #0 { ; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr @inc, align 4 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <8 x float> poison, float [[TMP1]], i64 0 ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <8 x float> [[BROADCAST_SPLATINSERT]], <8 x float> poison, <8 x i32> zeroinitializer -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[A]], i32 8 -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[A]], i32 16 -; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A]], i32 24 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[A]], i64 8 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[A]], i64 16 +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A]], i64 24 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x float>, ptr [[A]], align 4 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x float>, ptr [[TMP4]], align 4 ; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <8 x float>, ptr [[TMP5]], align 4 diff --git a/llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll b/llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll index fda944e072d4a..714d01315e507 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll @@ -29,29 +29,29 @@ define void @vectorized(ptr noalias nocapture %A, ptr noalias nocapture readonly ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i32 4 -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i32 8 -; CHECK-NEXT: 
[[TMP5:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i32 12 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i64 4 +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i64 8 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i64 12 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP1]], align 4, !llvm.access.group [[ACC_GRP0:![0-9]+]] -; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x float>, ptr [[TMP3]], align 4, !llvm.access.group [[ACC_GRP0]] -; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP4]], align 4, !llvm.access.group [[ACC_GRP0]] -; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <4 x float>, ptr [[TMP5]], align 4, !llvm.access.group [[ACC_GRP0]] +; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x float>, ptr [[TMP4]], align 4, !llvm.access.group [[ACC_GRP0]] +; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP2]], align 4, !llvm.access.group [[ACC_GRP0]] +; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <4 x float>, ptr [[TMP3]], align 4, !llvm.access.group [[ACC_GRP0]] ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i32 4 -; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i32 8 -; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i32 12 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 4 +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 8 +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 12 ; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x float>, ptr [[TMP6]], align 4, !llvm.access.group [[ACC_GRP0]] -; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x float>, ptr [[TMP8]], align 4, !llvm.access.group [[ACC_GRP0]] -; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x float>, ptr [[TMP9]], align 4, !llvm.access.group [[ACC_GRP0]] -; CHECK-NEXT: 
[[WIDE_LOAD7:%.*]] = load <4 x float>, ptr [[TMP10]], align 4, !llvm.access.group [[ACC_GRP0]] +; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x float>, ptr [[TMP5]], align 4, !llvm.access.group [[ACC_GRP0]] +; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x float>, ptr [[TMP8]], align 4, !llvm.access.group [[ACC_GRP0]] +; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x float>, ptr [[TMP7]], align 4, !llvm.access.group [[ACC_GRP0]] ; CHECK-NEXT: [[TMP11:%.*]] = fadd fast <4 x float> [[WIDE_LOAD]], [[WIDE_LOAD4]] ; CHECK-NEXT: [[TMP12:%.*]] = fadd fast <4 x float> [[WIDE_LOAD1]], [[WIDE_LOAD5]] ; CHECK-NEXT: [[TMP13:%.*]] = fadd fast <4 x float> [[WIDE_LOAD2]], [[WIDE_LOAD6]] ; CHECK-NEXT: [[TMP14:%.*]] = fadd fast <4 x float> [[WIDE_LOAD3]], [[WIDE_LOAD7]] ; CHECK-NEXT: store <4 x float> [[TMP11]], ptr [[TMP6]], align 4, !llvm.access.group [[ACC_GRP0]] -; CHECK-NEXT: store <4 x float> [[TMP12]], ptr [[TMP8]], align 4, !llvm.access.group [[ACC_GRP0]] -; CHECK-NEXT: store <4 x float> [[TMP13]], ptr [[TMP9]], align 4, !llvm.access.group [[ACC_GRP0]] -; CHECK-NEXT: store <4 x float> [[TMP14]], ptr [[TMP10]], align 4, !llvm.access.group [[ACC_GRP0]] +; CHECK-NEXT: store <4 x float> [[TMP12]], ptr [[TMP5]], align 4, !llvm.access.group [[ACC_GRP0]] +; CHECK-NEXT: store <4 x float> [[TMP13]], ptr [[TMP8]], align 4, !llvm.access.group [[ACC_GRP0]] +; CHECK-NEXT: store <4 x float> [[TMP14]], ptr [[TMP7]], align 4, !llvm.access.group [[ACC_GRP0]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16 ; CHECK-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP1:![0-9]+]] diff --git a/llvm/test/Transforms/LoopVectorize/X86/vectorize-force-tail-with-evl.ll b/llvm/test/Transforms/LoopVectorize/X86/vectorize-force-tail-with-evl.ll index c8e3766aa936e..a792d2463e647 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/vectorize-force-tail-with-evl.ll +++ 
b/llvm/test/Transforms/LoopVectorize/X86/vectorize-force-tail-with-evl.ll @@ -56,17 +56,17 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) { ; NO-VP: vector.body: ; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; NO-VP-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDEX]] -; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i32 16 -; NO-VP-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i32 32 -; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i32 48 +; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i64 16 +; NO-VP-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i64 32 +; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i64 48 ; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i32>, ptr [[TMP4]], align 4 ; NO-VP-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i32>, ptr [[TMP9]], align 4 ; NO-VP-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i32>, ptr [[TMP10]], align 4 ; NO-VP-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i32>, ptr [[TMP11]], align 4 ; NO-VP-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[INDEX]] -; NO-VP-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[TMP12]], i32 16 -; NO-VP-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[TMP12]], i32 32 -; NO-VP-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[TMP12]], i32 48 +; NO-VP-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[TMP12]], i64 16 +; NO-VP-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[TMP12]], i64 32 +; NO-VP-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[TMP12]], i64 48 ; NO-VP-NEXT: [[WIDE_LOAD5:%.*]] = load <16 x i32>, ptr [[TMP12]], align 4 ; NO-VP-NEXT: [[WIDE_LOAD6:%.*]] = load <16 x i32>, ptr [[TMP17]], align 4 ; NO-VP-NEXT: [[WIDE_LOAD7:%.*]] = load <16 x i32>, ptr [[TMP18]], align 4 @@ -76,9 +76,9 @@ define void @foo(ptr noalias 
%a, ptr noalias %b, ptr noalias %c, i64 %N) { ; NO-VP-NEXT: [[TMP22:%.*]] = add nsw <16 x i32> [[WIDE_LOAD7]], [[WIDE_LOAD3]] ; NO-VP-NEXT: [[TMP23:%.*]] = add nsw <16 x i32> [[WIDE_LOAD8]], [[WIDE_LOAD4]] ; NO-VP-NEXT: [[TMP24:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]] -; NO-VP-NEXT: [[TMP29:%.*]] = getelementptr inbounds i32, ptr [[TMP24]], i32 16 -; NO-VP-NEXT: [[TMP30:%.*]] = getelementptr inbounds i32, ptr [[TMP24]], i32 32 -; NO-VP-NEXT: [[TMP31:%.*]] = getelementptr inbounds i32, ptr [[TMP24]], i32 48 +; NO-VP-NEXT: [[TMP29:%.*]] = getelementptr inbounds i32, ptr [[TMP24]], i64 16 +; NO-VP-NEXT: [[TMP30:%.*]] = getelementptr inbounds i32, ptr [[TMP24]], i64 32 +; NO-VP-NEXT: [[TMP31:%.*]] = getelementptr inbounds i32, ptr [[TMP24]], i64 48 ; NO-VP-NEXT: store <16 x i32> [[TMP20]], ptr [[TMP24]], align 4 ; NO-VP-NEXT: store <16 x i32> [[TMP21]], ptr [[TMP29]], align 4 ; NO-VP-NEXT: store <16 x i32> [[TMP22]], ptr [[TMP30]], align 4 diff --git a/llvm/test/Transforms/LoopVectorize/X86/widened-value-used-as-scalar-and-first-lane.ll b/llvm/test/Transforms/LoopVectorize/X86/widened-value-used-as-scalar-and-first-lane.ll index 8184cad22ae8b..26268f1ff4e94 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/widened-value-used-as-scalar-and-first-lane.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/widened-value-used-as-scalar-and-first-lane.ll @@ -18,9 +18,9 @@ define void @iv.4_used_as_vector_and_first_lane(ptr %src, ptr noalias %dst) { ; CHECK-NEXT: [[STEP_ADD_2:%.*]] = add <4 x i64> [[STEP_ADD]], splat (i64 4) ; CHECK-NEXT: [[STEP_ADD_3:%.*]] = add <4 x i64> [[STEP_ADD_2]], splat (i64 4) ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, ptr [[TMP4]], i32 4 -; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i64, ptr [[TMP4]], i32 8 -; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i64, ptr [[TMP4]], i32 12 +; CHECK-NEXT: [[TMP9:%.*]] = 
getelementptr inbounds i64, ptr [[TMP4]], i64 4 +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i64, ptr [[TMP4]], i64 8 +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i64, ptr [[TMP4]], i64 12 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP4]], align 8 ; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x i64>, ptr [[TMP9]], align 8 ; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x i64>, ptr [[TMP10]], align 8 @@ -36,9 +36,9 @@ define void @iv.4_used_as_vector_and_first_lane(ptr %src, ptr noalias %dst) { ; CHECK-NEXT: [[TMP19:%.*]] = icmp ule <4 x i64> [[WIDE_LOAD6]], splat (i64 128) ; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[TMP26]], 1 ; CHECK-NEXT: [[TMP28:%.*]] = getelementptr i64, ptr [[DST]], i64 [[TMP27]] -; CHECK-NEXT: [[TMP33:%.*]] = getelementptr i64, ptr [[TMP28]], i32 4 -; CHECK-NEXT: [[TMP34:%.*]] = getelementptr i64, ptr [[TMP28]], i32 8 -; CHECK-NEXT: [[TMP35:%.*]] = getelementptr i64, ptr [[TMP28]], i32 12 +; CHECK-NEXT: [[TMP33:%.*]] = getelementptr i64, ptr [[TMP28]], i64 4 +; CHECK-NEXT: [[TMP34:%.*]] = getelementptr i64, ptr [[TMP28]], i64 8 +; CHECK-NEXT: [[TMP35:%.*]] = getelementptr i64, ptr [[TMP28]], i64 12 ; CHECK-NEXT: call void @llvm.masked.store.v4i64.p0(<4 x i64> [[TMP12]], ptr align 4 [[TMP28]], <4 x i1> [[TMP16]]) ; CHECK-NEXT: call void @llvm.masked.store.v4i64.p0(<4 x i64> [[TMP13]], ptr align 4 [[TMP33]], <4 x i1> [[TMP17]]) ; CHECK-NEXT: call void @llvm.masked.store.v4i64.p0(<4 x i64> [[TMP14]], ptr align 4 [[TMP34]], <4 x i1> [[TMP18]]) @@ -88,9 +88,9 @@ define void @iv.4_used_as_first_lane(ptr %src, ptr noalias %dst) { ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, ptr [[TMP4]], i32 4 -; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i64, ptr [[TMP4]], i32 8 -; CHECK-NEXT: [[TMP11:%.*]] = getelementptr 
inbounds i64, ptr [[TMP4]], i32 12 +; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, ptr [[TMP4]], i64 4 +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i64, ptr [[TMP4]], i64 8 +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i64, ptr [[TMP4]], i64 12 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP4]], align 8 ; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP9]], align 8 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i64>, ptr [[TMP10]], align 8 @@ -102,9 +102,9 @@ define void @iv.4_used_as_first_lane(ptr %src, ptr noalias %dst) { ; CHECK-NEXT: [[TMP19:%.*]] = icmp ule <4 x i64> [[WIDE_LOAD3]], splat (i64 128) ; CHECK-NEXT: [[TMP23:%.*]] = add i64 [[TMP15]], 1 ; CHECK-NEXT: [[TMP24:%.*]] = getelementptr i64, ptr [[DST]], i64 [[TMP23]] -; CHECK-NEXT: [[TMP29:%.*]] = getelementptr i64, ptr [[TMP24]], i32 4 -; CHECK-NEXT: [[TMP30:%.*]] = getelementptr i64, ptr [[TMP24]], i32 8 -; CHECK-NEXT: [[TMP31:%.*]] = getelementptr i64, ptr [[TMP24]], i32 12 +; CHECK-NEXT: [[TMP29:%.*]] = getelementptr i64, ptr [[TMP24]], i64 4 +; CHECK-NEXT: [[TMP30:%.*]] = getelementptr i64, ptr [[TMP24]], i64 8 +; CHECK-NEXT: [[TMP31:%.*]] = getelementptr i64, ptr [[TMP24]], i64 12 ; CHECK-NEXT: call void @llvm.masked.store.v4i64.p0(<4 x i64> [[WIDE_LOAD]], ptr align 4 [[TMP24]], <4 x i1> [[TMP16]]) ; CHECK-NEXT: call void @llvm.masked.store.v4i64.p0(<4 x i64> [[WIDE_LOAD1]], ptr align 4 [[TMP29]], <4 x i1> [[TMP17]]) ; CHECK-NEXT: call void @llvm.masked.store.v4i64.p0(<4 x i64> [[WIDE_LOAD2]], ptr align 4 [[TMP30]], <4 x i1> [[TMP18]]) diff --git a/llvm/test/Transforms/LoopVectorize/assume.ll b/llvm/test/Transforms/LoopVectorize/assume.ll index a9a0b33f542af..eddd5f9ddc584 100644 --- a/llvm/test/Transforms/LoopVectorize/assume.ll +++ b/llvm/test/Transforms/LoopVectorize/assume.ll @@ -11,7 +11,7 @@ define void @test1(ptr noalias nocapture %a, ptr noalias nocapture readonly %b) ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, 
%[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[TMP0]], i32 2 +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[TMP0]], i64 2 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x float>, ptr [[TMP0]], align 4 ; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <2 x float>, ptr [[TMP7]], align 4 ; CHECK-NEXT: [[TMP1:%.*]] = fcmp ogt <2 x float> [[WIDE_LOAD]], splat (float 1.000000e+02) @@ -27,7 +27,7 @@ define void @test1(ptr noalias nocapture %a, ptr noalias nocapture readonly %b) ; CHECK-NEXT: [[TMP8:%.*]] = fadd <2 x float> [[WIDE_LOAD]], splat (float 1.000000e+00) ; CHECK-NEXT: [[TMP9:%.*]] = fadd <2 x float> [[WIDE_LOAD1]], splat (float 1.000000e+00) ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[TMP10]], i32 2 +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[TMP10]], i64 2 ; CHECK-NEXT: store <2 x float> [[TMP8]], ptr [[TMP10]], align 4 ; CHECK-NEXT: store <2 x float> [[TMP9]], ptr [[TMP11]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 @@ -80,13 +80,13 @@ define void @test2(ptr noalias %a, ptr noalias %b) { ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[TMP3]], i32 2 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[TMP3]], i64 2 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x float>, ptr [[TMP3]], align 4 ; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <2 x float>, ptr [[TMP4]], align 4 ; CHECK-NEXT: [[TMP5:%.*]] = fadd <2 x float> [[WIDE_LOAD]], splat (float 1.000000e+00) ; CHECK-NEXT: [[TMP6:%.*]] = fadd <2 x float> [[WIDE_LOAD1]], 
splat (float 1.000000e+00) ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[TMP7]], i32 2 +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[TMP7]], i64 2 ; CHECK-NEXT: store <2 x float> [[TMP5]], ptr [[TMP7]], align 4 ; CHECK-NEXT: store <2 x float> [[TMP6]], ptr [[TMP8]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 @@ -151,13 +151,13 @@ define void @predicated_assume(ptr noalias nocapture readonly %a, ptr noalias no ; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP1]], <2 x float> splat (float 2.300000e+01), <2 x float> splat (float 4.200000e+01) ; CHECK-NEXT: [[PREDPHI1:%.*]] = select <2 x i1> [[TMP2]], <2 x float> splat (float 2.300000e+01), <2 x float> splat (float 4.200000e+01) ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[TMP3]], i32 2 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[TMP3]], i64 2 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x float>, ptr [[TMP3]], align 4 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <2 x float>, ptr [[TMP4]], align 4 ; CHECK-NEXT: [[TMP5:%.*]] = fmul <2 x float> [[PREDPHI]], [[WIDE_LOAD]] ; CHECK-NEXT: [[TMP6:%.*]] = fmul <2 x float> [[PREDPHI1]], [[WIDE_LOAD2]] ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[TMP7]], i32 2 +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[TMP7]], i64 2 ; CHECK-NEXT: store <2 x float> [[TMP5]], ptr [[TMP7]], align 4 ; CHECK-NEXT: store <2 x float> [[TMP6]], ptr [[TMP8]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 diff --git a/llvm/test/Transforms/LoopVectorize/consecutive-ptr-uniforms.ll b/llvm/test/Transforms/LoopVectorize/consecutive-ptr-uniforms.ll index 1fe3962dfd072..6c63b823b7666 100644 --- 
a/llvm/test/Transforms/LoopVectorize/consecutive-ptr-uniforms.ll +++ b/llvm/test/Transforms/LoopVectorize/consecutive-ptr-uniforms.ll @@ -130,8 +130,8 @@ define i32 @consecutive_ptr_reverse(ptr %a, i64 %n) { ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP5:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = sub i64 [[N]], [[INDEX]] ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[OFFSET_IDX]] -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 0 -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i32 -3 +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i64 0 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i64 -3 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP4]], align 8 ; CHECK-NEXT: [[REVERSE:%.*]] = shufflevector <4 x i32> [[WIDE_LOAD]], <4 x i32> poison, <4 x i32> ; CHECK-NEXT: [[TMP5]] = add <4 x i32> [[VEC_PHI]], [[REVERSE]] @@ -177,8 +177,8 @@ define i32 @consecutive_ptr_reverse(ptr %a, i64 %n) { ; INTER-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP5:%.*]], %[[VECTOR_BODY]] ] ; INTER-NEXT: [[OFFSET_IDX:%.*]] = sub i64 [[N]], [[INDEX]] ; INTER-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[OFFSET_IDX]] -; INTER-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 0 -; INTER-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i32 -3 +; INTER-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i64 0 +; INTER-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i64 -3 ; INTER-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP4]], align 8 ; INTER-NEXT: [[REVERSE:%.*]] = shufflevector <4 x i32> [[WIDE_LOAD]], <4 x i32> poison, <4 x i32> ; INTER-NEXT: [[TMP5]] = add <4 x i32> [[VEC_PHI]], [[REVERSE]] diff --git a/llvm/test/Transforms/LoopVectorize/cse-casts.ll 
b/llvm/test/Transforms/LoopVectorize/cse-casts.ll index fb45745eff1cb..4737a56df2735 100644 --- a/llvm/test/Transforms/LoopVectorize/cse-casts.ll +++ b/llvm/test/Transforms/LoopVectorize/cse-casts.ll @@ -19,7 +19,7 @@ define i8 @preserve_flags_when_cloning_trunc(i8 %start, ptr noalias %src, ptr no ; CHECK-NEXT: [[TMP2:%.*]] = icmp ne <4 x i32> [[BROADCAST_SPLAT]], zeroinitializer ; CHECK-NEXT: [[TMP3:%.*]] = zext <4 x i1> [[TMP2]] to <4 x i16> ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i16, ptr [[DST]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i16, ptr [[TMP4]], i32 4 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i16, ptr [[TMP4]], i64 4 ; CHECK-NEXT: store <4 x i16> [[TMP3]], ptr [[TMP4]], align 2 ; CHECK-NEXT: store <4 x i16> [[TMP3]], ptr [[TMP5]], align 2 ; CHECK-NEXT: [[TMP6]] = mul <4 x i8> [[VEC_PHI]], splat (i8 3) diff --git a/llvm/test/Transforms/LoopVectorize/cse-gep-source-element-type.ll b/llvm/test/Transforms/LoopVectorize/cse-gep-source-element-type.ll index 5d92c127aff93..901652537a5c5 100644 --- a/llvm/test/Transforms/LoopVectorize/cse-gep-source-element-type.ll +++ b/llvm/test/Transforms/LoopVectorize/cse-gep-source-element-type.ll @@ -16,19 +16,19 @@ define void @cse_replicate_gep(ptr noalias %A, ptr noalias %B, ptr noalias %C, i ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i32, ptr [[TMP0]], i32 4 +; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i32, ptr [[TMP0]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4 ; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4 ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i16, ptr [[A]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i16, ptr [[TMP8]], i32 4 +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i16, ptr [[TMP8]], i64 4 ; CHECK-NEXT: 
[[WIDE_LOAD2:%.*]] = load <4 x i16>, ptr [[TMP8]], align 2 ; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <4 x i16>, ptr [[TMP2]], align 2 ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i32, ptr [[B]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i32, ptr [[TMP3]], i32 4 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i32, ptr [[TMP3]], i64 4 ; CHECK-NEXT: store <4 x i32> [[WIDE_LOAD]], ptr [[TMP3]], align 4 ; CHECK-NEXT: store <4 x i32> [[WIDE_LOAD1]], ptr [[TMP4]], align 4 ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i16, ptr [[C]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i16, ptr [[TMP5]], i32 4 +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i16, ptr [[TMP5]], i64 4 ; CHECK-NEXT: store <4 x i16> [[WIDE_LOAD2]], ptr [[TMP5]], align 2 ; CHECK-NEXT: store <4 x i16> [[WIDE_LOAD3]], ptr [[TMP6]], align 2 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 @@ -79,11 +79,11 @@ define void @cse_wide_gep(ptr noalias %A, ptr noalias %B, ptr noalias %C, i64 %n ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i16, ptr [[A]], <4 x i64> [[VEC_IND]] ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i16, ptr [[A]], <4 x i64> [[STEP_ADD]] ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i64, ptr [[B]], i64 [[INDEX1]] -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr ptr, ptr [[TMP4]], i32 4 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr ptr, ptr [[TMP4]], i64 4 ; CHECK-NEXT: store <4 x ptr> [[TMP0]], ptr [[TMP4]], align 8 ; CHECK-NEXT: store <4 x ptr> [[TMP1]], ptr [[TMP5]], align 8 ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[C]], i64 [[INDEX1]] -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr ptr, ptr [[TMP6]], i32 4 +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr ptr, ptr [[TMP6]], i64 4 ; CHECK-NEXT: store <4 x ptr> [[TMP2]], ptr [[TMP6]], align 8 ; CHECK-NEXT: store <4 x ptr> [[TMP3]], ptr [[TMP8]], align 8 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX1]], 8 diff --git a/llvm/test/Transforms/LoopVectorize/dead_instructions.ll 
b/llvm/test/Transforms/LoopVectorize/dead_instructions.ll index 02e1d0e9e7004..6e5213568c735 100644 --- a/llvm/test/Transforms/LoopVectorize/dead_instructions.ll +++ b/llvm/test/Transforms/LoopVectorize/dead_instructions.ll @@ -25,7 +25,7 @@ define i64 @dead_instructions_01(ptr %a, i64 %n) { ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP6:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <2 x i64> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i32 2 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i64 2 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i64>, ptr [[TMP2]], align 8 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <2 x i64>, ptr [[TMP5]], align 8 ; CHECK-NEXT: [[TMP6]] = add <2 x i64> [[WIDE_LOAD]], [[VEC_PHI]] @@ -133,13 +133,13 @@ define void @dead_load_and_vector_pointer(ptr %a, ptr %b) { ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[INDEX]] -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 2 +; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 2 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[TMP2]], align 8, !alias.scope [[META5:![0-9]+]], !noalias [[META8:![0-9]+]] -; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <2 x i32>, ptr [[TMP5]], align 8, !alias.scope [[META5]], !noalias [[META8]] +; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <2 x i32>, ptr [[TMP1]], align 8, !alias.scope [[META5]], !noalias [[META8]] ; CHECK-NEXT: [[TMP6:%.*]] = add <2 x i32> [[WIDE_LOAD]], splat (i32 1) ; CHECK-NEXT: [[TMP7:%.*]] = add <2 x i32> [[WIDE_LOAD2]], splat (i32 1) ; CHECK-NEXT: store <2 x i32> [[TMP6]], ptr [[TMP2]], align 
4, !alias.scope [[META5]], !noalias [[META8]] -; CHECK-NEXT: store <2 x i32> [[TMP7]], ptr [[TMP5]], align 4, !alias.scope [[META5]], !noalias [[META8]] +; CHECK-NEXT: store <2 x i32> [[TMP7]], ptr [[TMP1]], align 4, !alias.scope [[META5]], !noalias [[META8]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], 128 ; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] diff --git a/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-const-TC.ll b/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-const-TC.ll index 274bd043cd86b..c23d28cdd0f3a 100644 --- a/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-const-TC.ll +++ b/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-const-TC.ll @@ -15,8 +15,8 @@ define dso_local void @constTC(ptr noalias nocapture %A) optsize { ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[INDEX]] -; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 2 -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 4 +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i64 2 +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i64 4 ; CHECK-NEXT: store <2 x i32> splat (i32 13), ptr [[TMP3]], align 1 ; CHECK-NEXT: store <2 x i32> splat (i32 13), ptr [[TMP7]], align 1 ; CHECK-NEXT: store <2 x i32> splat (i32 13), ptr [[TMP8]], align 1 diff --git a/llvm/test/Transforms/LoopVectorize/expand-scev-after-invoke.ll b/llvm/test/Transforms/LoopVectorize/expand-scev-after-invoke.ll index 4af9f4a13b62b..50e55f6051485 100644 --- a/llvm/test/Transforms/LoopVectorize/expand-scev-after-invoke.ll +++ b/llvm/test/Transforms/LoopVectorize/expand-scev-after-invoke.ll @@ -26,7 +26,7 @@ define void @test(ptr %dst) 
personality ptr null { ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[STEP_ADD:%.*]] = add <4 x i32> [[VEC_IND]], [[TMP1]] ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i32 4 +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i64 4 ; CHECK-NEXT: store <4 x i32> [[VEC_IND]], ptr [[TMP4]], align 8 ; CHECK-NEXT: store <4 x i32> [[STEP_ADD]], ptr [[TMP6]], align 8 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 diff --git a/llvm/test/Transforms/LoopVectorize/fcmp-uno-fold-interleave.ll b/llvm/test/Transforms/LoopVectorize/fcmp-uno-fold-interleave.ll index 22226a711bcf0..5edd83bd1e0d1 100644 --- a/llvm/test/Transforms/LoopVectorize/fcmp-uno-fold-interleave.ll +++ b/llvm/test/Transforms/LoopVectorize/fcmp-uno-fold-interleave.ll @@ -19,8 +19,8 @@ define float @fmaxnum(ptr %src, i64 %n) { ; IC3-NEXT: [[VEC_PHI1:%.*]] = phi <4 x float> [ splat (float -1.000000e+07), %[[VECTOR_PH]] ], [ [[TMP4:%.*]], %[[VECTOR_BODY]] ] ; IC3-NEXT: [[VEC_PHI2:%.*]] = phi <4 x float> [ splat (float -1.000000e+07), %[[VECTOR_PH]] ], [ [[TMP5:%.*]], %[[VECTOR_BODY]] ] ; IC3-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw float, ptr [[SRC]], i64 [[INDEX]] -; IC3-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw float, ptr [[TMP0]], i32 4 -; IC3-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[TMP0]], i32 8 +; IC3-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw float, ptr [[TMP0]], i64 4 +; IC3-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[TMP0]], i64 8 ; IC3-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP0]], align 4 ; IC3-NEXT: [[WIDE_LOAD3:%.*]] = load <4 x float>, ptr [[TMP1]], align 4 ; IC3-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x float>, ptr [[TMP2]], align 4 @@ -71,9 +71,9 @@ define float @fmaxnum(ptr %src, i64 %n) { ; IC4-NEXT: 
[[VEC_PHI2:%.*]] = phi <4 x float> [ splat (float -1.000000e+07), %[[VECTOR_PH]] ], [ [[TMP6:%.*]], %[[VECTOR_BODY]] ] ; IC4-NEXT: [[VEC_PHI3:%.*]] = phi <4 x float> [ splat (float -1.000000e+07), %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ] ; IC4-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw float, ptr [[SRC]], i64 [[INDEX]] -; IC4-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw float, ptr [[TMP0]], i32 4 -; IC4-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[TMP0]], i32 8 -; IC4-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw float, ptr [[TMP0]], i32 12 +; IC4-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw float, ptr [[TMP0]], i64 4 +; IC4-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[TMP0]], i64 8 +; IC4-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw float, ptr [[TMP0]], i64 12 ; IC4-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP0]], align 4 ; IC4-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x float>, ptr [[TMP1]], align 4 ; IC4-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x float>, ptr [[TMP2]], align 4 @@ -129,10 +129,10 @@ define float @fmaxnum(ptr %src, i64 %n) { ; IC5-NEXT: [[VEC_PHI3:%.*]] = phi <4 x float> [ splat (float -1.000000e+07), %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ] ; IC5-NEXT: [[VEC_PHI4:%.*]] = phi <4 x float> [ splat (float -1.000000e+07), %[[VECTOR_PH]] ], [ [[TMP9:%.*]], %[[VECTOR_BODY]] ] ; IC5-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw float, ptr [[SRC]], i64 [[INDEX]] -; IC5-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw float, ptr [[TMP0]], i32 4 -; IC5-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[TMP0]], i32 8 -; IC5-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw float, ptr [[TMP0]], i32 12 -; IC5-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw float, ptr [[TMP0]], i32 16 +; IC5-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw float, ptr [[TMP0]], i64 4 +; IC5-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[TMP0]], i64 8 +; IC5-NEXT: [[TMP3:%.*]] = getelementptr 
inbounds nuw float, ptr [[TMP0]], i64 12 +; IC5-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw float, ptr [[TMP0]], i64 16 ; IC5-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP0]], align 4 ; IC5-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x float>, ptr [[TMP1]], align 4 ; IC5-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x float>, ptr [[TMP2]], align 4 diff --git a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-dead-instructions.ll b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-dead-instructions.ll index eca39e6f0b6ba..cf2e7ccd1b2f0 100644 --- a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-dead-instructions.ll +++ b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-dead-instructions.ll @@ -98,7 +98,7 @@ define i32 @sink_after_dead_inst(ptr %A.ptr) { ; CHECK-NEXT: [[TMP1:%.*]] = or <4 x i16> [[TMP0]], [[TMP0]] ; CHECK-NEXT: [[TMP2:%.*]] = zext <4 x i16> [[TMP1]] to <4 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i32, ptr [[A_PTR]], i16 [[OFFSET_IDX]] -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i32, ptr [[TMP3]], i32 4 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i32, ptr [[TMP3]], i64 4 ; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr [[TMP3]], align 4 ; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr [[TMP5]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8 @@ -160,7 +160,7 @@ define void @sink_dead_inst(ptr %a) { ; CHECK-NEXT: [[TMP7:%.*]] = sub <4 x i16> [[TMP5]], splat (i16 10) ; CHECK-NEXT: [[TMP8:%.*]] = sub <4 x i16> [[TMP6]], splat (i16 10) ; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i16, ptr [[A]], i16 [[OFFSET_IDX]] -; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i16, ptr [[TMP9]], i32 4 +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i16, ptr [[TMP9]], i64 4 ; CHECK-NEXT: store <4 x i16> [[TMP7]], ptr [[TMP9]], align 2 ; CHECK-NEXT: store <4 x i16> [[TMP8]], ptr [[TMP11]], align 2 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8 diff --git 
a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-tail-folding.ll b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-tail-folding.ll index e97d6e66d9d7a..28b46726f80dc 100644 --- a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-tail-folding.ll +++ b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-tail-folding.ll @@ -6,59 +6,276 @@ define i32 @FOR_used_outside(ptr noalias %A, ptr noalias %B, i64 %n) { ; VF2IC1-LABEL: define i32 @FOR_used_outside( ; VF2IC1-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) { -; VF2IC1-NEXT: [[ENTRY:.*]]: -; VF2IC1-NEXT: br label %[[LOOP:.*]] -; VF2IC1: [[LOOP]]: -; VF2IC1-NEXT: [[TMP1:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; VF2IC1-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP10:%.*]], %[[LOOP]] ] +; VF2IC1-NEXT: [[ENTRY:.*:]] +; VF2IC1-NEXT: br label %[[VECTOR_PH:.*]] +; VF2IC1: [[VECTOR_PH]]: +; VF2IC1-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], 1 +; VF2IC1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 2 +; VF2IC1-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; VF2IC1-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1 +; VF2IC1-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0 +; VF2IC1-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT]], <2 x i64> poison, <2 x i32> zeroinitializer +; VF2IC1-NEXT: br label %[[VECTOR_BODY:.*]] +; VF2IC1: [[VECTOR_BODY]]: +; VF2IC1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE4:.*]] ] +; VF2IC1-NEXT: [[VEC_IND:%.*]] = phi <2 x i64> [ , %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[PRED_STORE_CONTINUE4]] ] +; VF2IC1-NEXT: [[VECTOR_RECUR:%.*]] = phi <2 x i32> [ , %[[VECTOR_PH]] ], [ [[TMP12:%.*]], %[[PRED_STORE_CONTINUE4]] ] +; VF2IC1-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 0 +; VF2IC1-NEXT: [[TMP4:%.*]] = add i64 [[INDEX]], 1 +; VF2IC1-NEXT: [[TMP2:%.*]] = icmp ule 
<2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; VF2IC1-NEXT: [[TMP3:%.*]] = extractelement <2 x i1> [[TMP2]], i32 0 +; VF2IC1-NEXT: br i1 [[TMP3]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] +; VF2IC1: [[PRED_LOAD_IF]]: ; VF2IC1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP1]] -; VF2IC1-NEXT: [[TMP10]] = load i32, ptr [[TMP9]], align 4 -; VF2IC1-NEXT: [[TMP23:%.*]] = add nsw i32 [[FOR]], [[TMP10]] -; VF2IC1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP1]] +; VF2IC1-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +; VF2IC1-NEXT: [[TMP6:%.*]] = insertelement <2 x i32> poison, i32 [[TMP10]], i32 0 +; VF2IC1-NEXT: br label %[[PRED_LOAD_CONTINUE]] +; VF2IC1: [[PRED_LOAD_CONTINUE]]: +; VF2IC1-NEXT: [[TMP7:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP6]], %[[PRED_LOAD_IF]] ] +; VF2IC1-NEXT: [[TMP8:%.*]] = extractelement <2 x i1> [[TMP2]], i32 1 +; VF2IC1-NEXT: br i1 [[TMP8]], label %[[PRED_LOAD_IF1:.*]], label %[[PRED_LOAD_CONTINUE2:.*]] +; VF2IC1: [[PRED_LOAD_IF1]]: +; VF2IC1-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP4]] +; VF2IC1-NEXT: [[TMP34:%.*]] = load i32, ptr [[TMP33]], align 4 +; VF2IC1-NEXT: [[TMP11:%.*]] = insertelement <2 x i32> [[TMP7]], i32 [[TMP34]], i32 1 +; VF2IC1-NEXT: br label %[[PRED_LOAD_CONTINUE2]] +; VF2IC1: [[PRED_LOAD_CONTINUE2]]: +; VF2IC1-NEXT: [[TMP12]] = phi <2 x i32> [ [[TMP7]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP11]], %[[PRED_LOAD_IF1]] ] +; VF2IC1-NEXT: [[TMP13:%.*]] = shufflevector <2 x i32> [[VECTOR_RECUR]], <2 x i32> [[TMP12]], <2 x i32> +; VF2IC1-NEXT: [[TMP14:%.*]] = extractelement <2 x i1> [[TMP2]], i32 0 +; VF2IC1-NEXT: br i1 [[TMP14]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]] +; VF2IC1: [[PRED_STORE_IF]]: +; VF2IC1-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP1]] +; VF2IC1-NEXT: [[TMP16:%.*]] = extractelement <2 x i32> [[TMP13]], i32 0 +; VF2IC1-NEXT: 
[[TMP17:%.*]] = extractelement <2 x i32> [[TMP12]], i32 0 +; VF2IC1-NEXT: [[TMP18:%.*]] = add nsw i32 [[TMP16]], [[TMP17]] +; VF2IC1-NEXT: store i32 [[TMP18]], ptr [[TMP15]], align 4 +; VF2IC1-NEXT: br label %[[PRED_STORE_CONTINUE]] +; VF2IC1: [[PRED_STORE_CONTINUE]]: +; VF2IC1-NEXT: [[TMP19:%.*]] = extractelement <2 x i1> [[TMP2]], i32 1 +; VF2IC1-NEXT: br i1 [[TMP19]], label %[[PRED_STORE_IF3:.*]], label %[[PRED_STORE_CONTINUE4]] +; VF2IC1: [[PRED_STORE_IF3]]: +; VF2IC1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP4]] +; VF2IC1-NEXT: [[TMP21:%.*]] = extractelement <2 x i32> [[TMP13]], i32 1 +; VF2IC1-NEXT: [[TMP22:%.*]] = extractelement <2 x i32> [[TMP12]], i32 1 +; VF2IC1-NEXT: [[TMP23:%.*]] = add nsw i32 [[TMP21]], [[TMP22]] ; VF2IC1-NEXT: store i32 [[TMP23]], ptr [[TMP20]], align 4 -; VF2IC1-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP1]], 1 -; VF2IC1-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; VF2IC1-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]] +; VF2IC1-NEXT: br label %[[PRED_STORE_CONTINUE4]] +; VF2IC1: [[PRED_STORE_CONTINUE4]]: +; VF2IC1-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 2 +; VF2IC1-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2) +; VF2IC1-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; VF2IC1-NEXT: br i1 [[TMP24]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; VF2IC1: [[MIDDLE_BLOCK]]: +; VF2IC1-NEXT: [[TMP25:%.*]] = xor <2 x i1> [[TMP2]], splat (i1 true) +; VF2IC1-NEXT: [[TMP26:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v2i1(<2 x i1> [[TMP25]], i1 false) +; VF2IC1-NEXT: [[TMP27:%.*]] = sub i64 [[TMP26]], 1 +; VF2IC1-NEXT: [[TMP28:%.*]] = sub i64 [[TMP27]], 1 +; VF2IC1-NEXT: [[TMP29:%.*]] = extractelement <2 x i32> [[TMP12]], i64 [[TMP28]] +; VF2IC1-NEXT: [[TMP30:%.*]] = extractelement <2 x i32> [[VECTOR_RECUR]], i32 1 +; VF2IC1-NEXT: [[TMP31:%.*]] = icmp eq i64 [[TMP27]], 0 +; VF2IC1-NEXT: [[TMP32:%.*]] = select i1 
[[TMP31]], i32 [[TMP30]], i32 [[TMP29]] +; VF2IC1-NEXT: br label %[[FOR_END:.*]] ; VF2IC1: [[FOR_END]]: -; VF2IC1-NEXT: [[TMP32:%.*]] = phi i32 [ [[FOR]], %[[LOOP]] ] ; VF2IC1-NEXT: ret i32 [[TMP32]] ; ; VF2IC2-LABEL: define i32 @FOR_used_outside( ; VF2IC2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) { -; VF2IC2-NEXT: [[ENTRY:.*]]: -; VF2IC2-NEXT: br label %[[LOOP:.*]] -; VF2IC2: [[LOOP]]: -; VF2IC2-NEXT: [[TMP3:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; VF2IC2-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP23:%.*]], %[[LOOP]] ] +; VF2IC2-NEXT: [[ENTRY:.*:]] +; VF2IC2-NEXT: br label %[[VECTOR_PH:.*]] +; VF2IC2: [[VECTOR_PH]]: +; VF2IC2-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], 3 +; VF2IC2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 4 +; VF2IC2-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; VF2IC2-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1 +; VF2IC2-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0 +; VF2IC2-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT]], <2 x i64> poison, <2 x i32> zeroinitializer +; VF2IC2-NEXT: br label %[[VECTOR_BODY:.*]] +; VF2IC2: [[VECTOR_BODY]]: +; VF2IC2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE12:.*]] ] +; VF2IC2-NEXT: [[VEC_IND:%.*]] = phi <2 x i64> [ , %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[PRED_STORE_CONTINUE12]] ] +; VF2IC2-NEXT: [[VECTOR_RECUR:%.*]] = phi <2 x i32> [ , %[[VECTOR_PH]] ], [ [[TMP25:%.*]], %[[PRED_STORE_CONTINUE12]] ] +; VF2IC2-NEXT: [[STEP_ADD:%.*]] = add <2 x i64> [[VEC_IND]], splat (i64 2) +; VF2IC2-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 0 +; VF2IC2-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1 +; VF2IC2-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 2 +; VF2IC2-NEXT: [[TMP7:%.*]] = add i64 [[INDEX]], 3 +; VF2IC2-NEXT: [[TMP4:%.*]] = icmp ule <2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; VF2IC2-NEXT: 
[[TMP5:%.*]] = icmp ule <2 x i64> [[STEP_ADD]], [[BROADCAST_SPLAT]] +; VF2IC2-NEXT: [[TMP6:%.*]] = extractelement <2 x i1> [[TMP4]], i32 0 +; VF2IC2-NEXT: br i1 [[TMP6]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] +; VF2IC2: [[PRED_LOAD_IF]]: ; VF2IC2-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP3]] -; VF2IC2-NEXT: [[TMP23]] = load i32, ptr [[TMP22]], align 4 -; VF2IC2-NEXT: [[TMP47:%.*]] = add nsw i32 [[FOR]], [[TMP23]] -; VF2IC2-NEXT: [[TMP44:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP3]] +; VF2IC2-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4 +; VF2IC2-NEXT: [[TMP9:%.*]] = insertelement <2 x i32> poison, i32 [[TMP23]], i32 0 +; VF2IC2-NEXT: br label %[[PRED_LOAD_CONTINUE]] +; VF2IC2: [[PRED_LOAD_CONTINUE]]: +; VF2IC2-NEXT: [[TMP10:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP9]], %[[PRED_LOAD_IF]] ] +; VF2IC2-NEXT: [[TMP11:%.*]] = extractelement <2 x i1> [[TMP4]], i32 1 +; VF2IC2-NEXT: br i1 [[TMP11]], label %[[PRED_LOAD_IF1:.*]], label %[[PRED_LOAD_CONTINUE2:.*]] +; VF2IC2: [[PRED_LOAD_IF1]]: +; VF2IC2-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP1]] +; VF2IC2-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4 +; VF2IC2-NEXT: [[TMP14:%.*]] = insertelement <2 x i32> [[TMP10]], i32 [[TMP13]], i32 1 +; VF2IC2-NEXT: br label %[[PRED_LOAD_CONTINUE2]] +; VF2IC2: [[PRED_LOAD_CONTINUE2]]: +; VF2IC2-NEXT: [[TMP15:%.*]] = phi <2 x i32> [ [[TMP10]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP14]], %[[PRED_LOAD_IF1]] ] +; VF2IC2-NEXT: [[TMP16:%.*]] = extractelement <2 x i1> [[TMP5]], i32 0 +; VF2IC2-NEXT: br i1 [[TMP16]], label %[[PRED_LOAD_IF3:.*]], label %[[PRED_LOAD_CONTINUE4:.*]] +; VF2IC2: [[PRED_LOAD_IF3]]: +; VF2IC2-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP2]] +; VF2IC2-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP17]], align 4 +; VF2IC2-NEXT: [[TMP19:%.*]] = insertelement <2 x i32> poison, i32 [[TMP18]], i32 0 +; VF2IC2-NEXT: 
br label %[[PRED_LOAD_CONTINUE4]] +; VF2IC2: [[PRED_LOAD_CONTINUE4]]: +; VF2IC2-NEXT: [[TMP20:%.*]] = phi <2 x i32> [ poison, %[[PRED_LOAD_CONTINUE2]] ], [ [[TMP19]], %[[PRED_LOAD_IF3]] ] +; VF2IC2-NEXT: [[TMP21:%.*]] = extractelement <2 x i1> [[TMP5]], i32 1 +; VF2IC2-NEXT: br i1 [[TMP21]], label %[[PRED_LOAD_IF5:.*]], label %[[PRED_LOAD_CONTINUE6:.*]] +; VF2IC2: [[PRED_LOAD_IF5]]: +; VF2IC2-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP7]] +; VF2IC2-NEXT: [[TMP37:%.*]] = load i32, ptr [[TMP34]], align 4 +; VF2IC2-NEXT: [[TMP24:%.*]] = insertelement <2 x i32> [[TMP20]], i32 [[TMP37]], i32 1 +; VF2IC2-NEXT: br label %[[PRED_LOAD_CONTINUE6]] +; VF2IC2: [[PRED_LOAD_CONTINUE6]]: +; VF2IC2-NEXT: [[TMP25]] = phi <2 x i32> [ [[TMP20]], %[[PRED_LOAD_CONTINUE4]] ], [ [[TMP24]], %[[PRED_LOAD_IF5]] ] +; VF2IC2-NEXT: [[TMP26:%.*]] = shufflevector <2 x i32> [[VECTOR_RECUR]], <2 x i32> [[TMP15]], <2 x i32> +; VF2IC2-NEXT: [[TMP27:%.*]] = shufflevector <2 x i32> [[TMP15]], <2 x i32> [[TMP25]], <2 x i32> +; VF2IC2-NEXT: [[TMP28:%.*]] = extractelement <2 x i1> [[TMP4]], i32 0 +; VF2IC2-NEXT: br i1 [[TMP28]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]] +; VF2IC2: [[PRED_STORE_IF]]: +; VF2IC2-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP3]] +; VF2IC2-NEXT: [[TMP30:%.*]] = extractelement <2 x i32> [[TMP26]], i32 0 +; VF2IC2-NEXT: [[TMP31:%.*]] = extractelement <2 x i32> [[TMP15]], i32 0 +; VF2IC2-NEXT: [[TMP32:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] +; VF2IC2-NEXT: store i32 [[TMP32]], ptr [[TMP29]], align 4 +; VF2IC2-NEXT: br label %[[PRED_STORE_CONTINUE]] +; VF2IC2: [[PRED_STORE_CONTINUE]]: +; VF2IC2-NEXT: [[TMP33:%.*]] = extractelement <2 x i1> [[TMP4]], i32 1 +; VF2IC2-NEXT: br i1 [[TMP33]], label %[[PRED_STORE_IF7:.*]], label %[[PRED_STORE_CONTINUE8:.*]] +; VF2IC2: [[PRED_STORE_IF7]]: +; VF2IC2-NEXT: [[TMP44:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP1]] +; VF2IC2-NEXT: [[TMP35:%.*]] = 
extractelement <2 x i32> [[TMP26]], i32 1 +; VF2IC2-NEXT: [[TMP36:%.*]] = extractelement <2 x i32> [[TMP15]], i32 1 +; VF2IC2-NEXT: [[TMP47:%.*]] = add nsw i32 [[TMP35]], [[TMP36]] ; VF2IC2-NEXT: store i32 [[TMP47]], ptr [[TMP44]], align 4 -; VF2IC2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP3]], 1 -; VF2IC2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; VF2IC2-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]] +; VF2IC2-NEXT: br label %[[PRED_STORE_CONTINUE8]] +; VF2IC2: [[PRED_STORE_CONTINUE8]]: +; VF2IC2-NEXT: [[TMP38:%.*]] = extractelement <2 x i1> [[TMP5]], i32 0 +; VF2IC2-NEXT: br i1 [[TMP38]], label %[[PRED_STORE_IF9:.*]], label %[[PRED_STORE_CONTINUE10:.*]] +; VF2IC2: [[PRED_STORE_IF9]]: +; VF2IC2-NEXT: [[TMP39:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP2]] +; VF2IC2-NEXT: [[TMP40:%.*]] = extractelement <2 x i32> [[TMP27]], i32 0 +; VF2IC2-NEXT: [[TMP41:%.*]] = extractelement <2 x i32> [[TMP25]], i32 0 +; VF2IC2-NEXT: [[TMP42:%.*]] = add nsw i32 [[TMP40]], [[TMP41]] +; VF2IC2-NEXT: store i32 [[TMP42]], ptr [[TMP39]], align 4 +; VF2IC2-NEXT: br label %[[PRED_STORE_CONTINUE10]] +; VF2IC2: [[PRED_STORE_CONTINUE10]]: +; VF2IC2-NEXT: [[TMP43:%.*]] = extractelement <2 x i1> [[TMP5]], i32 1 +; VF2IC2-NEXT: br i1 [[TMP43]], label %[[PRED_STORE_IF11:.*]], label %[[PRED_STORE_CONTINUE12]] +; VF2IC2: [[PRED_STORE_IF11]]: +; VF2IC2-NEXT: [[TMP67:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP7]] +; VF2IC2-NEXT: [[TMP45:%.*]] = extractelement <2 x i32> [[TMP27]], i32 1 +; VF2IC2-NEXT: [[TMP46:%.*]] = extractelement <2 x i32> [[TMP25]], i32 1 +; VF2IC2-NEXT: [[TMP68:%.*]] = add nsw i32 [[TMP45]], [[TMP46]] +; VF2IC2-NEXT: store i32 [[TMP68]], ptr [[TMP67]], align 4 +; VF2IC2-NEXT: br label %[[PRED_STORE_CONTINUE12]] +; VF2IC2: [[PRED_STORE_CONTINUE12]]: +; VF2IC2-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 4 +; VF2IC2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[STEP_ADD]], splat (i64 2) +; VF2IC2-NEXT: [[TMP48:%.*]] = icmp eq i64 
[[INDEX_NEXT]], [[N_VEC]] +; VF2IC2-NEXT: br i1 [[TMP48]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; VF2IC2: [[MIDDLE_BLOCK]]: +; VF2IC2-NEXT: [[TMP49:%.*]] = xor <2 x i1> [[TMP4]], splat (i1 true) +; VF2IC2-NEXT: [[TMP50:%.*]] = xor <2 x i1> [[TMP5]], splat (i1 true) +; VF2IC2-NEXT: [[TMP51:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v2i1(<2 x i1> [[TMP50]], i1 false) +; VF2IC2-NEXT: [[TMP52:%.*]] = add i64 2, [[TMP51]] +; VF2IC2-NEXT: [[TMP53:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v2i1(<2 x i1> [[TMP49]], i1 false) +; VF2IC2-NEXT: [[TMP54:%.*]] = add i64 0, [[TMP53]] +; VF2IC2-NEXT: [[TMP55:%.*]] = icmp ne i64 [[TMP53]], 2 +; VF2IC2-NEXT: [[TMP56:%.*]] = select i1 [[TMP55]], i64 [[TMP54]], i64 [[TMP52]] +; VF2IC2-NEXT: [[TMP57:%.*]] = sub i64 [[TMP56]], 1 +; VF2IC2-NEXT: [[TMP58:%.*]] = sub i64 [[TMP57]], 1 +; VF2IC2-NEXT: [[TMP59:%.*]] = extractelement <2 x i32> [[TMP15]], i64 [[TMP58]] +; VF2IC2-NEXT: [[TMP60:%.*]] = sub i64 [[TMP58]], 2 +; VF2IC2-NEXT: [[TMP61:%.*]] = extractelement <2 x i32> [[TMP25]], i64 [[TMP60]] +; VF2IC2-NEXT: [[TMP62:%.*]] = icmp uge i64 [[TMP58]], 2 +; VF2IC2-NEXT: [[TMP63:%.*]] = select i1 [[TMP62]], i32 [[TMP61]], i32 [[TMP59]] +; VF2IC2-NEXT: [[TMP64:%.*]] = extractelement <2 x i32> [[VECTOR_RECUR]], i32 1 +; VF2IC2-NEXT: [[TMP65:%.*]] = icmp eq i64 [[TMP57]], 0 +; VF2IC2-NEXT: [[TMP66:%.*]] = select i1 [[TMP65]], i32 [[TMP64]], i32 [[TMP63]] +; VF2IC2-NEXT: br label %[[FOR_END:.*]] ; VF2IC2: [[FOR_END]]: -; VF2IC2-NEXT: [[TMP66:%.*]] = phi i32 [ [[FOR]], %[[LOOP]] ] ; VF2IC2-NEXT: ret i32 [[TMP66]] ; ; VF1IC2-LABEL: define i32 @FOR_used_outside( ; VF1IC2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) { -; VF1IC2-NEXT: [[ENTRY:.*]]: -; VF1IC2-NEXT: br label %[[LOOP:.*]] -; VF1IC2: [[LOOP]]: -; VF1IC2-NEXT: [[TMP0:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; VF1IC2-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP7:%.*]], 
%[[LOOP]] ] +; VF1IC2-NEXT: [[ENTRY:.*:]] +; VF1IC2-NEXT: br label %[[VECTOR_PH:.*]] +; VF1IC2: [[VECTOR_PH]]: +; VF1IC2-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], 1 +; VF1IC2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 2 +; VF1IC2-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; VF1IC2-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1 +; VF1IC2-NEXT: br label %[[VECTOR_BODY:.*]] +; VF1IC2: [[VECTOR_BODY]]: +; VF1IC2-NEXT: [[TMP0:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE5:.*]] ] +; VF1IC2-NEXT: [[VECTOR_RECUR:%.*]] = phi i32 [ 33, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[PRED_STORE_CONTINUE5]] ] +; VF1IC2-NEXT: [[TMP3:%.*]] = add i64 [[TMP0]], 1 +; VF1IC2-NEXT: [[VEC_IV:%.*]] = add i64 [[TMP0]], 0 +; VF1IC2-NEXT: [[VEC_IV1:%.*]] = add i64 [[TMP0]], 1 +; VF1IC2-NEXT: [[TMP1:%.*]] = icmp ule i64 [[VEC_IV]], [[TRIP_COUNT_MINUS_1]] +; VF1IC2-NEXT: [[TMP2:%.*]] = icmp ule i64 [[VEC_IV1]], [[TRIP_COUNT_MINUS_1]] +; VF1IC2-NEXT: br i1 [[TMP1]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] +; VF1IC2: [[PRED_LOAD_IF]]: ; VF1IC2-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP0]] -; VF1IC2-NEXT: [[TMP7]] = load i32, ptr [[TMP6]], align 4 -; VF1IC2-NEXT: [[TMP12:%.*]] = add nsw i32 [[FOR]], [[TMP7]] -; VF1IC2-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP0]] +; VF1IC2-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4 +; VF1IC2-NEXT: br label %[[PRED_LOAD_CONTINUE]] +; VF1IC2: [[PRED_LOAD_CONTINUE]]: +; VF1IC2-NEXT: [[TMP5:%.*]] = phi i32 [ poison, %[[VECTOR_BODY]] ], [ [[TMP7]], %[[PRED_LOAD_IF]] ] +; VF1IC2-NEXT: br i1 [[TMP2]], label %[[PRED_LOAD_IF2:.*]], label %[[PRED_LOAD_CONTINUE3:.*]] +; VF1IC2: [[PRED_LOAD_IF2]]: +; VF1IC2-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP3]] +; VF1IC2-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP31]], align 4 +; VF1IC2-NEXT: br label %[[PRED_LOAD_CONTINUE3]] +; VF1IC2: 
[[PRED_LOAD_CONTINUE3]]: +; VF1IC2-NEXT: [[TMP8]] = phi i32 [ poison, %[[PRED_LOAD_CONTINUE]] ], [ [[TMP32]], %[[PRED_LOAD_IF2]] ] +; VF1IC2-NEXT: br i1 [[TMP1]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]] +; VF1IC2: [[PRED_STORE_IF]]: +; VF1IC2-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP0]] +; VF1IC2-NEXT: [[TMP10:%.*]] = add nsw i32 [[VECTOR_RECUR]], [[TMP5]] +; VF1IC2-NEXT: store i32 [[TMP10]], ptr [[TMP9]], align 4 +; VF1IC2-NEXT: br label %[[PRED_STORE_CONTINUE]] +; VF1IC2: [[PRED_STORE_CONTINUE]]: +; VF1IC2-NEXT: br i1 [[TMP2]], label %[[PRED_STORE_IF4:.*]], label %[[PRED_STORE_CONTINUE5]] +; VF1IC2: [[PRED_STORE_IF4]]: +; VF1IC2-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP3]] +; VF1IC2-NEXT: [[TMP12:%.*]] = add nsw i32 [[TMP5]], [[TMP8]] ; VF1IC2-NEXT: store i32 [[TMP12]], ptr [[TMP11]], align 4 -; VF1IC2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP0]], 1 -; VF1IC2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; VF1IC2-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]] +; VF1IC2-NEXT: br label %[[PRED_STORE_CONTINUE5]] +; VF1IC2: [[PRED_STORE_CONTINUE5]]: +; VF1IC2-NEXT: [[INDEX_NEXT]] = add i64 [[TMP0]], 2 +; VF1IC2-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; VF1IC2-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; VF1IC2: [[MIDDLE_BLOCK]]: +; VF1IC2-NEXT: [[TMP14:%.*]] = xor i1 [[TMP1]], true +; VF1IC2-NEXT: [[TMP15:%.*]] = xor i1 [[TMP2]], true +; VF1IC2-NEXT: [[TMP16:%.*]] = icmp eq i1 [[TMP15]], false +; VF1IC2-NEXT: [[TMP17:%.*]] = zext i1 [[TMP16]] to i64 +; VF1IC2-NEXT: [[TMP18:%.*]] = add i64 1, [[TMP17]] +; VF1IC2-NEXT: [[TMP19:%.*]] = icmp eq i1 [[TMP14]], false +; VF1IC2-NEXT: [[TMP20:%.*]] = zext i1 [[TMP19]] to i64 +; VF1IC2-NEXT: [[TMP21:%.*]] = add i64 0, [[TMP20]] +; VF1IC2-NEXT: [[TMP22:%.*]] = icmp ne i64 [[TMP20]], 1 +; VF1IC2-NEXT: [[TMP23:%.*]] = select i1 [[TMP22]], i64 
[[TMP21]], i64 [[TMP18]] +; VF1IC2-NEXT: [[TMP24:%.*]] = sub i64 [[TMP23]], 1 +; VF1IC2-NEXT: [[TMP25:%.*]] = sub i64 [[TMP24]], 1 +; VF1IC2-NEXT: [[TMP26:%.*]] = sub i64 [[TMP25]], 1 +; VF1IC2-NEXT: [[TMP27:%.*]] = icmp uge i64 [[TMP25]], 1 +; VF1IC2-NEXT: [[TMP28:%.*]] = select i1 [[TMP27]], i32 [[TMP8]], i32 [[TMP5]] +; VF1IC2-NEXT: [[TMP29:%.*]] = icmp eq i64 [[TMP24]], 0 +; VF1IC2-NEXT: [[TMP30:%.*]] = select i1 [[TMP29]], i32 [[VECTOR_RECUR]], i32 [[TMP28]] +; VF1IC2-NEXT: br label %[[FOR_END:.*]] ; VF1IC2: [[FOR_END]]: -; VF1IC2-NEXT: [[TMP30:%.*]] = phi i32 [ [[FOR]], %[[LOOP]] ] ; VF1IC2-NEXT: ret i32 [[TMP30]] ; entry: @@ -83,59 +300,265 @@ for.end: define i32 @FOR_next_used_outside(ptr noalias %A, ptr noalias %B, i64 %n) { ; VF2IC1-LABEL: define i32 @FOR_next_used_outside( ; VF2IC1-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) { -; VF2IC1-NEXT: [[ENTRY:.*]]: -; VF2IC1-NEXT: br label %[[LOOP:.*]] -; VF2IC1: [[LOOP]]: -; VF2IC1-NEXT: [[TMP1:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; VF2IC1-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP10:%.*]], %[[LOOP]] ] +; VF2IC1-NEXT: [[ENTRY:.*:]] +; VF2IC1-NEXT: br label %[[VECTOR_PH:.*]] +; VF2IC1: [[VECTOR_PH]]: +; VF2IC1-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], 1 +; VF2IC1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 2 +; VF2IC1-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; VF2IC1-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1 +; VF2IC1-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0 +; VF2IC1-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT]], <2 x i64> poison, <2 x i32> zeroinitializer +; VF2IC1-NEXT: br label %[[VECTOR_BODY:.*]] +; VF2IC1: [[VECTOR_BODY]]: +; VF2IC1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE4:.*]] ] +; VF2IC1-NEXT: [[VEC_IND:%.*]] = phi <2 x i64> [ , %[[VECTOR_PH]] ], [ 
[[VEC_IND_NEXT:%.*]], %[[PRED_STORE_CONTINUE4]] ] +; VF2IC1-NEXT: [[VECTOR_RECUR:%.*]] = phi <2 x i32> [ , %[[VECTOR_PH]] ], [ [[TMP12:%.*]], %[[PRED_STORE_CONTINUE4]] ] +; VF2IC1-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 0 +; VF2IC1-NEXT: [[TMP4:%.*]] = add i64 [[INDEX]], 1 +; VF2IC1-NEXT: [[TMP2:%.*]] = icmp ule <2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; VF2IC1-NEXT: [[TMP3:%.*]] = extractelement <2 x i1> [[TMP2]], i32 0 +; VF2IC1-NEXT: br i1 [[TMP3]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] +; VF2IC1: [[PRED_LOAD_IF]]: ; VF2IC1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP1]] -; VF2IC1-NEXT: [[TMP10]] = load i32, ptr [[TMP9]], align 4 -; VF2IC1-NEXT: [[TMP23:%.*]] = add nsw i32 [[FOR]], [[TMP10]] -; VF2IC1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP1]] +; VF2IC1-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +; VF2IC1-NEXT: [[TMP6:%.*]] = insertelement <2 x i32> poison, i32 [[TMP10]], i32 0 +; VF2IC1-NEXT: br label %[[PRED_LOAD_CONTINUE]] +; VF2IC1: [[PRED_LOAD_CONTINUE]]: +; VF2IC1-NEXT: [[TMP7:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP6]], %[[PRED_LOAD_IF]] ] +; VF2IC1-NEXT: [[TMP8:%.*]] = extractelement <2 x i1> [[TMP2]], i32 1 +; VF2IC1-NEXT: br i1 [[TMP8]], label %[[PRED_LOAD_IF1:.*]], label %[[PRED_LOAD_CONTINUE2:.*]] +; VF2IC1: [[PRED_LOAD_IF1]]: +; VF2IC1-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP4]] +; VF2IC1-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP29]], align 4 +; VF2IC1-NEXT: [[TMP11:%.*]] = insertelement <2 x i32> [[TMP7]], i32 [[TMP30]], i32 1 +; VF2IC1-NEXT: br label %[[PRED_LOAD_CONTINUE2]] +; VF2IC1: [[PRED_LOAD_CONTINUE2]]: +; VF2IC1-NEXT: [[TMP12]] = phi <2 x i32> [ [[TMP7]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP11]], %[[PRED_LOAD_IF1]] ] +; VF2IC1-NEXT: [[TMP13:%.*]] = shufflevector <2 x i32> [[VECTOR_RECUR]], <2 x i32> [[TMP12]], <2 x i32> +; VF2IC1-NEXT: [[TMP14:%.*]] = extractelement <2 x i1> [[TMP2]], i32 0 
+; VF2IC1-NEXT: br i1 [[TMP14]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]] +; VF2IC1: [[PRED_STORE_IF]]: +; VF2IC1-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP1]] +; VF2IC1-NEXT: [[TMP16:%.*]] = extractelement <2 x i32> [[TMP13]], i32 0 +; VF2IC1-NEXT: [[TMP17:%.*]] = extractelement <2 x i32> [[TMP12]], i32 0 +; VF2IC1-NEXT: [[TMP18:%.*]] = add nsw i32 [[TMP16]], [[TMP17]] +; VF2IC1-NEXT: store i32 [[TMP18]], ptr [[TMP15]], align 4 +; VF2IC1-NEXT: br label %[[PRED_STORE_CONTINUE]] +; VF2IC1: [[PRED_STORE_CONTINUE]]: +; VF2IC1-NEXT: [[TMP19:%.*]] = extractelement <2 x i1> [[TMP2]], i32 1 +; VF2IC1-NEXT: br i1 [[TMP19]], label %[[PRED_STORE_IF3:.*]], label %[[PRED_STORE_CONTINUE4]] +; VF2IC1: [[PRED_STORE_IF3]]: +; VF2IC1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP4]] +; VF2IC1-NEXT: [[TMP21:%.*]] = extractelement <2 x i32> [[TMP13]], i32 1 +; VF2IC1-NEXT: [[TMP22:%.*]] = extractelement <2 x i32> [[TMP12]], i32 1 +; VF2IC1-NEXT: [[TMP23:%.*]] = add nsw i32 [[TMP21]], [[TMP22]] ; VF2IC1-NEXT: store i32 [[TMP23]], ptr [[TMP20]], align 4 -; VF2IC1-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP1]], 1 -; VF2IC1-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; VF2IC1-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]] +; VF2IC1-NEXT: br label %[[PRED_STORE_CONTINUE4]] +; VF2IC1: [[PRED_STORE_CONTINUE4]]: +; VF2IC1-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 2 +; VF2IC1-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2) +; VF2IC1-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; VF2IC1-NEXT: br i1 [[TMP24]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; VF2IC1: [[MIDDLE_BLOCK]]: +; VF2IC1-NEXT: [[TMP25:%.*]] = xor <2 x i1> [[TMP2]], splat (i1 true) +; VF2IC1-NEXT: [[TMP26:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v2i1(<2 x i1> [[TMP25]], i1 false) +; VF2IC1-NEXT: [[TMP27:%.*]] = sub i64 [[TMP26]], 1 +; VF2IC1-NEXT: 
[[TMP28:%.*]] = extractelement <2 x i32> [[TMP12]], i64 [[TMP27]] +; VF2IC1-NEXT: br label %[[FOR_END:.*]] ; VF2IC1: [[FOR_END]]: -; VF2IC1-NEXT: [[TMP28:%.*]] = phi i32 [ [[TMP10]], %[[LOOP]] ] ; VF2IC1-NEXT: ret i32 [[TMP28]] ; ; VF2IC2-LABEL: define i32 @FOR_next_used_outside( ; VF2IC2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) { -; VF2IC2-NEXT: [[ENTRY:.*]]: -; VF2IC2-NEXT: br label %[[LOOP:.*]] -; VF2IC2: [[LOOP]]: -; VF2IC2-NEXT: [[TMP3:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; VF2IC2-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP23:%.*]], %[[LOOP]] ] +; VF2IC2-NEXT: [[ENTRY:.*:]] +; VF2IC2-NEXT: br label %[[VECTOR_PH:.*]] +; VF2IC2: [[VECTOR_PH]]: +; VF2IC2-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], 3 +; VF2IC2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 4 +; VF2IC2-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; VF2IC2-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1 +; VF2IC2-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0 +; VF2IC2-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT]], <2 x i64> poison, <2 x i32> zeroinitializer +; VF2IC2-NEXT: br label %[[VECTOR_BODY:.*]] +; VF2IC2: [[VECTOR_BODY]]: +; VF2IC2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE12:.*]] ] +; VF2IC2-NEXT: [[VEC_IND:%.*]] = phi <2 x i64> [ , %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[PRED_STORE_CONTINUE12]] ] +; VF2IC2-NEXT: [[VECTOR_RECUR:%.*]] = phi <2 x i32> [ , %[[VECTOR_PH]] ], [ [[TMP25:%.*]], %[[PRED_STORE_CONTINUE12]] ] +; VF2IC2-NEXT: [[STEP_ADD:%.*]] = add <2 x i64> [[VEC_IND]], splat (i64 2) +; VF2IC2-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 0 +; VF2IC2-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1 +; VF2IC2-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 2 +; VF2IC2-NEXT: [[TMP7:%.*]] = add i64 [[INDEX]], 3 +; VF2IC2-NEXT: [[TMP4:%.*]] = icmp ule <2 x i64> [[VEC_IND]], 
[[BROADCAST_SPLAT]] +; VF2IC2-NEXT: [[TMP5:%.*]] = icmp ule <2 x i64> [[STEP_ADD]], [[BROADCAST_SPLAT]] +; VF2IC2-NEXT: [[TMP6:%.*]] = extractelement <2 x i1> [[TMP4]], i32 0 +; VF2IC2-NEXT: br i1 [[TMP6]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] +; VF2IC2: [[PRED_LOAD_IF]]: ; VF2IC2-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP3]] -; VF2IC2-NEXT: [[TMP23]] = load i32, ptr [[TMP22]], align 4 -; VF2IC2-NEXT: [[TMP47:%.*]] = add nsw i32 [[FOR]], [[TMP23]] -; VF2IC2-NEXT: [[TMP44:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP3]] +; VF2IC2-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4 +; VF2IC2-NEXT: [[TMP9:%.*]] = insertelement <2 x i32> poison, i32 [[TMP23]], i32 0 +; VF2IC2-NEXT: br label %[[PRED_LOAD_CONTINUE]] +; VF2IC2: [[PRED_LOAD_CONTINUE]]: +; VF2IC2-NEXT: [[TMP10:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP9]], %[[PRED_LOAD_IF]] ] +; VF2IC2-NEXT: [[TMP11:%.*]] = extractelement <2 x i1> [[TMP4]], i32 1 +; VF2IC2-NEXT: br i1 [[TMP11]], label %[[PRED_LOAD_IF1:.*]], label %[[PRED_LOAD_CONTINUE2:.*]] +; VF2IC2: [[PRED_LOAD_IF1]]: +; VF2IC2-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP1]] +; VF2IC2-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4 +; VF2IC2-NEXT: [[TMP14:%.*]] = insertelement <2 x i32> [[TMP10]], i32 [[TMP13]], i32 1 +; VF2IC2-NEXT: br label %[[PRED_LOAD_CONTINUE2]] +; VF2IC2: [[PRED_LOAD_CONTINUE2]]: +; VF2IC2-NEXT: [[TMP15:%.*]] = phi <2 x i32> [ [[TMP10]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP14]], %[[PRED_LOAD_IF1]] ] +; VF2IC2-NEXT: [[TMP16:%.*]] = extractelement <2 x i1> [[TMP5]], i32 0 +; VF2IC2-NEXT: br i1 [[TMP16]], label %[[PRED_LOAD_IF3:.*]], label %[[PRED_LOAD_CONTINUE4:.*]] +; VF2IC2: [[PRED_LOAD_IF3]]: +; VF2IC2-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP2]] +; VF2IC2-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP17]], align 4 +; VF2IC2-NEXT: [[TMP19:%.*]] = insertelement <2 x i32> poison, 
i32 [[TMP18]], i32 0 +; VF2IC2-NEXT: br label %[[PRED_LOAD_CONTINUE4]] +; VF2IC2: [[PRED_LOAD_CONTINUE4]]: +; VF2IC2-NEXT: [[TMP20:%.*]] = phi <2 x i32> [ poison, %[[PRED_LOAD_CONTINUE2]] ], [ [[TMP19]], %[[PRED_LOAD_IF3]] ] +; VF2IC2-NEXT: [[TMP21:%.*]] = extractelement <2 x i1> [[TMP5]], i32 1 +; VF2IC2-NEXT: br i1 [[TMP21]], label %[[PRED_LOAD_IF5:.*]], label %[[PRED_LOAD_CONTINUE6:.*]] +; VF2IC2: [[PRED_LOAD_IF5]]: +; VF2IC2-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP7]] +; VF2IC2-NEXT: [[TMP37:%.*]] = load i32, ptr [[TMP34]], align 4 +; VF2IC2-NEXT: [[TMP24:%.*]] = insertelement <2 x i32> [[TMP20]], i32 [[TMP37]], i32 1 +; VF2IC2-NEXT: br label %[[PRED_LOAD_CONTINUE6]] +; VF2IC2: [[PRED_LOAD_CONTINUE6]]: +; VF2IC2-NEXT: [[TMP25]] = phi <2 x i32> [ [[TMP20]], %[[PRED_LOAD_CONTINUE4]] ], [ [[TMP24]], %[[PRED_LOAD_IF5]] ] +; VF2IC2-NEXT: [[TMP26:%.*]] = shufflevector <2 x i32> [[VECTOR_RECUR]], <2 x i32> [[TMP15]], <2 x i32> +; VF2IC2-NEXT: [[TMP27:%.*]] = shufflevector <2 x i32> [[TMP15]], <2 x i32> [[TMP25]], <2 x i32> +; VF2IC2-NEXT: [[TMP28:%.*]] = extractelement <2 x i1> [[TMP4]], i32 0 +; VF2IC2-NEXT: br i1 [[TMP28]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]] +; VF2IC2: [[PRED_STORE_IF]]: +; VF2IC2-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP3]] +; VF2IC2-NEXT: [[TMP30:%.*]] = extractelement <2 x i32> [[TMP26]], i32 0 +; VF2IC2-NEXT: [[TMP31:%.*]] = extractelement <2 x i32> [[TMP15]], i32 0 +; VF2IC2-NEXT: [[TMP32:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] +; VF2IC2-NEXT: store i32 [[TMP32]], ptr [[TMP29]], align 4 +; VF2IC2-NEXT: br label %[[PRED_STORE_CONTINUE]] +; VF2IC2: [[PRED_STORE_CONTINUE]]: +; VF2IC2-NEXT: [[TMP33:%.*]] = extractelement <2 x i1> [[TMP4]], i32 1 +; VF2IC2-NEXT: br i1 [[TMP33]], label %[[PRED_STORE_IF7:.*]], label %[[PRED_STORE_CONTINUE8:.*]] +; VF2IC2: [[PRED_STORE_IF7]]: +; VF2IC2-NEXT: [[TMP44:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 
[[TMP1]] +; VF2IC2-NEXT: [[TMP35:%.*]] = extractelement <2 x i32> [[TMP26]], i32 1 +; VF2IC2-NEXT: [[TMP36:%.*]] = extractelement <2 x i32> [[TMP15]], i32 1 +; VF2IC2-NEXT: [[TMP47:%.*]] = add nsw i32 [[TMP35]], [[TMP36]] ; VF2IC2-NEXT: store i32 [[TMP47]], ptr [[TMP44]], align 4 -; VF2IC2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP3]], 1 -; VF2IC2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; VF2IC2-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]] +; VF2IC2-NEXT: br label %[[PRED_STORE_CONTINUE8]] +; VF2IC2: [[PRED_STORE_CONTINUE8]]: +; VF2IC2-NEXT: [[TMP38:%.*]] = extractelement <2 x i1> [[TMP5]], i32 0 +; VF2IC2-NEXT: br i1 [[TMP38]], label %[[PRED_STORE_IF9:.*]], label %[[PRED_STORE_CONTINUE10:.*]] +; VF2IC2: [[PRED_STORE_IF9]]: +; VF2IC2-NEXT: [[TMP39:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP2]] +; VF2IC2-NEXT: [[TMP40:%.*]] = extractelement <2 x i32> [[TMP27]], i32 0 +; VF2IC2-NEXT: [[TMP41:%.*]] = extractelement <2 x i32> [[TMP25]], i32 0 +; VF2IC2-NEXT: [[TMP42:%.*]] = add nsw i32 [[TMP40]], [[TMP41]] +; VF2IC2-NEXT: store i32 [[TMP42]], ptr [[TMP39]], align 4 +; VF2IC2-NEXT: br label %[[PRED_STORE_CONTINUE10]] +; VF2IC2: [[PRED_STORE_CONTINUE10]]: +; VF2IC2-NEXT: [[TMP43:%.*]] = extractelement <2 x i1> [[TMP5]], i32 1 +; VF2IC2-NEXT: br i1 [[TMP43]], label %[[PRED_STORE_IF11:.*]], label %[[PRED_STORE_CONTINUE12]] +; VF2IC2: [[PRED_STORE_IF11]]: +; VF2IC2-NEXT: [[TMP63:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP7]] +; VF2IC2-NEXT: [[TMP45:%.*]] = extractelement <2 x i32> [[TMP27]], i32 1 +; VF2IC2-NEXT: [[TMP46:%.*]] = extractelement <2 x i32> [[TMP25]], i32 1 +; VF2IC2-NEXT: [[TMP64:%.*]] = add nsw i32 [[TMP45]], [[TMP46]] +; VF2IC2-NEXT: store i32 [[TMP64]], ptr [[TMP63]], align 4 +; VF2IC2-NEXT: br label %[[PRED_STORE_CONTINUE12]] +; VF2IC2: [[PRED_STORE_CONTINUE12]]: +; VF2IC2-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 4 +; VF2IC2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[STEP_ADD]], splat (i64 2) +; 
VF2IC2-NEXT: [[TMP48:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; VF2IC2-NEXT: br i1 [[TMP48]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; VF2IC2: [[MIDDLE_BLOCK]]: +; VF2IC2-NEXT: [[TMP49:%.*]] = xor <2 x i1> [[TMP4]], splat (i1 true) +; VF2IC2-NEXT: [[TMP50:%.*]] = xor <2 x i1> [[TMP5]], splat (i1 true) +; VF2IC2-NEXT: [[TMP51:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v2i1(<2 x i1> [[TMP50]], i1 false) +; VF2IC2-NEXT: [[TMP52:%.*]] = add i64 2, [[TMP51]] +; VF2IC2-NEXT: [[TMP53:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v2i1(<2 x i1> [[TMP49]], i1 false) +; VF2IC2-NEXT: [[TMP54:%.*]] = add i64 0, [[TMP53]] +; VF2IC2-NEXT: [[TMP55:%.*]] = icmp ne i64 [[TMP53]], 2 +; VF2IC2-NEXT: [[TMP56:%.*]] = select i1 [[TMP55]], i64 [[TMP54]], i64 [[TMP52]] +; VF2IC2-NEXT: [[TMP57:%.*]] = sub i64 [[TMP56]], 1 +; VF2IC2-NEXT: [[TMP58:%.*]] = extractelement <2 x i32> [[TMP15]], i64 [[TMP57]] +; VF2IC2-NEXT: [[TMP59:%.*]] = sub i64 [[TMP57]], 2 +; VF2IC2-NEXT: [[TMP60:%.*]] = extractelement <2 x i32> [[TMP25]], i64 [[TMP59]] +; VF2IC2-NEXT: [[TMP61:%.*]] = icmp uge i64 [[TMP57]], 2 +; VF2IC2-NEXT: [[TMP62:%.*]] = select i1 [[TMP61]], i32 [[TMP60]], i32 [[TMP58]] +; VF2IC2-NEXT: br label %[[FOR_END:.*]] ; VF2IC2: [[FOR_END]]: -; VF2IC2-NEXT: [[TMP62:%.*]] = phi i32 [ [[TMP23]], %[[LOOP]] ] ; VF2IC2-NEXT: ret i32 [[TMP62]] ; ; VF1IC2-LABEL: define i32 @FOR_next_used_outside( ; VF1IC2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) { -; VF1IC2-NEXT: [[ENTRY:.*]]: -; VF1IC2-NEXT: br label %[[LOOP:.*]] -; VF1IC2: [[LOOP]]: -; VF1IC2-NEXT: [[TMP0:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; VF1IC2-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP7:%.*]], %[[LOOP]] ] +; VF1IC2-NEXT: [[ENTRY:.*:]] +; VF1IC2-NEXT: br label %[[VECTOR_PH:.*]] +; VF1IC2: [[VECTOR_PH]]: +; VF1IC2-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], 1 +; VF1IC2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 2 +; 
VF1IC2-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; VF1IC2-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1 +; VF1IC2-NEXT: br label %[[VECTOR_BODY:.*]] +; VF1IC2: [[VECTOR_BODY]]: +; VF1IC2-NEXT: [[TMP0:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE5:.*]] ] +; VF1IC2-NEXT: [[VECTOR_RECUR:%.*]] = phi i32 [ 33, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[PRED_STORE_CONTINUE5]] ] +; VF1IC2-NEXT: [[TMP3:%.*]] = add i64 [[TMP0]], 1 +; VF1IC2-NEXT: [[VEC_IV:%.*]] = add i64 [[TMP0]], 0 +; VF1IC2-NEXT: [[VEC_IV1:%.*]] = add i64 [[TMP0]], 1 +; VF1IC2-NEXT: [[TMP1:%.*]] = icmp ule i64 [[VEC_IV]], [[TRIP_COUNT_MINUS_1]] +; VF1IC2-NEXT: [[TMP2:%.*]] = icmp ule i64 [[VEC_IV1]], [[TRIP_COUNT_MINUS_1]] +; VF1IC2-NEXT: br i1 [[TMP1]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] +; VF1IC2: [[PRED_LOAD_IF]]: ; VF1IC2-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP0]] -; VF1IC2-NEXT: [[TMP7]] = load i32, ptr [[TMP6]], align 4 -; VF1IC2-NEXT: [[TMP12:%.*]] = add nsw i32 [[FOR]], [[TMP7]] -; VF1IC2-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP0]] +; VF1IC2-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4 +; VF1IC2-NEXT: br label %[[PRED_LOAD_CONTINUE]] +; VF1IC2: [[PRED_LOAD_CONTINUE]]: +; VF1IC2-NEXT: [[TMP5:%.*]] = phi i32 [ poison, %[[VECTOR_BODY]] ], [ [[TMP7]], %[[PRED_LOAD_IF]] ] +; VF1IC2-NEXT: br i1 [[TMP2]], label %[[PRED_LOAD_IF2:.*]], label %[[PRED_LOAD_CONTINUE3:.*]] +; VF1IC2: [[PRED_LOAD_IF2]]: +; VF1IC2-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP3]] +; VF1IC2-NEXT: [[TMP29:%.*]] = load i32, ptr [[TMP28]], align 4 +; VF1IC2-NEXT: br label %[[PRED_LOAD_CONTINUE3]] +; VF1IC2: [[PRED_LOAD_CONTINUE3]]: +; VF1IC2-NEXT: [[TMP8]] = phi i32 [ poison, %[[PRED_LOAD_CONTINUE]] ], [ [[TMP29]], %[[PRED_LOAD_IF2]] ] +; VF1IC2-NEXT: br i1 [[TMP1]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]] +; VF1IC2: 
[[PRED_STORE_IF]]: +; VF1IC2-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP0]] +; VF1IC2-NEXT: [[TMP10:%.*]] = add nsw i32 [[VECTOR_RECUR]], [[TMP5]] +; VF1IC2-NEXT: store i32 [[TMP10]], ptr [[TMP9]], align 4 +; VF1IC2-NEXT: br label %[[PRED_STORE_CONTINUE]] +; VF1IC2: [[PRED_STORE_CONTINUE]]: +; VF1IC2-NEXT: br i1 [[TMP2]], label %[[PRED_STORE_IF4:.*]], label %[[PRED_STORE_CONTINUE5]] +; VF1IC2: [[PRED_STORE_IF4]]: +; VF1IC2-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP3]] +; VF1IC2-NEXT: [[TMP12:%.*]] = add nsw i32 [[TMP5]], [[TMP8]] ; VF1IC2-NEXT: store i32 [[TMP12]], ptr [[TMP11]], align 4 -; VF1IC2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP0]], 1 -; VF1IC2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; VF1IC2-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]] +; VF1IC2-NEXT: br label %[[PRED_STORE_CONTINUE5]] +; VF1IC2: [[PRED_STORE_CONTINUE5]]: +; VF1IC2-NEXT: [[INDEX_NEXT]] = add i64 [[TMP0]], 2 +; VF1IC2-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; VF1IC2-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; VF1IC2: [[MIDDLE_BLOCK]]: +; VF1IC2-NEXT: [[TMP14:%.*]] = xor i1 [[TMP1]], true +; VF1IC2-NEXT: [[TMP15:%.*]] = xor i1 [[TMP2]], true +; VF1IC2-NEXT: [[TMP16:%.*]] = icmp eq i1 [[TMP15]], false +; VF1IC2-NEXT: [[TMP17:%.*]] = zext i1 [[TMP16]] to i64 +; VF1IC2-NEXT: [[TMP18:%.*]] = add i64 1, [[TMP17]] +; VF1IC2-NEXT: [[TMP19:%.*]] = icmp eq i1 [[TMP14]], false +; VF1IC2-NEXT: [[TMP20:%.*]] = zext i1 [[TMP19]] to i64 +; VF1IC2-NEXT: [[TMP21:%.*]] = add i64 0, [[TMP20]] +; VF1IC2-NEXT: [[TMP22:%.*]] = icmp ne i64 [[TMP20]], 1 +; VF1IC2-NEXT: [[TMP23:%.*]] = select i1 [[TMP22]], i64 [[TMP21]], i64 [[TMP18]] +; VF1IC2-NEXT: [[TMP24:%.*]] = sub i64 [[TMP23]], 1 +; VF1IC2-NEXT: [[TMP25:%.*]] = sub i64 [[TMP24]], 1 +; VF1IC2-NEXT: [[TMP26:%.*]] = icmp uge i64 [[TMP24]], 1 +; VF1IC2-NEXT: [[TMP27:%.*]] = select i1 
[[TMP26]], i32 [[TMP8]], i32 [[TMP5]] +; VF1IC2-NEXT: br label %[[FOR_END:.*]] ; VF1IC2: [[FOR_END]]: -; VF1IC2-NEXT: [[TMP27:%.*]] = phi i32 [ [[TMP7]], %[[LOOP]] ] ; VF1IC2-NEXT: ret i32 [[TMP27]] ; entry: @@ -160,64 +583,287 @@ for.end: define i32 @FOR_and_next_used_outside(ptr noalias %A, ptr noalias %B, i64 %n) { ; VF2IC1-LABEL: define i32 @FOR_and_next_used_outside( ; VF2IC1-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) { -; VF2IC1-NEXT: [[ENTRY:.*]]: -; VF2IC1-NEXT: br label %[[LOOP:.*]] -; VF2IC1: [[LOOP]]: -; VF2IC1-NEXT: [[TMP1:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; VF2IC1-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP10:%.*]], %[[LOOP]] ] +; VF2IC1-NEXT: [[ENTRY:.*:]] +; VF2IC1-NEXT: br label %[[VECTOR_PH:.*]] +; VF2IC1: [[VECTOR_PH]]: +; VF2IC1-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], 1 +; VF2IC1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 2 +; VF2IC1-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; VF2IC1-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1 +; VF2IC1-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0 +; VF2IC1-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT]], <2 x i64> poison, <2 x i32> zeroinitializer +; VF2IC1-NEXT: br label %[[VECTOR_BODY:.*]] +; VF2IC1: [[VECTOR_BODY]]: +; VF2IC1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE4:.*]] ] +; VF2IC1-NEXT: [[VEC_IND:%.*]] = phi <2 x i64> [ , %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[PRED_STORE_CONTINUE4]] ] +; VF2IC1-NEXT: [[VECTOR_RECUR:%.*]] = phi <2 x i32> [ , %[[VECTOR_PH]] ], [ [[TMP12:%.*]], %[[PRED_STORE_CONTINUE4]] ] +; VF2IC1-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 0 +; VF2IC1-NEXT: [[TMP4:%.*]] = add i64 [[INDEX]], 1 +; VF2IC1-NEXT: [[TMP2:%.*]] = icmp ule <2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; VF2IC1-NEXT: [[TMP3:%.*]] = extractelement <2 x i1> [[TMP2]], i32 0 
+; VF2IC1-NEXT: br i1 [[TMP3]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] +; VF2IC1: [[PRED_LOAD_IF]]: ; VF2IC1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP1]] -; VF2IC1-NEXT: [[TMP10]] = load i32, ptr [[TMP9]], align 4 -; VF2IC1-NEXT: [[TMP23:%.*]] = add nsw i32 [[FOR]], [[TMP10]] -; VF2IC1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP1]] +; VF2IC1-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +; VF2IC1-NEXT: [[TMP6:%.*]] = insertelement <2 x i32> poison, i32 [[TMP10]], i32 0 +; VF2IC1-NEXT: br label %[[PRED_LOAD_CONTINUE]] +; VF2IC1: [[PRED_LOAD_CONTINUE]]: +; VF2IC1-NEXT: [[TMP7:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP6]], %[[PRED_LOAD_IF]] ] +; VF2IC1-NEXT: [[TMP8:%.*]] = extractelement <2 x i1> [[TMP2]], i32 1 +; VF2IC1-NEXT: br i1 [[TMP8]], label %[[PRED_LOAD_IF1:.*]], label %[[PRED_LOAD_CONTINUE2:.*]] +; VF2IC1: [[PRED_LOAD_IF1]]: +; VF2IC1-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP4]] +; VF2IC1-NEXT: [[TMP35:%.*]] = load i32, ptr [[TMP34]], align 4 +; VF2IC1-NEXT: [[TMP11:%.*]] = insertelement <2 x i32> [[TMP7]], i32 [[TMP35]], i32 1 +; VF2IC1-NEXT: br label %[[PRED_LOAD_CONTINUE2]] +; VF2IC1: [[PRED_LOAD_CONTINUE2]]: +; VF2IC1-NEXT: [[TMP12]] = phi <2 x i32> [ [[TMP7]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP11]], %[[PRED_LOAD_IF1]] ] +; VF2IC1-NEXT: [[TMP13:%.*]] = shufflevector <2 x i32> [[VECTOR_RECUR]], <2 x i32> [[TMP12]], <2 x i32> +; VF2IC1-NEXT: [[TMP14:%.*]] = extractelement <2 x i1> [[TMP2]], i32 0 +; VF2IC1-NEXT: br i1 [[TMP14]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]] +; VF2IC1: [[PRED_STORE_IF]]: +; VF2IC1-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP1]] +; VF2IC1-NEXT: [[TMP16:%.*]] = extractelement <2 x i32> [[TMP13]], i32 0 +; VF2IC1-NEXT: [[TMP17:%.*]] = extractelement <2 x i32> [[TMP12]], i32 0 +; VF2IC1-NEXT: [[TMP18:%.*]] = add nsw i32 [[TMP16]], [[TMP17]] 
+; VF2IC1-NEXT: store i32 [[TMP18]], ptr [[TMP15]], align 4 +; VF2IC1-NEXT: br label %[[PRED_STORE_CONTINUE]] +; VF2IC1: [[PRED_STORE_CONTINUE]]: +; VF2IC1-NEXT: [[TMP19:%.*]] = extractelement <2 x i1> [[TMP2]], i32 1 +; VF2IC1-NEXT: br i1 [[TMP19]], label %[[PRED_STORE_IF3:.*]], label %[[PRED_STORE_CONTINUE4]] +; VF2IC1: [[PRED_STORE_IF3]]: +; VF2IC1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP4]] +; VF2IC1-NEXT: [[TMP21:%.*]] = extractelement <2 x i32> [[TMP13]], i32 1 +; VF2IC1-NEXT: [[TMP22:%.*]] = extractelement <2 x i32> [[TMP12]], i32 1 +; VF2IC1-NEXT: [[TMP23:%.*]] = add nsw i32 [[TMP21]], [[TMP22]] ; VF2IC1-NEXT: store i32 [[TMP23]], ptr [[TMP20]], align 4 -; VF2IC1-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP1]], 1 -; VF2IC1-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; VF2IC1-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]] +; VF2IC1-NEXT: br label %[[PRED_STORE_CONTINUE4]] +; VF2IC1: [[PRED_STORE_CONTINUE4]]: +; VF2IC1-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 2 +; VF2IC1-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2) +; VF2IC1-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; VF2IC1-NEXT: br i1 [[TMP24]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; VF2IC1: [[MIDDLE_BLOCK]]: +; VF2IC1-NEXT: [[TMP25:%.*]] = xor <2 x i1> [[TMP2]], splat (i1 true) +; VF2IC1-NEXT: [[TMP26:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v2i1(<2 x i1> [[TMP25]], i1 false) +; VF2IC1-NEXT: [[TMP27:%.*]] = sub i64 [[TMP26]], 1 +; VF2IC1-NEXT: [[TMP28:%.*]] = sub i64 [[TMP27]], 1 +; VF2IC1-NEXT: [[TMP29:%.*]] = extractelement <2 x i32> [[TMP12]], i64 [[TMP28]] +; VF2IC1-NEXT: [[TMP30:%.*]] = extractelement <2 x i32> [[VECTOR_RECUR]], i32 1 +; VF2IC1-NEXT: [[TMP31:%.*]] = icmp eq i64 [[TMP27]], 0 +; VF2IC1-NEXT: [[TMP32:%.*]] = select i1 [[TMP31]], i32 [[TMP30]], i32 [[TMP29]] +; VF2IC1-NEXT: [[TMP33:%.*]] = extractelement <2 x i32> [[TMP12]], i64 [[TMP27]] +; 
VF2IC1-NEXT: br label %[[FOR_END:.*]] ; VF2IC1: [[FOR_END]]: -; VF2IC1-NEXT: [[TMP32:%.*]] = phi i32 [ [[FOR]], %[[LOOP]] ] -; VF2IC1-NEXT: [[TMP33:%.*]] = phi i32 [ [[TMP10]], %[[LOOP]] ] ; VF2IC1-NEXT: [[RES:%.*]] = add i32 [[TMP32]], [[TMP33]] ; VF2IC1-NEXT: ret i32 [[RES]] ; ; VF2IC2-LABEL: define i32 @FOR_and_next_used_outside( ; VF2IC2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) { -; VF2IC2-NEXT: [[ENTRY:.*]]: -; VF2IC2-NEXT: br label %[[LOOP:.*]] -; VF2IC2: [[LOOP]]: -; VF2IC2-NEXT: [[TMP3:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; VF2IC2-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP23:%.*]], %[[LOOP]] ] +; VF2IC2-NEXT: [[ENTRY:.*:]] +; VF2IC2-NEXT: br label %[[VECTOR_PH:.*]] +; VF2IC2: [[VECTOR_PH]]: +; VF2IC2-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], 3 +; VF2IC2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 4 +; VF2IC2-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; VF2IC2-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1 +; VF2IC2-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0 +; VF2IC2-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT]], <2 x i64> poison, <2 x i32> zeroinitializer +; VF2IC2-NEXT: br label %[[VECTOR_BODY:.*]] +; VF2IC2: [[VECTOR_BODY]]: +; VF2IC2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE12:.*]] ] +; VF2IC2-NEXT: [[VEC_IND:%.*]] = phi <2 x i64> [ , %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[PRED_STORE_CONTINUE12]] ] +; VF2IC2-NEXT: [[VECTOR_RECUR:%.*]] = phi <2 x i32> [ , %[[VECTOR_PH]] ], [ [[TMP25:%.*]], %[[PRED_STORE_CONTINUE12]] ] +; VF2IC2-NEXT: [[STEP_ADD:%.*]] = add <2 x i64> [[VEC_IND]], splat (i64 2) +; VF2IC2-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 0 +; VF2IC2-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1 +; VF2IC2-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 2 +; VF2IC2-NEXT: [[TMP7:%.*]] = add i64 [[INDEX]], 3 +; 
VF2IC2-NEXT: [[TMP4:%.*]] = icmp ule <2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; VF2IC2-NEXT: [[TMP5:%.*]] = icmp ule <2 x i64> [[STEP_ADD]], [[BROADCAST_SPLAT]] +; VF2IC2-NEXT: [[TMP6:%.*]] = extractelement <2 x i1> [[TMP4]], i32 0 +; VF2IC2-NEXT: br i1 [[TMP6]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] +; VF2IC2: [[PRED_LOAD_IF]]: ; VF2IC2-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP3]] -; VF2IC2-NEXT: [[TMP23]] = load i32, ptr [[TMP22]], align 4 -; VF2IC2-NEXT: [[TMP47:%.*]] = add nsw i32 [[FOR]], [[TMP23]] -; VF2IC2-NEXT: [[TMP44:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP3]] +; VF2IC2-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4 +; VF2IC2-NEXT: [[TMP9:%.*]] = insertelement <2 x i32> poison, i32 [[TMP23]], i32 0 +; VF2IC2-NEXT: br label %[[PRED_LOAD_CONTINUE]] +; VF2IC2: [[PRED_LOAD_CONTINUE]]: +; VF2IC2-NEXT: [[TMP10:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP9]], %[[PRED_LOAD_IF]] ] +; VF2IC2-NEXT: [[TMP11:%.*]] = extractelement <2 x i1> [[TMP4]], i32 1 +; VF2IC2-NEXT: br i1 [[TMP11]], label %[[PRED_LOAD_IF1:.*]], label %[[PRED_LOAD_CONTINUE2:.*]] +; VF2IC2: [[PRED_LOAD_IF1]]: +; VF2IC2-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP1]] +; VF2IC2-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4 +; VF2IC2-NEXT: [[TMP14:%.*]] = insertelement <2 x i32> [[TMP10]], i32 [[TMP13]], i32 1 +; VF2IC2-NEXT: br label %[[PRED_LOAD_CONTINUE2]] +; VF2IC2: [[PRED_LOAD_CONTINUE2]]: +; VF2IC2-NEXT: [[TMP15:%.*]] = phi <2 x i32> [ [[TMP10]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP14]], %[[PRED_LOAD_IF1]] ] +; VF2IC2-NEXT: [[TMP16:%.*]] = extractelement <2 x i1> [[TMP5]], i32 0 +; VF2IC2-NEXT: br i1 [[TMP16]], label %[[PRED_LOAD_IF3:.*]], label %[[PRED_LOAD_CONTINUE4:.*]] +; VF2IC2: [[PRED_LOAD_IF3]]: +; VF2IC2-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP2]] +; VF2IC2-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP17]], align 4 +; 
VF2IC2-NEXT: [[TMP19:%.*]] = insertelement <2 x i32> poison, i32 [[TMP18]], i32 0 +; VF2IC2-NEXT: br label %[[PRED_LOAD_CONTINUE4]] +; VF2IC2: [[PRED_LOAD_CONTINUE4]]: +; VF2IC2-NEXT: [[TMP20:%.*]] = phi <2 x i32> [ poison, %[[PRED_LOAD_CONTINUE2]] ], [ [[TMP19]], %[[PRED_LOAD_IF3]] ] +; VF2IC2-NEXT: [[TMP21:%.*]] = extractelement <2 x i1> [[TMP5]], i32 1 +; VF2IC2-NEXT: br i1 [[TMP21]], label %[[PRED_LOAD_IF5:.*]], label %[[PRED_LOAD_CONTINUE6:.*]] +; VF2IC2: [[PRED_LOAD_IF5]]: +; VF2IC2-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP7]] +; VF2IC2-NEXT: [[TMP37:%.*]] = load i32, ptr [[TMP34]], align 4 +; VF2IC2-NEXT: [[TMP24:%.*]] = insertelement <2 x i32> [[TMP20]], i32 [[TMP37]], i32 1 +; VF2IC2-NEXT: br label %[[PRED_LOAD_CONTINUE6]] +; VF2IC2: [[PRED_LOAD_CONTINUE6]]: +; VF2IC2-NEXT: [[TMP25]] = phi <2 x i32> [ [[TMP20]], %[[PRED_LOAD_CONTINUE4]] ], [ [[TMP24]], %[[PRED_LOAD_IF5]] ] +; VF2IC2-NEXT: [[TMP26:%.*]] = shufflevector <2 x i32> [[VECTOR_RECUR]], <2 x i32> [[TMP15]], <2 x i32> +; VF2IC2-NEXT: [[TMP27:%.*]] = shufflevector <2 x i32> [[TMP15]], <2 x i32> [[TMP25]], <2 x i32> +; VF2IC2-NEXT: [[TMP28:%.*]] = extractelement <2 x i1> [[TMP4]], i32 0 +; VF2IC2-NEXT: br i1 [[TMP28]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]] +; VF2IC2: [[PRED_STORE_IF]]: +; VF2IC2-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP3]] +; VF2IC2-NEXT: [[TMP30:%.*]] = extractelement <2 x i32> [[TMP26]], i32 0 +; VF2IC2-NEXT: [[TMP31:%.*]] = extractelement <2 x i32> [[TMP15]], i32 0 +; VF2IC2-NEXT: [[TMP32:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] +; VF2IC2-NEXT: store i32 [[TMP32]], ptr [[TMP29]], align 4 +; VF2IC2-NEXT: br label %[[PRED_STORE_CONTINUE]] +; VF2IC2: [[PRED_STORE_CONTINUE]]: +; VF2IC2-NEXT: [[TMP33:%.*]] = extractelement <2 x i1> [[TMP4]], i32 1 +; VF2IC2-NEXT: br i1 [[TMP33]], label %[[PRED_STORE_IF7:.*]], label %[[PRED_STORE_CONTINUE8:.*]] +; VF2IC2: [[PRED_STORE_IF7]]: +; VF2IC2-NEXT: 
[[TMP44:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP1]] +; VF2IC2-NEXT: [[TMP35:%.*]] = extractelement <2 x i32> [[TMP26]], i32 1 +; VF2IC2-NEXT: [[TMP36:%.*]] = extractelement <2 x i32> [[TMP15]], i32 1 +; VF2IC2-NEXT: [[TMP47:%.*]] = add nsw i32 [[TMP35]], [[TMP36]] ; VF2IC2-NEXT: store i32 [[TMP47]], ptr [[TMP44]], align 4 -; VF2IC2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP3]], 1 -; VF2IC2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; VF2IC2-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]] +; VF2IC2-NEXT: br label %[[PRED_STORE_CONTINUE8]] +; VF2IC2: [[PRED_STORE_CONTINUE8]]: +; VF2IC2-NEXT: [[TMP38:%.*]] = extractelement <2 x i1> [[TMP5]], i32 0 +; VF2IC2-NEXT: br i1 [[TMP38]], label %[[PRED_STORE_IF9:.*]], label %[[PRED_STORE_CONTINUE10:.*]] +; VF2IC2: [[PRED_STORE_IF9]]: +; VF2IC2-NEXT: [[TMP39:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP2]] +; VF2IC2-NEXT: [[TMP40:%.*]] = extractelement <2 x i32> [[TMP27]], i32 0 +; VF2IC2-NEXT: [[TMP41:%.*]] = extractelement <2 x i32> [[TMP25]], i32 0 +; VF2IC2-NEXT: [[TMP42:%.*]] = add nsw i32 [[TMP40]], [[TMP41]] +; VF2IC2-NEXT: store i32 [[TMP42]], ptr [[TMP39]], align 4 +; VF2IC2-NEXT: br label %[[PRED_STORE_CONTINUE10]] +; VF2IC2: [[PRED_STORE_CONTINUE10]]: +; VF2IC2-NEXT: [[TMP43:%.*]] = extractelement <2 x i1> [[TMP5]], i32 1 +; VF2IC2-NEXT: br i1 [[TMP43]], label %[[PRED_STORE_IF11:.*]], label %[[PRED_STORE_CONTINUE12]] +; VF2IC2: [[PRED_STORE_IF11]]: +; VF2IC2-NEXT: [[TMP72:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP7]] +; VF2IC2-NEXT: [[TMP45:%.*]] = extractelement <2 x i32> [[TMP27]], i32 1 +; VF2IC2-NEXT: [[TMP46:%.*]] = extractelement <2 x i32> [[TMP25]], i32 1 +; VF2IC2-NEXT: [[TMP73:%.*]] = add nsw i32 [[TMP45]], [[TMP46]] +; VF2IC2-NEXT: store i32 [[TMP73]], ptr [[TMP72]], align 4 +; VF2IC2-NEXT: br label %[[PRED_STORE_CONTINUE12]] +; VF2IC2: [[PRED_STORE_CONTINUE12]]: +; VF2IC2-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 4 +; VF2IC2-NEXT: 
[[VEC_IND_NEXT]] = add <2 x i64> [[STEP_ADD]], splat (i64 2) +; VF2IC2-NEXT: [[TMP48:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; VF2IC2-NEXT: br i1 [[TMP48]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; VF2IC2: [[MIDDLE_BLOCK]]: +; VF2IC2-NEXT: [[TMP49:%.*]] = xor <2 x i1> [[TMP4]], splat (i1 true) +; VF2IC2-NEXT: [[TMP50:%.*]] = xor <2 x i1> [[TMP5]], splat (i1 true) +; VF2IC2-NEXT: [[TMP51:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v2i1(<2 x i1> [[TMP50]], i1 false) +; VF2IC2-NEXT: [[TMP52:%.*]] = add i64 2, [[TMP51]] +; VF2IC2-NEXT: [[TMP53:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v2i1(<2 x i1> [[TMP49]], i1 false) +; VF2IC2-NEXT: [[TMP54:%.*]] = add i64 0, [[TMP53]] +; VF2IC2-NEXT: [[TMP55:%.*]] = icmp ne i64 [[TMP53]], 2 +; VF2IC2-NEXT: [[TMP56:%.*]] = select i1 [[TMP55]], i64 [[TMP54]], i64 [[TMP52]] +; VF2IC2-NEXT: [[TMP57:%.*]] = sub i64 [[TMP56]], 1 +; VF2IC2-NEXT: [[TMP58:%.*]] = sub i64 [[TMP57]], 1 +; VF2IC2-NEXT: [[TMP59:%.*]] = extractelement <2 x i32> [[TMP15]], i64 [[TMP58]] +; VF2IC2-NEXT: [[TMP60:%.*]] = sub i64 [[TMP58]], 2 +; VF2IC2-NEXT: [[TMP61:%.*]] = extractelement <2 x i32> [[TMP25]], i64 [[TMP60]] +; VF2IC2-NEXT: [[TMP62:%.*]] = icmp uge i64 [[TMP58]], 2 +; VF2IC2-NEXT: [[TMP63:%.*]] = select i1 [[TMP62]], i32 [[TMP61]], i32 [[TMP59]] +; VF2IC2-NEXT: [[TMP64:%.*]] = extractelement <2 x i32> [[VECTOR_RECUR]], i32 1 +; VF2IC2-NEXT: [[TMP65:%.*]] = icmp eq i64 [[TMP57]], 0 +; VF2IC2-NEXT: [[TMP66:%.*]] = select i1 [[TMP65]], i32 [[TMP64]], i32 [[TMP63]] +; VF2IC2-NEXT: [[TMP67:%.*]] = extractelement <2 x i32> [[TMP15]], i64 [[TMP57]] +; VF2IC2-NEXT: [[TMP68:%.*]] = sub i64 [[TMP57]], 2 +; VF2IC2-NEXT: [[TMP69:%.*]] = extractelement <2 x i32> [[TMP25]], i64 [[TMP68]] +; VF2IC2-NEXT: [[TMP70:%.*]] = icmp uge i64 [[TMP57]], 2 +; VF2IC2-NEXT: [[TMP71:%.*]] = select i1 [[TMP70]], i32 [[TMP69]], i32 [[TMP67]] +; VF2IC2-NEXT: br label %[[FOR_END:.*]] ; VF2IC2: [[FOR_END]]: -; VF2IC2-NEXT: 
[[TMP66:%.*]] = phi i32 [ [[FOR]], %[[LOOP]] ] -; VF2IC2-NEXT: [[TMP71:%.*]] = phi i32 [ [[TMP23]], %[[LOOP]] ] ; VF2IC2-NEXT: [[RES:%.*]] = add i32 [[TMP66]], [[TMP71]] ; VF2IC2-NEXT: ret i32 [[RES]] ; ; VF1IC2-LABEL: define i32 @FOR_and_next_used_outside( ; VF1IC2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) { -; VF1IC2-NEXT: [[ENTRY:.*]]: -; VF1IC2-NEXT: br label %[[LOOP:.*]] -; VF1IC2: [[LOOP]]: -; VF1IC2-NEXT: [[TMP0:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; VF1IC2-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP7:%.*]], %[[LOOP]] ] +; VF1IC2-NEXT: [[ENTRY:.*:]] +; VF1IC2-NEXT: br label %[[VECTOR_PH:.*]] +; VF1IC2: [[VECTOR_PH]]: +; VF1IC2-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], 1 +; VF1IC2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 2 +; VF1IC2-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; VF1IC2-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1 +; VF1IC2-NEXT: br label %[[VECTOR_BODY:.*]] +; VF1IC2: [[VECTOR_BODY]]: +; VF1IC2-NEXT: [[TMP0:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE5:.*]] ] +; VF1IC2-NEXT: [[VECTOR_RECUR:%.*]] = phi i32 [ 33, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[PRED_STORE_CONTINUE5]] ] +; VF1IC2-NEXT: [[TMP3:%.*]] = add i64 [[TMP0]], 1 +; VF1IC2-NEXT: [[VEC_IV:%.*]] = add i64 [[TMP0]], 0 +; VF1IC2-NEXT: [[VEC_IV1:%.*]] = add i64 [[TMP0]], 1 +; VF1IC2-NEXT: [[TMP1:%.*]] = icmp ule i64 [[VEC_IV]], [[TRIP_COUNT_MINUS_1]] +; VF1IC2-NEXT: [[TMP2:%.*]] = icmp ule i64 [[VEC_IV1]], [[TRIP_COUNT_MINUS_1]] +; VF1IC2-NEXT: br i1 [[TMP1]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] +; VF1IC2: [[PRED_LOAD_IF]]: ; VF1IC2-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP0]] -; VF1IC2-NEXT: [[TMP7]] = load i32, ptr [[TMP6]], align 4 -; VF1IC2-NEXT: [[TMP12:%.*]] = add nsw i32 [[FOR]], [[TMP7]] -; VF1IC2-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP0]] +; VF1IC2-NEXT: 
[[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4 +; VF1IC2-NEXT: br label %[[PRED_LOAD_CONTINUE]] +; VF1IC2: [[PRED_LOAD_CONTINUE]]: +; VF1IC2-NEXT: [[TMP5:%.*]] = phi i32 [ poison, %[[VECTOR_BODY]] ], [ [[TMP7]], %[[PRED_LOAD_IF]] ] +; VF1IC2-NEXT: br i1 [[TMP2]], label %[[PRED_LOAD_IF2:.*]], label %[[PRED_LOAD_CONTINUE3:.*]] +; VF1IC2: [[PRED_LOAD_IF2]]: +; VF1IC2-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP3]] +; VF1IC2-NEXT: [[TMP35:%.*]] = load i32, ptr [[TMP34]], align 4 +; VF1IC2-NEXT: br label %[[PRED_LOAD_CONTINUE3]] +; VF1IC2: [[PRED_LOAD_CONTINUE3]]: +; VF1IC2-NEXT: [[TMP8]] = phi i32 [ poison, %[[PRED_LOAD_CONTINUE]] ], [ [[TMP35]], %[[PRED_LOAD_IF2]] ] +; VF1IC2-NEXT: br i1 [[TMP1]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]] +; VF1IC2: [[PRED_STORE_IF]]: +; VF1IC2-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP0]] +; VF1IC2-NEXT: [[TMP10:%.*]] = add nsw i32 [[VECTOR_RECUR]], [[TMP5]] +; VF1IC2-NEXT: store i32 [[TMP10]], ptr [[TMP9]], align 4 +; VF1IC2-NEXT: br label %[[PRED_STORE_CONTINUE]] +; VF1IC2: [[PRED_STORE_CONTINUE]]: +; VF1IC2-NEXT: br i1 [[TMP2]], label %[[PRED_STORE_IF4:.*]], label %[[PRED_STORE_CONTINUE5]] +; VF1IC2: [[PRED_STORE_IF4]]: +; VF1IC2-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP3]] +; VF1IC2-NEXT: [[TMP12:%.*]] = add nsw i32 [[TMP5]], [[TMP8]] ; VF1IC2-NEXT: store i32 [[TMP12]], ptr [[TMP11]], align 4 -; VF1IC2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP0]], 1 -; VF1IC2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; VF1IC2-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]] +; VF1IC2-NEXT: br label %[[PRED_STORE_CONTINUE5]] +; VF1IC2: [[PRED_STORE_CONTINUE5]]: +; VF1IC2-NEXT: [[INDEX_NEXT]] = add i64 [[TMP0]], 2 +; VF1IC2-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; VF1IC2-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; VF1IC2: 
[[MIDDLE_BLOCK]]: +; VF1IC2-NEXT: [[TMP14:%.*]] = xor i1 [[TMP1]], true +; VF1IC2-NEXT: [[TMP15:%.*]] = xor i1 [[TMP2]], true +; VF1IC2-NEXT: [[TMP16:%.*]] = icmp eq i1 [[TMP15]], false +; VF1IC2-NEXT: [[TMP17:%.*]] = zext i1 [[TMP16]] to i64 +; VF1IC2-NEXT: [[TMP18:%.*]] = add i64 1, [[TMP17]] +; VF1IC2-NEXT: [[TMP19:%.*]] = icmp eq i1 [[TMP14]], false +; VF1IC2-NEXT: [[TMP20:%.*]] = zext i1 [[TMP19]] to i64 +; VF1IC2-NEXT: [[TMP21:%.*]] = add i64 0, [[TMP20]] +; VF1IC2-NEXT: [[TMP22:%.*]] = icmp ne i64 [[TMP20]], 1 +; VF1IC2-NEXT: [[TMP23:%.*]] = select i1 [[TMP22]], i64 [[TMP21]], i64 [[TMP18]] +; VF1IC2-NEXT: [[TMP24:%.*]] = sub i64 [[TMP23]], 1 +; VF1IC2-NEXT: [[TMP25:%.*]] = sub i64 [[TMP24]], 1 +; VF1IC2-NEXT: [[TMP26:%.*]] = sub i64 [[TMP25]], 1 +; VF1IC2-NEXT: [[TMP27:%.*]] = icmp uge i64 [[TMP25]], 1 +; VF1IC2-NEXT: [[TMP28:%.*]] = select i1 [[TMP27]], i32 [[TMP8]], i32 [[TMP5]] +; VF1IC2-NEXT: [[TMP29:%.*]] = icmp eq i64 [[TMP24]], 0 +; VF1IC2-NEXT: [[TMP30:%.*]] = select i1 [[TMP29]], i32 [[VECTOR_RECUR]], i32 [[TMP28]] +; VF1IC2-NEXT: [[TMP31:%.*]] = sub i64 [[TMP24]], 1 +; VF1IC2-NEXT: [[TMP32:%.*]] = icmp uge i64 [[TMP24]], 1 +; VF1IC2-NEXT: [[TMP33:%.*]] = select i1 [[TMP32]], i32 [[TMP8]], i32 [[TMP5]] +; VF1IC2-NEXT: br label %[[FOR_END:.*]] ; VF1IC2: [[FOR_END]]: -; VF1IC2-NEXT: [[TMP30:%.*]] = phi i32 [ [[FOR]], %[[LOOP]] ] -; VF1IC2-NEXT: [[TMP33:%.*]] = phi i32 [ [[TMP7]], %[[LOOP]] ] ; VF1IC2-NEXT: [[RES:%.*]] = add i32 [[TMP30]], [[TMP33]] ; VF1IC2-NEXT: ret i32 [[RES]] ; diff --git a/llvm/test/Transforms/LoopVectorize/first-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/first-order-recurrence.ll index cebd52fa7f866..063f47ce2b32d 100644 --- a/llvm/test/Transforms/LoopVectorize/first-order-recurrence.ll +++ b/llvm/test/Transforms/LoopVectorize/first-order-recurrence.ll @@ -33,7 +33,7 @@ define void @recurrence_1(ptr readonly noalias %a, ptr noalias %b, i32 %n) { ; UNROLL-NO-IC-NEXT: [[VECTOR_RECUR:%.*]] = phi <4 x i32> [ 
[[VECTOR_RECUR_INIT]], [[VECTOR_PH]] ], [ [[WIDE_LOAD1:%.*]], [[VECTOR_BODY]] ] ; UNROLL-NO-IC-NEXT: [[TMP4:%.*]] = add nuw nsw i64 [[INDEX]], 1 ; UNROLL-NO-IC-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP4]] -; UNROLL-NO-IC-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[TMP5]], i32 4 +; UNROLL-NO-IC-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[TMP5]], i64 4 ; UNROLL-NO-IC-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP5]], align 4 ; UNROLL-NO-IC-NEXT: [[WIDE_LOAD1]] = load <4 x i32>, ptr [[TMP7]], align 4 ; UNROLL-NO-IC-NEXT: [[TMP8:%.*]] = shufflevector <4 x i32> [[VECTOR_RECUR]], <4 x i32> [[WIDE_LOAD]], <4 x i32> @@ -41,7 +41,7 @@ define void @recurrence_1(ptr readonly noalias %a, ptr noalias %b, i32 %n) { ; UNROLL-NO-IC-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDEX]] ; UNROLL-NO-IC-NEXT: [[TMP11:%.*]] = add <4 x i32> [[WIDE_LOAD]], [[TMP8]] ; UNROLL-NO-IC-NEXT: [[TMP12:%.*]] = add <4 x i32> [[WIDE_LOAD1]], [[TMP9]] -; UNROLL-NO-IC-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[TMP10]], i32 4 +; UNROLL-NO-IC-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[TMP10]], i64 4 ; UNROLL-NO-IC-NEXT: store <4 x i32> [[TMP11]], ptr [[TMP10]], align 4 ; UNROLL-NO-IC-NEXT: store <4 x i32> [[TMP12]], ptr [[TMP14]], align 4 ; UNROLL-NO-IC-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 @@ -231,7 +231,7 @@ define i32 @recurrence_2(ptr nocapture readonly %a, i32 %n) { ; UNROLL-NO-IC-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ poison, [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ] ; UNROLL-NO-IC-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ poison, [[VECTOR_PH]] ], [ [[TMP16:%.*]], [[VECTOR_BODY]] ] ; UNROLL-NO-IC-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]] -; UNROLL-NO-IC-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 4 +; UNROLL-NO-IC-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 4 ; UNROLL-NO-IC-NEXT: 
[[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4 ; UNROLL-NO-IC-NEXT: [[WIDE_LOAD2]] = load <4 x i32>, ptr [[TMP4]], align 4 ; UNROLL-NO-IC-NEXT: [[TMP5:%.*]] = shufflevector <4 x i32> [[VECTOR_RECUR]], <4 x i32> [[WIDE_LOAD]], <4 x i32> @@ -485,7 +485,7 @@ define void @recurrence_3(ptr readonly noalias %a, ptr noalias %b, i32 %n, float ; UNROLL-NO-IC-NEXT: [[VECTOR_RECUR:%.*]] = phi <4 x i16> [ [[VECTOR_RECUR_INIT]], [[VECTOR_PH]] ], [ [[WIDE_LOAD1:%.*]], [[VECTOR_BODY]] ] ; UNROLL-NO-IC-NEXT: [[OFFSET_IDX:%.*]] = add i64 1, [[INDEX]] ; UNROLL-NO-IC-NEXT: [[TMP4:%.*]] = getelementptr inbounds i16, ptr [[A]], i64 [[OFFSET_IDX]] -; UNROLL-NO-IC-NEXT: [[TMP6:%.*]] = getelementptr inbounds i16, ptr [[TMP4]], i32 4 +; UNROLL-NO-IC-NEXT: [[TMP6:%.*]] = getelementptr inbounds i16, ptr [[TMP4]], i64 4 ; UNROLL-NO-IC-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i16>, ptr [[TMP4]], align 2 ; UNROLL-NO-IC-NEXT: [[WIDE_LOAD1]] = load <4 x i16>, ptr [[TMP6]], align 2 ; UNROLL-NO-IC-NEXT: [[TMP7:%.*]] = shufflevector <4 x i16> [[VECTOR_RECUR]], <4 x i16> [[WIDE_LOAD]], <4 x i32> @@ -499,7 +499,7 @@ define void @recurrence_3(ptr readonly noalias %a, ptr noalias %b, i32 %n, float ; UNROLL-NO-IC-NEXT: [[TMP15:%.*]] = fsub fast <4 x double> [[TMP9]], [[TMP13]] ; UNROLL-NO-IC-NEXT: [[TMP16:%.*]] = fsub fast <4 x double> [[TMP10]], [[TMP14]] ; UNROLL-NO-IC-NEXT: [[TMP17:%.*]] = getelementptr inbounds double, ptr [[B]], i64 [[OFFSET_IDX]] -; UNROLL-NO-IC-NEXT: [[TMP19:%.*]] = getelementptr inbounds double, ptr [[TMP17]], i32 4 +; UNROLL-NO-IC-NEXT: [[TMP19:%.*]] = getelementptr inbounds double, ptr [[TMP17]], i64 4 ; UNROLL-NO-IC-NEXT: store <4 x double> [[TMP15]], ptr [[TMP17]], align 8 ; UNROLL-NO-IC-NEXT: store <4 x double> [[TMP16]], ptr [[TMP19]], align 8 ; UNROLL-NO-IC-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 @@ -1700,7 +1700,7 @@ define void @sink_after(ptr noalias %a, ptr noalias %b, i64 %n) { ; UNROLL-NO-IC-NEXT: [[VECTOR_RECUR:%.*]] = phi <4 x i16> [ 
[[VECTOR_RECUR_INIT]], [[VECTOR_PH]] ], [ [[WIDE_LOAD1:%.*]], [[VECTOR_BODY]] ] ; UNROLL-NO-IC-NEXT: [[TMP1:%.*]] = add nuw nsw i64 [[INDEX]], 1 ; UNROLL-NO-IC-NEXT: [[TMP2:%.*]] = getelementptr inbounds i16, ptr [[A]], i64 [[TMP1]] -; UNROLL-NO-IC-NEXT: [[TMP4:%.*]] = getelementptr inbounds i16, ptr [[TMP2]], i32 4 +; UNROLL-NO-IC-NEXT: [[TMP4:%.*]] = getelementptr inbounds i16, ptr [[TMP2]], i64 4 ; UNROLL-NO-IC-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i16>, ptr [[TMP2]], align 2 ; UNROLL-NO-IC-NEXT: [[WIDE_LOAD1]] = load <4 x i16>, ptr [[TMP4]], align 2 ; UNROLL-NO-IC-NEXT: [[TMP5:%.*]] = shufflevector <4 x i16> [[VECTOR_RECUR]], <4 x i16> [[WIDE_LOAD]], <4 x i32> @@ -1712,7 +1712,7 @@ define void @sink_after(ptr noalias %a, ptr noalias %b, i64 %n) { ; UNROLL-NO-IC-NEXT: [[TMP11:%.*]] = mul nsw <4 x i32> [[TMP9]], [[TMP7]] ; UNROLL-NO-IC-NEXT: [[TMP12:%.*]] = mul nsw <4 x i32> [[TMP10]], [[TMP8]] ; UNROLL-NO-IC-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDEX]] -; UNROLL-NO-IC-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[TMP13]], i32 4 +; UNROLL-NO-IC-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[TMP13]], i64 4 ; UNROLL-NO-IC-NEXT: store <4 x i32> [[TMP11]], ptr [[TMP13]], align 4 ; UNROLL-NO-IC-NEXT: store <4 x i32> [[TMP12]], ptr [[TMP15]], align 4 ; UNROLL-NO-IC-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 @@ -1915,7 +1915,7 @@ define void @PR34711(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %n) { ; UNROLL-NO-IC-NEXT: [[TMP14:%.*]] = getelementptr inbounds [2 x i16], ptr [[A]], i64 [[TMP5]], i64 1 ; UNROLL-NO-IC-NEXT: [[TMP15:%.*]] = getelementptr inbounds [2 x i16], ptr [[A]], i64 [[TMP6]], i64 1 ; UNROLL-NO-IC-NEXT: [[TMP16:%.*]] = getelementptr inbounds [2 x i16], ptr [[A]], i64 [[TMP7]], i64 1 -; UNROLL-NO-IC-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i32 4 +; UNROLL-NO-IC-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i64 4 ; UNROLL-NO-IC-NEXT: store <4 x i32> 
splat (i32 7), ptr [[TMP8]], align 4 ; UNROLL-NO-IC-NEXT: store <4 x i32> splat (i32 7), ptr [[TMP18]], align 4 ; UNROLL-NO-IC-NEXT: [[TMP19:%.*]] = load i16, ptr [[TMP9]], align 2 @@ -1943,7 +1943,7 @@ define void @PR34711(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %n) { ; UNROLL-NO-IC-NEXT: [[TMP41:%.*]] = mul nsw <4 x i32> [[TMP39]], [[TMP37]] ; UNROLL-NO-IC-NEXT: [[TMP42:%.*]] = mul nsw <4 x i32> [[TMP40]], [[TMP38]] ; UNROLL-NO-IC-NEXT: [[TMP43:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[TMP0]] -; UNROLL-NO-IC-NEXT: [[TMP45:%.*]] = getelementptr inbounds i32, ptr [[TMP43]], i32 4 +; UNROLL-NO-IC-NEXT: [[TMP45:%.*]] = getelementptr inbounds i32, ptr [[TMP43]], i64 4 ; UNROLL-NO-IC-NEXT: store <4 x i32> [[TMP41]], ptr [[TMP43]], align 4 ; UNROLL-NO-IC-NEXT: store <4 x i32> [[TMP42]], ptr [[TMP45]], align 4 ; UNROLL-NO-IC-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 @@ -2146,7 +2146,7 @@ define void @sink_after_with_multiple_users(ptr noalias %a, ptr noalias %b, i64 ; UNROLL-NO-IC-NEXT: [[VECTOR_RECUR:%.*]] = phi <4 x i16> [ [[VECTOR_RECUR_INIT]], [[VECTOR_PH]] ], [ [[WIDE_LOAD1:%.*]], [[VECTOR_BODY]] ] ; UNROLL-NO-IC-NEXT: [[TMP1:%.*]] = add nuw nsw i64 [[INDEX]], 1 ; UNROLL-NO-IC-NEXT: [[TMP2:%.*]] = getelementptr inbounds i16, ptr [[A]], i64 [[TMP1]] -; UNROLL-NO-IC-NEXT: [[TMP4:%.*]] = getelementptr inbounds i16, ptr [[TMP2]], i32 4 +; UNROLL-NO-IC-NEXT: [[TMP4:%.*]] = getelementptr inbounds i16, ptr [[TMP2]], i64 4 ; UNROLL-NO-IC-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i16>, ptr [[TMP2]], align 2 ; UNROLL-NO-IC-NEXT: [[WIDE_LOAD1]] = load <4 x i16>, ptr [[TMP4]], align 2 ; UNROLL-NO-IC-NEXT: [[TMP5:%.*]] = shufflevector <4 x i16> [[VECTOR_RECUR]], <4 x i16> [[WIDE_LOAD]], <4 x i32> @@ -2160,7 +2160,7 @@ define void @sink_after_with_multiple_users(ptr noalias %a, ptr noalias %b, i64 ; UNROLL-NO-IC-NEXT: [[TMP13:%.*]] = mul nsw <4 x i32> [[TMP9]], [[TMP11]] ; UNROLL-NO-IC-NEXT: [[TMP14:%.*]] = mul nsw <4 x i32> [[TMP10]], [[TMP12]] ; 
UNROLL-NO-IC-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDEX]] -; UNROLL-NO-IC-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[TMP15]], i32 4 +; UNROLL-NO-IC-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[TMP15]], i64 4 ; UNROLL-NO-IC-NEXT: store <4 x i32> [[TMP13]], ptr [[TMP15]], align 4 ; UNROLL-NO-IC-NEXT: store <4 x i32> [[TMP14]], ptr [[TMP17]], align 4 ; UNROLL-NO-IC-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 @@ -2449,7 +2449,7 @@ define void @sink_dead_inst(ptr %a) { ; UNROLL-NO-IC-NEXT: [[TMP8:%.*]] = sub <4 x i16> [[TMP6]], splat (i16 10) ; UNROLL-NO-IC-NEXT: [[TMP9:%.*]] = sub <4 x i16> [[TMP7]], splat (i16 10) ; UNROLL-NO-IC-NEXT: [[TMP10:%.*]] = getelementptr i16, ptr [[A:%.*]], i16 [[OFFSET_IDX]] -; UNROLL-NO-IC-NEXT: [[TMP12:%.*]] = getelementptr i16, ptr [[TMP10]], i32 4 +; UNROLL-NO-IC-NEXT: [[TMP12:%.*]] = getelementptr i16, ptr [[TMP10]], i64 4 ; UNROLL-NO-IC-NEXT: store <4 x i16> [[TMP8]], ptr [[TMP10]], align 2 ; UNROLL-NO-IC-NEXT: store <4 x i16> [[TMP9]], ptr [[TMP12]], align 2 ; UNROLL-NO-IC-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8 @@ -3218,7 +3218,7 @@ define i32 @sink_after_dead_inst(ptr %A.ptr, i32 %n) { ; UNROLL-NO-IC-NEXT: [[TMP2:%.*]] = or <4 x i16> [[TMP1]], [[TMP1]] ; UNROLL-NO-IC-NEXT: [[TMP3:%.*]] = zext <4 x i16> [[TMP2]] to <4 x i32> ; UNROLL-NO-IC-NEXT: [[TMP4:%.*]] = getelementptr i32, ptr [[A_PTR:%.*]], i16 [[OFFSET_IDX]] -; UNROLL-NO-IC-NEXT: [[TMP6:%.*]] = getelementptr i32, ptr [[TMP4]], i32 4 +; UNROLL-NO-IC-NEXT: [[TMP6:%.*]] = getelementptr i32, ptr [[TMP4]], i64 4 ; UNROLL-NO-IC-NEXT: store <4 x i32> zeroinitializer, ptr [[TMP4]], align 4 ; UNROLL-NO-IC-NEXT: store <4 x i32> zeroinitializer, ptr [[TMP6]], align 4 ; UNROLL-NO-IC-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8 diff --git a/llvm/test/Transforms/LoopVectorize/fmax-without-fast-math-flags-interleave.ll b/llvm/test/Transforms/LoopVectorize/fmax-without-fast-math-flags-interleave.ll index 
0745f286b2608..0d9d28d079b92 100644 --- a/llvm/test/Transforms/LoopVectorize/fmax-without-fast-math-flags-interleave.ll +++ b/llvm/test/Transforms/LoopVectorize/fmax-without-fast-math-flags-interleave.ll @@ -53,7 +53,7 @@ define float @fmaxnum(ptr %src, i64 %n) { ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x float> [ splat (float -1.000000e+07), %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x float> [ splat (float -1.000000e+07), %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds nuw float, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[GEP_SRC]], i32 4 +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[GEP_SRC]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[GEP_SRC]], align 4 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP2]], align 4 ; CHECK-NEXT: [[TMP7]] = call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[VEC_PHI]], <4 x float> [[WIDE_LOAD]]) @@ -127,10 +127,10 @@ define float @test_fmax_and_fmin(ptr %src.0, ptr %src.1, i64 %n) { ; CHECK-NEXT: [[VEC_PHI3:%.*]] = phi <4 x float> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP5:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[GEP_SRC_0:%.*]] = getelementptr inbounds nuw float, ptr [[SRC_0]], i64 [[IV]] ; CHECK-NEXT: [[GEP_SRC_1:%.*]] = getelementptr inbounds nuw float, ptr [[SRC_1]], i64 [[IV]] -; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[GEP_SRC_0]], i32 4 +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[GEP_SRC_0]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[GEP_SRC_0]], align 4 ; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x float>, ptr [[TMP2]], align 4 -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw float, ptr [[GEP_SRC_1]], i32 4 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw float, ptr [[GEP_SRC_1]], i64 4 ; CHECK-NEXT: [[WIDE_LOAD5:%.*]] 
= load <4 x float>, ptr [[GEP_SRC_1]], align 4 ; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x float>, ptr [[TMP3]], align 4 ; CHECK-NEXT: [[TMP4]] = call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[VEC_PHI2]], <4 x float> [[WIDE_LOAD]]) diff --git a/llvm/test/Transforms/LoopVectorize/hoist-predicated-loads-with-predicated-stores.ll b/llvm/test/Transforms/LoopVectorize/hoist-predicated-loads-with-predicated-stores.ll index ac767c68e0b25..87942911e915f 100644 --- a/llvm/test/Transforms/LoopVectorize/hoist-predicated-loads-with-predicated-stores.ll +++ b/llvm/test/Transforms/LoopVectorize/hoist-predicated-loads-with-predicated-stores.ll @@ -21,32 +21,20 @@ define void @test_stores_noalias_via_rt_checks_after_loads(ptr %dst, ptr %src, p ; CHECK: [[VECTOR_PH]]: ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE17:.*]] ] +; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE11:.*]] ] ; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[INDEX]], 0 ; CHECK-NEXT: [[TMP5:%.*]] = add i32 [[INDEX]], 1 ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[COND]], i32 [[TMP4]] ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[TMP6]], align 4, !alias.scope [[META0:![0-9]+]] ; CHECK-NEXT: [[TMP7:%.*]] = icmp ule <2 x i32> [[WIDE_LOAD]], splat (i32 11) ; CHECK-NEXT: [[TMP8:%.*]] = xor <2 x i1> [[TMP7]], splat (i1 true) -; CHECK-NEXT: [[TMP9:%.*]] = extractelement <2 x i1> [[TMP8]], i32 0 -; CHECK-NEXT: br i1 [[TMP9]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] -; CHECK: [[PRED_LOAD_IF]]: ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[TMP4]] -; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4, !alias.scope [[META3:![0-9]+]] -; CHECK-NEXT: [[TMP12:%.*]] = insertelement <2 x i32> poison, i32 [[TMP11]], i32 0 -; CHECK-NEXT: br label 
%[[PRED_LOAD_CONTINUE]] -; CHECK: [[PRED_LOAD_CONTINUE]]: -; CHECK-NEXT: [[TMP13:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP12]], %[[PRED_LOAD_IF]] ] -; CHECK-NEXT: [[TMP14:%.*]] = extractelement <2 x i1> [[TMP8]], i32 1 -; CHECK-NEXT: br i1 [[TMP14]], label %[[PRED_LOAD_IF6:.*]], label %[[PRED_LOAD_CONTINUE7:.*]] -; CHECK: [[PRED_LOAD_IF6]]: ; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[TMP5]] +; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP10]], align 4, !alias.scope [[META3:![0-9]+]] ; CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 4, !alias.scope [[META3]] +; CHECK-NEXT: [[TMP13:%.*]] = insertelement <2 x i32> poison, i32 [[TMP9]], i32 0 ; CHECK-NEXT: [[TMP17:%.*]] = insertelement <2 x i32> [[TMP13]], i32 [[TMP16]], i32 1 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE7]] -; CHECK: [[PRED_LOAD_CONTINUE7]]: -; CHECK-NEXT: [[TMP18:%.*]] = phi <2 x i32> [ [[TMP13]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP17]], %[[PRED_LOAD_IF6]] ] -; CHECK-NEXT: [[TMP19:%.*]] = sub <2 x i32> [[TMP18]], splat (i32 5) +; CHECK-NEXT: [[TMP19:%.*]] = sub <2 x i32> [[TMP17]], splat (i32 5) ; CHECK-NEXT: [[TMP20:%.*]] = extractelement <2 x i1> [[TMP8]], i32 0 ; CHECK-NEXT: br i1 [[TMP20]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]] ; CHECK: [[PRED_STORE_IF]]: @@ -56,48 +44,30 @@ define void @test_stores_noalias_via_rt_checks_after_loads(ptr %dst, ptr %src, p ; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE]] ; CHECK: [[PRED_STORE_CONTINUE]]: ; CHECK-NEXT: [[TMP23:%.*]] = extractelement <2 x i1> [[TMP8]], i32 1 -; CHECK-NEXT: br i1 [[TMP23]], label %[[PRED_STORE_IF8:.*]], label %[[PRED_STORE_CONTINUE9:.*]] -; CHECK: [[PRED_STORE_IF8]]: +; CHECK-NEXT: br i1 [[TMP23]], label %[[PRED_STORE_IF6:.*]], label %[[PRED_STORE_CONTINUE7:.*]] +; CHECK: [[PRED_STORE_IF6]]: ; CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[TMP5]] ; CHECK-NEXT: [[TMP25:%.*]] = extractelement <2 x i32> [[TMP19]], i32 1 ; 
CHECK-NEXT: store i32 [[TMP25]], ptr [[TMP24]], align 4, !alias.scope [[META5]], !noalias [[META7]] -; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE9]] -; CHECK: [[PRED_STORE_CONTINUE9]]: -; CHECK-NEXT: [[TMP26:%.*]] = extractelement <2 x i1> [[TMP7]], i32 0 -; CHECK-NEXT: br i1 [[TMP26]], label %[[PRED_LOAD_IF10:.*]], label %[[PRED_LOAD_CONTINUE11:.*]] -; CHECK: [[PRED_LOAD_IF10]]: -; CHECK-NEXT: [[TMP27:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[TMP4]] -; CHECK-NEXT: [[TMP28:%.*]] = load i32, ptr [[TMP27]], align 4, !alias.scope [[META3]] -; CHECK-NEXT: [[TMP29:%.*]] = insertelement <2 x i32> poison, i32 [[TMP28]], i32 0 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE11]] -; CHECK: [[PRED_LOAD_CONTINUE11]]: -; CHECK-NEXT: [[TMP30:%.*]] = phi <2 x i32> [ poison, %[[PRED_STORE_CONTINUE9]] ], [ [[TMP29]], %[[PRED_LOAD_IF10]] ] -; CHECK-NEXT: [[TMP31:%.*]] = extractelement <2 x i1> [[TMP7]], i32 1 -; CHECK-NEXT: br i1 [[TMP31]], label %[[PRED_LOAD_IF12:.*]], label %[[PRED_LOAD_CONTINUE13:.*]] -; CHECK: [[PRED_LOAD_IF12]]: -; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[TMP5]] -; CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4, !alias.scope [[META3]] -; CHECK-NEXT: [[TMP34:%.*]] = insertelement <2 x i32> [[TMP30]], i32 [[TMP33]], i32 1 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE13]] -; CHECK: [[PRED_LOAD_CONTINUE13]]: -; CHECK-NEXT: [[TMP35:%.*]] = phi <2 x i32> [ [[TMP30]], %[[PRED_LOAD_CONTINUE11]] ], [ [[TMP34]], %[[PRED_LOAD_IF12]] ] -; CHECK-NEXT: [[TMP36:%.*]] = add <2 x i32> [[TMP35]], splat (i32 10) +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE7]] +; CHECK: [[PRED_STORE_CONTINUE7]]: +; CHECK-NEXT: [[TMP36:%.*]] = add <2 x i32> [[TMP17]], splat (i32 10) ; CHECK-NEXT: [[TMP37:%.*]] = extractelement <2 x i1> [[TMP7]], i32 0 -; CHECK-NEXT: br i1 [[TMP37]], label %[[PRED_STORE_IF14:.*]], label %[[PRED_STORE_CONTINUE15:.*]] -; CHECK: [[PRED_STORE_IF14]]: +; CHECK-NEXT: br i1 [[TMP37]], label 
%[[PRED_STORE_IF8:.*]], label %[[PRED_STORE_CONTINUE9:.*]] +; CHECK: [[PRED_STORE_IF8]]: ; CHECK-NEXT: [[TMP38:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[TMP4]] ; CHECK-NEXT: [[TMP39:%.*]] = extractelement <2 x i32> [[TMP36]], i32 0 ; CHECK-NEXT: store i32 [[TMP39]], ptr [[TMP38]], align 4, !alias.scope [[META5]], !noalias [[META7]] -; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE15]] -; CHECK: [[PRED_STORE_CONTINUE15]]: +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE9]] +; CHECK: [[PRED_STORE_CONTINUE9]]: ; CHECK-NEXT: [[TMP40:%.*]] = extractelement <2 x i1> [[TMP7]], i32 1 -; CHECK-NEXT: br i1 [[TMP40]], label %[[PRED_STORE_IF16:.*]], label %[[PRED_STORE_CONTINUE17]] -; CHECK: [[PRED_STORE_IF16]]: +; CHECK-NEXT: br i1 [[TMP40]], label %[[PRED_STORE_IF10:.*]], label %[[PRED_STORE_CONTINUE11]] +; CHECK: [[PRED_STORE_IF10]]: ; CHECK-NEXT: [[TMP41:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[TMP5]] ; CHECK-NEXT: [[TMP42:%.*]] = extractelement <2 x i32> [[TMP36]], i32 1 ; CHECK-NEXT: store i32 [[TMP42]], ptr [[TMP41]], align 4, !alias.scope [[META5]], !noalias [[META7]] -; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE17]] -; CHECK: [[PRED_STORE_CONTINUE17]]: +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE11]] +; CHECK: [[PRED_STORE_CONTINUE11]]: ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2 ; CHECK-NEXT: [[TMP43:%.*]] = icmp eq i32 [[INDEX_NEXT]], 100 ; CHECK-NEXT: br i1 [[TMP43]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] @@ -319,7 +289,7 @@ define void @test_noalias_store_via_runtime_checks(ptr %dst, ptr %dst.1, ptr %sr ; CHECK: [[VECTOR_PH]]: ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE30:.*]] ] +; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE28:.*]] ] ; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[INDEX]], 0 ; CHECK-NEXT: 
[[TMP5:%.*]] = add i32 [[INDEX]], 1 ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[COND]], i32 [[TMP4]] @@ -327,79 +297,59 @@ define void @test_noalias_store_via_runtime_checks(ptr %dst, ptr %dst.1, ptr %sr ; CHECK-NEXT: [[TMP7:%.*]] = icmp ule <2 x i32> [[WIDE_LOAD]], splat (i32 11) ; CHECK-NEXT: [[TMP8:%.*]] = xor <2 x i1> [[TMP7]], splat (i1 true) ; CHECK-NEXT: [[TMP9:%.*]] = extractelement <2 x i1> [[TMP8]], i32 0 -; CHECK-NEXT: br i1 [[TMP9]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] -; CHECK: [[PRED_LOAD_IF]]: +; CHECK-NEXT: br i1 [[TMP9]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]] +; CHECK: [[PRED_STORE_IF]]: ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[DST_1]], i32 [[TMP4]] ; CHECK-NEXT: store i32 10, ptr [[TMP10]], align 4, !alias.scope [[META25:![0-9]+]], !noalias [[META27:![0-9]+]] -; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[TMP4]] -; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP11]], align 4, !alias.scope [[META30:![0-9]+]] -; CHECK-NEXT: [[TMP13:%.*]] = insertelement <2 x i32> poison, i32 [[TMP12]], i32 0 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE]] -; CHECK: [[PRED_LOAD_CONTINUE]]: -; CHECK-NEXT: [[TMP14:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP13]], %[[PRED_LOAD_IF]] ] +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE]] +; CHECK: [[PRED_STORE_CONTINUE]]: ; CHECK-NEXT: [[TMP15:%.*]] = extractelement <2 x i1> [[TMP8]], i32 1 -; CHECK-NEXT: br i1 [[TMP15]], label %[[PRED_LOAD_IF19:.*]], label %[[PRED_LOAD_CONTINUE20:.*]] -; CHECK: [[PRED_LOAD_IF19]]: +; CHECK-NEXT: br i1 [[TMP15]], label %[[PRED_STORE_IF19:.*]], label %[[PRED_STORE_CONTINUE20:.*]] +; CHECK: [[PRED_STORE_IF19]]: ; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[DST_1]], i32 [[TMP5]] ; CHECK-NEXT: store i32 10, ptr [[TMP16]], align 4, !alias.scope [[META25]], !noalias [[META27]] +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE20]] +; CHECK: 
[[PRED_STORE_CONTINUE20]]: +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[TMP4]] ; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[TMP5]] +; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP12]], align 4, !alias.scope [[META30:![0-9]+]] ; CHECK-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP17]], align 4, !alias.scope [[META30]] +; CHECK-NEXT: [[TMP14:%.*]] = insertelement <2 x i32> poison, i32 [[TMP11]], i32 0 ; CHECK-NEXT: [[TMP19:%.*]] = insertelement <2 x i32> [[TMP14]], i32 [[TMP18]], i32 1 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE20]] -; CHECK: [[PRED_LOAD_CONTINUE20]]: -; CHECK-NEXT: [[TMP20:%.*]] = phi <2 x i32> [ [[TMP14]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP19]], %[[PRED_LOAD_IF19]] ] -; CHECK-NEXT: [[TMP21:%.*]] = sub <2 x i32> [[TMP20]], splat (i32 5) +; CHECK-NEXT: [[TMP21:%.*]] = sub <2 x i32> [[TMP19]], splat (i32 5) ; CHECK-NEXT: [[TMP22:%.*]] = extractelement <2 x i1> [[TMP8]], i32 0 -; CHECK-NEXT: br i1 [[TMP22]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]] -; CHECK: [[PRED_STORE_IF]]: +; CHECK-NEXT: br i1 [[TMP22]], label %[[PRED_STORE_IF21:.*]], label %[[PRED_STORE_CONTINUE22:.*]] +; CHECK: [[PRED_STORE_IF21]]: ; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[TMP4]] ; CHECK-NEXT: [[TMP24:%.*]] = extractelement <2 x i32> [[TMP21]], i32 0 ; CHECK-NEXT: store i32 [[TMP24]], ptr [[TMP23]], align 4, !alias.scope [[META31:![0-9]+]], !noalias [[META32:![0-9]+]] -; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE]] -; CHECK: [[PRED_STORE_CONTINUE]]: +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE22]] +; CHECK: [[PRED_STORE_CONTINUE22]]: ; CHECK-NEXT: [[TMP25:%.*]] = extractelement <2 x i1> [[TMP8]], i32 1 -; CHECK-NEXT: br i1 [[TMP25]], label %[[PRED_STORE_IF21:.*]], label %[[PRED_STORE_CONTINUE22:.*]] -; CHECK: [[PRED_STORE_IF21]]: +; CHECK-NEXT: br i1 [[TMP25]], label %[[PRED_STORE_IF23:.*]], label %[[PRED_STORE_CONTINUE24:.*]] +; CHECK: [[PRED_STORE_IF23]]: 
; CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[TMP5]] ; CHECK-NEXT: [[TMP27:%.*]] = extractelement <2 x i32> [[TMP21]], i32 1 ; CHECK-NEXT: store i32 [[TMP27]], ptr [[TMP26]], align 4, !alias.scope [[META31]], !noalias [[META32]] -; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE22]] -; CHECK: [[PRED_STORE_CONTINUE22]]: -; CHECK-NEXT: [[TMP28:%.*]] = extractelement <2 x i1> [[TMP7]], i32 0 -; CHECK-NEXT: br i1 [[TMP28]], label %[[PRED_LOAD_IF23:.*]], label %[[PRED_LOAD_CONTINUE24:.*]] -; CHECK: [[PRED_LOAD_IF23]]: -; CHECK-NEXT: [[TMP29:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[TMP4]] -; CHECK-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP29]], align 4, !alias.scope [[META30]] -; CHECK-NEXT: [[TMP31:%.*]] = insertelement <2 x i32> poison, i32 [[TMP30]], i32 0 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE24]] -; CHECK: [[PRED_LOAD_CONTINUE24]]: -; CHECK-NEXT: [[TMP32:%.*]] = phi <2 x i32> [ poison, %[[PRED_STORE_CONTINUE22]] ], [ [[TMP31]], %[[PRED_LOAD_IF23]] ] -; CHECK-NEXT: [[TMP33:%.*]] = extractelement <2 x i1> [[TMP7]], i32 1 -; CHECK-NEXT: br i1 [[TMP33]], label %[[PRED_LOAD_IF25:.*]], label %[[PRED_LOAD_CONTINUE26:.*]] -; CHECK: [[PRED_LOAD_IF25]]: -; CHECK-NEXT: [[TMP34:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[TMP5]] -; CHECK-NEXT: [[TMP35:%.*]] = load i32, ptr [[TMP34]], align 4, !alias.scope [[META30]] -; CHECK-NEXT: [[TMP36:%.*]] = insertelement <2 x i32> [[TMP32]], i32 [[TMP35]], i32 1 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE26]] -; CHECK: [[PRED_LOAD_CONTINUE26]]: -; CHECK-NEXT: [[TMP37:%.*]] = phi <2 x i32> [ [[TMP32]], %[[PRED_LOAD_CONTINUE24]] ], [ [[TMP36]], %[[PRED_LOAD_IF25]] ] -; CHECK-NEXT: [[TMP38:%.*]] = add <2 x i32> [[TMP37]], splat (i32 10) +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE24]] +; CHECK: [[PRED_STORE_CONTINUE24]]: +; CHECK-NEXT: [[TMP38:%.*]] = add <2 x i32> [[TMP19]], splat (i32 10) ; CHECK-NEXT: [[TMP39:%.*]] = extractelement <2 x i1> [[TMP7]], i32 0 -; CHECK-NEXT: br i1 
[[TMP39]], label %[[PRED_STORE_IF27:.*]], label %[[PRED_STORE_CONTINUE28:.*]] -; CHECK: [[PRED_STORE_IF27]]: +; CHECK-NEXT: br i1 [[TMP39]], label %[[PRED_STORE_IF25:.*]], label %[[PRED_STORE_CONTINUE26:.*]] +; CHECK: [[PRED_STORE_IF25]]: ; CHECK-NEXT: [[TMP40:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[TMP4]] ; CHECK-NEXT: [[TMP41:%.*]] = extractelement <2 x i32> [[TMP38]], i32 0 ; CHECK-NEXT: store i32 [[TMP41]], ptr [[TMP40]], align 4, !alias.scope [[META31]], !noalias [[META32]] -; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE28]] -; CHECK: [[PRED_STORE_CONTINUE28]]: +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE26]] +; CHECK: [[PRED_STORE_CONTINUE26]]: ; CHECK-NEXT: [[TMP42:%.*]] = extractelement <2 x i1> [[TMP7]], i32 1 -; CHECK-NEXT: br i1 [[TMP42]], label %[[PRED_STORE_IF29:.*]], label %[[PRED_STORE_CONTINUE30]] -; CHECK: [[PRED_STORE_IF29]]: +; CHECK-NEXT: br i1 [[TMP42]], label %[[PRED_STORE_IF27:.*]], label %[[PRED_STORE_CONTINUE28]] +; CHECK: [[PRED_STORE_IF27]]: ; CHECK-NEXT: [[TMP43:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[TMP5]] ; CHECK-NEXT: [[TMP44:%.*]] = extractelement <2 x i32> [[TMP38]], i32 1 ; CHECK-NEXT: store i32 [[TMP44]], ptr [[TMP43]], align 4, !alias.scope [[META31]], !noalias [[META32]] -; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE30]] -; CHECK: [[PRED_STORE_CONTINUE30]]: +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE28]] +; CHECK: [[PRED_STORE_CONTINUE28]]: ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2 ; CHECK-NEXT: [[TMP45:%.*]] = icmp eq i32 [[INDEX_NEXT]], 100 ; CHECK-NEXT: br i1 [[TMP45]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP33:![0-9]+]] @@ -609,7 +559,7 @@ define void @test_memory_op_between_loads_no_alias_via_rt_checks(ptr %dst, ptr % ; CHECK: [[VECTOR_PH]]: ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE26:.*]] ] +; CHECK-NEXT: 
[[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE28:.*]] ] ; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[INDEX]], 0 ; CHECK-NEXT: [[TMP5:%.*]] = add i32 [[INDEX]], 1 ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[COND]], i32 [[TMP4]] @@ -617,62 +567,56 @@ define void @test_memory_op_between_loads_no_alias_via_rt_checks(ptr %dst, ptr % ; CHECK-NEXT: [[TMP7:%.*]] = icmp ule <2 x i32> [[WIDE_LOAD]], splat (i32 11) ; CHECK-NEXT: [[TMP8:%.*]] = xor <2 x i1> [[TMP7]], splat (i1 true) ; CHECK-NEXT: [[TMP9:%.*]] = extractelement <2 x i1> [[TMP8]], i32 0 -; CHECK-NEXT: br i1 [[TMP9]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] -; CHECK: [[PRED_LOAD_IF]]: +; CHECK-NEXT: br i1 [[TMP9]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]] +; CHECK: [[PRED_STORE_IF]]: ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[DST_1]], i32 [[TMP4]] ; CHECK-NEXT: store i32 0, ptr [[TMP10]], align 4, !alias.scope [[META48:![0-9]+]], !noalias [[META50:![0-9]+]] -; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[TMP4]] -; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP11]], align 4, !alias.scope [[META53:![0-9]+]] -; CHECK-NEXT: [[TMP13:%.*]] = insertelement <2 x i32> poison, i32 [[TMP12]], i32 0 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE]] -; CHECK: [[PRED_LOAD_CONTINUE]]: -; CHECK-NEXT: [[TMP14:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP13]], %[[PRED_LOAD_IF]] ] +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE]] +; CHECK: [[PRED_STORE_CONTINUE]]: ; CHECK-NEXT: [[TMP15:%.*]] = extractelement <2 x i1> [[TMP8]], i32 1 -; CHECK-NEXT: br i1 [[TMP15]], label %[[PRED_LOAD_IF19:.*]], label %[[PRED_LOAD_CONTINUE20:.*]] -; CHECK: [[PRED_LOAD_IF19]]: +; CHECK-NEXT: br i1 [[TMP15]], label %[[PRED_STORE_IF19:.*]], label %[[PRED_STORE_CONTINUE20:.*]] +; CHECK: [[PRED_STORE_IF19]]: ; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[DST_1]], i32 [[TMP5]] ; 
CHECK-NEXT: store i32 0, ptr [[TMP16]], align 4, !alias.scope [[META48]], !noalias [[META50]] +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE20]] +; CHECK: [[PRED_STORE_CONTINUE20]]: +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[TMP4]] ; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[TMP5]] +; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP12]], align 4, !alias.scope [[META53:![0-9]+]] ; CHECK-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP17]], align 4, !alias.scope [[META53]] +; CHECK-NEXT: [[TMP14:%.*]] = insertelement <2 x i32> poison, i32 [[TMP11]], i32 0 ; CHECK-NEXT: [[TMP19:%.*]] = insertelement <2 x i32> [[TMP14]], i32 [[TMP18]], i32 1 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE20]] -; CHECK: [[PRED_LOAD_CONTINUE20]]: -; CHECK-NEXT: [[TMP20:%.*]] = phi <2 x i32> [ [[TMP14]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP19]], %[[PRED_LOAD_IF19]] ] -; CHECK-NEXT: [[TMP21:%.*]] = add <2 x i32> [[TMP20]], splat (i32 10) +; CHECK-NEXT: [[TMP21:%.*]] = add <2 x i32> [[TMP19]], splat (i32 10) ; CHECK-NEXT: [[TMP22:%.*]] = extractelement <2 x i1> [[TMP8]], i32 0 -; CHECK-NEXT: br i1 [[TMP22]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]] -; CHECK: [[PRED_STORE_IF]]: +; CHECK-NEXT: br i1 [[TMP22]], label %[[PRED_STORE_IF21:.*]], label %[[PRED_STORE_CONTINUE22:.*]] +; CHECK: [[PRED_STORE_IF21]]: ; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[TMP4]] ; CHECK-NEXT: [[TMP24:%.*]] = extractelement <2 x i32> [[TMP21]], i32 0 ; CHECK-NEXT: store i32 [[TMP24]], ptr [[TMP23]], align 4, !alias.scope [[META54:![0-9]+]], !noalias [[META55:![0-9]+]] -; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE]] -; CHECK: [[PRED_STORE_CONTINUE]]: +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE22]] +; CHECK: [[PRED_STORE_CONTINUE22]]: ; CHECK-NEXT: [[TMP25:%.*]] = extractelement <2 x i1> [[TMP8]], i32 1 -; CHECK-NEXT: br i1 [[TMP25]], label %[[PRED_STORE_IF21:.*]], label %[[PRED_STORE_CONTINUE22:.*]] -; 
CHECK: [[PRED_STORE_IF21]]: +; CHECK-NEXT: br i1 [[TMP25]], label %[[PRED_STORE_IF23:.*]], label %[[PRED_STORE_CONTINUE24:.*]] +; CHECK: [[PRED_STORE_IF23]]: ; CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[TMP5]] ; CHECK-NEXT: [[TMP27:%.*]] = extractelement <2 x i32> [[TMP21]], i32 1 ; CHECK-NEXT: store i32 [[TMP27]], ptr [[TMP26]], align 4, !alias.scope [[META54]], !noalias [[META55]] -; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE22]] -; CHECK: [[PRED_STORE_CONTINUE22]]: -; CHECK-NEXT: [[TMP28:%.*]] = extractelement <2 x i1> [[TMP7]], i32 0 -; CHECK-NEXT: br i1 [[TMP28]], label %[[PRED_STORE_IF23:.*]], label %[[PRED_STORE_CONTINUE24:.*]] -; CHECK: [[PRED_STORE_IF23]]: -; CHECK-NEXT: [[TMP29:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[TMP4]] -; CHECK-NEXT: [[TMP34:%.*]] = load i32, ptr [[TMP29]], align 4, !alias.scope [[META53]] -; CHECK-NEXT: [[TMP31:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[TMP4]] -; CHECK-NEXT: store i32 [[TMP34]], ptr [[TMP31]], align 4, !alias.scope [[META54]], !noalias [[META55]] ; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE24]] ; CHECK: [[PRED_STORE_CONTINUE24]]: -; CHECK-NEXT: [[TMP32:%.*]] = extractelement <2 x i1> [[TMP7]], i32 1 -; CHECK-NEXT: br i1 [[TMP32]], label %[[PRED_STORE_IF25:.*]], label %[[PRED_STORE_CONTINUE26]] +; CHECK-NEXT: [[TMP28:%.*]] = extractelement <2 x i1> [[TMP7]], i32 0 +; CHECK-NEXT: br i1 [[TMP28]], label %[[PRED_STORE_IF25:.*]], label %[[PRED_STORE_CONTINUE26:.*]] ; CHECK: [[PRED_STORE_IF25]]: -; CHECK-NEXT: [[TMP33:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[TMP5]] -; CHECK-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP33]], align 4, !alias.scope [[META53]] -; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[TMP5]] -; CHECK-NEXT: store i32 [[TMP30]], ptr [[TMP35]], align 4, !alias.scope [[META54]], !noalias [[META55]] +; CHECK-NEXT: [[TMP31:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[TMP4]] +; CHECK-NEXT: store i32 
[[TMP11]], ptr [[TMP31]], align 4, !alias.scope [[META54]], !noalias [[META55]] ; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE26]] ; CHECK: [[PRED_STORE_CONTINUE26]]: +; CHECK-NEXT: [[TMP32:%.*]] = extractelement <2 x i1> [[TMP7]], i32 1 +; CHECK-NEXT: br i1 [[TMP32]], label %[[PRED_STORE_IF27:.*]], label %[[PRED_STORE_CONTINUE28]] +; CHECK: [[PRED_STORE_IF27]]: +; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[TMP5]] +; CHECK-NEXT: store i32 [[TMP18]], ptr [[TMP35]], align 4, !alias.scope [[META54]], !noalias [[META55]] +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE28]] +; CHECK: [[PRED_STORE_CONTINUE28]]: ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2 ; CHECK-NEXT: [[TMP36:%.*]] = icmp eq i32 [[INDEX_NEXT]], 100 ; CHECK-NEXT: br i1 [[TMP36]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP56:![0-9]+]] diff --git a/llvm/test/Transforms/LoopVectorize/hoist-predicated-loads.ll b/llvm/test/Transforms/LoopVectorize/hoist-predicated-loads.ll index e4c893f5269bb..f6dd8564c001b 100644 --- a/llvm/test/Transforms/LoopVectorize/hoist-predicated-loads.ll +++ b/llvm/test/Transforms/LoopVectorize/hoist-predicated-loads.ll @@ -21,51 +21,20 @@ define void @test(ptr %dst, ptr %src, ptr %cond) { ; CHECK: [[VECTOR_PH]]: ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_LOAD_CONTINUE11:.*]] ] +; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[INDEX]], 0 ; CHECK-NEXT: [[TMP5:%.*]] = add i32 [[INDEX]], 1 ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[TMP4]] ; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[TMP5]] -; CHECK-NEXT: [[TMP10:%.*]] = insertelement <2 x ptr> poison, ptr [[TMP8]], i32 0 -; CHECK-NEXT: [[TMP11:%.*]] = insertelement <2 x ptr> [[TMP10]], ptr [[TMP9]], 
i32 1 ; CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds i32, ptr [[COND]], i32 [[TMP4]] ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[TMP24]], align 4, !alias.scope [[META0:![0-9]+]] ; CHECK-NEXT: [[TMP15:%.*]] = icmp ule <2 x i32> [[WIDE_LOAD]], splat (i32 11) -; CHECK-NEXT: [[TMP34:%.*]] = xor <2 x i1> [[TMP15]], splat (i1 true) -; CHECK-NEXT: [[TMP35:%.*]] = extractelement <2 x i1> [[TMP34]], i32 0 -; CHECK-NEXT: br i1 [[TMP35]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] -; CHECK: [[PRED_LOAD_IF]]: ; CHECK-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP8]], align 4, !alias.scope [[META3:![0-9]+]] +; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP9]], align 4, !alias.scope [[META3]] ; CHECK-NEXT: [[TMP19:%.*]] = insertelement <2 x i32> poison, i32 [[TMP18]], i32 0 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE]] -; CHECK: [[PRED_LOAD_CONTINUE]]: -; CHECK-NEXT: [[TMP20:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP19]], %[[PRED_LOAD_IF]] ] -; CHECK-NEXT: [[TMP21:%.*]] = extractelement <2 x i1> [[TMP34]], i32 1 -; CHECK-NEXT: br i1 [[TMP21]], label %[[PRED_LOAD_IF6:.*]], label %[[PRED_LOAD_CONTINUE7:.*]] -; CHECK: [[PRED_LOAD_IF6]]: -; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP9]], align 4, !alias.scope [[META3]] -; CHECK-NEXT: [[TMP23:%.*]] = insertelement <2 x i32> [[TMP20]], i32 [[TMP22]], i32 1 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE7]] -; CHECK: [[PRED_LOAD_CONTINUE7]]: -; CHECK-NEXT: [[TMP36:%.*]] = phi <2 x i32> [ [[TMP20]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP23]], %[[PRED_LOAD_IF6]] ] +; CHECK-NEXT: [[TMP36:%.*]] = insertelement <2 x i32> [[TMP19]], i32 [[TMP7]], i32 1 ; CHECK-NEXT: [[TMP25:%.*]] = add <2 x i32> [[TMP36]], splat (i32 10) -; CHECK-NEXT: [[TMP30:%.*]] = extractelement <2 x i1> [[TMP15]], i32 0 -; CHECK-NEXT: br i1 [[TMP30]], label %[[PRED_LOAD_IF8:.*]], label %[[PRED_LOAD_CONTINUE9:.*]] -; CHECK: [[PRED_LOAD_IF8]]: -; CHECK-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP8]], align 4, !alias.scope 
[[META3]] -; CHECK-NEXT: [[TMP31:%.*]] = insertelement <2 x i32> poison, i32 [[TMP26]], i32 0 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE9]] -; CHECK: [[PRED_LOAD_CONTINUE9]]: -; CHECK-NEXT: [[TMP33:%.*]] = phi <2 x i32> [ poison, %[[PRED_LOAD_CONTINUE7]] ], [ [[TMP31]], %[[PRED_LOAD_IF8]] ] -; CHECK-NEXT: [[TMP32:%.*]] = extractelement <2 x i1> [[TMP15]], i32 1 -; CHECK-NEXT: br i1 [[TMP32]], label %[[PRED_LOAD_IF10:.*]], label %[[PRED_LOAD_CONTINUE11]] -; CHECK: [[PRED_LOAD_IF10]]: -; CHECK-NEXT: [[TMP27:%.*]] = load i32, ptr [[TMP9]], align 4, !alias.scope [[META3]] -; CHECK-NEXT: [[TMP28:%.*]] = insertelement <2 x i32> [[TMP33]], i32 [[TMP27]], i32 1 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE11]] -; CHECK: [[PRED_LOAD_CONTINUE11]]: -; CHECK-NEXT: [[TMP29:%.*]] = phi <2 x i32> [ [[TMP33]], %[[PRED_LOAD_CONTINUE9]] ], [ [[TMP28]], %[[PRED_LOAD_IF10]] ] -; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP15]], <2 x i32> [[TMP29]], <2 x i32> [[TMP25]] +; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP15]], <2 x i32> [[TMP36]], <2 x i32> [[TMP25]] ; CHECK-NEXT: [[TMP37:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[TMP4]] ; CHECK-NEXT: store <2 x i32> [[PREDPHI]], ptr [[TMP37]], align 4, !alias.scope [[META5:![0-9]+]], !noalias [[META7:![0-9]+]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2 @@ -450,7 +419,7 @@ exit: ret void } -; Positive test: Same address with different alignments - should hoist with minimum alignment +; Make sure the minimum alignment is used when loads have different alignments. 
define void @different_alignments_same_address(ptr %dst, ptr %src, ptr %cond) { ; CHECK-LABEL: define void @different_alignments_same_address( ; CHECK-SAME: ptr [[DST:%.*]], ptr [[SRC:%.*]], ptr [[COND:%.*]]) { @@ -471,53 +440,22 @@ define void @different_alignments_same_address(ptr %dst, ptr %src, ptr %cond) { ; CHECK: [[VECTOR_PH]]: ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_LOAD_CONTINUE11:.*]] ] +; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[INDEX]], 0 ; CHECK-NEXT: [[TMP5:%.*]] = add i32 [[INDEX]], 1 ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[TMP4]] ; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[TMP5]] -; CHECK-NEXT: [[TMP10:%.*]] = insertelement <2 x ptr> poison, ptr [[TMP8]], i32 0 -; CHECK-NEXT: [[TMP11:%.*]] = insertelement <2 x ptr> [[TMP10]], ptr [[TMP9]], i32 1 ; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[COND]], i32 [[TMP4]] ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[TMP20]], align 4, !alias.scope [[META36:![0-9]+]] ; CHECK-NEXT: [[TMP15:%.*]] = icmp ule <2 x i32> [[WIDE_LOAD]], splat (i32 11) -; CHECK-NEXT: [[TMP16:%.*]] = xor <2 x i1> [[TMP15]], splat (i1 true) -; CHECK-NEXT: [[TMP17:%.*]] = extractelement <2 x i1> [[TMP16]], i32 0 -; CHECK-NEXT: br i1 [[TMP17]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] -; CHECK: [[PRED_LOAD_IF]]: -; CHECK-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP8]], align 4, !alias.scope [[META39:![0-9]+]] +; CHECK-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP8]], align 2, !alias.scope [[META39:![0-9]+]] +; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP9]], align 2, !alias.scope [[META39]] ; CHECK-NEXT: [[TMP19:%.*]] = insertelement <2 x i32> poison, i32 [[TMP18]], i32 0 -; CHECK-NEXT: br label 
%[[PRED_LOAD_CONTINUE]] -; CHECK: [[PRED_LOAD_CONTINUE]]: -; CHECK-NEXT: [[TMP35:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP19]], %[[PRED_LOAD_IF]] ] -; CHECK-NEXT: [[TMP21:%.*]] = extractelement <2 x i1> [[TMP16]], i32 1 -; CHECK-NEXT: br i1 [[TMP21]], label %[[PRED_LOAD_IF6:.*]], label %[[PRED_LOAD_CONTINUE7:.*]] -; CHECK: [[PRED_LOAD_IF6]]: -; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP9]], align 4, !alias.scope [[META39]] -; CHECK-NEXT: [[TMP23:%.*]] = insertelement <2 x i32> [[TMP35]], i32 [[TMP22]], i32 1 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE7]] -; CHECK: [[PRED_LOAD_CONTINUE7]]: -; CHECK-NEXT: [[TMP24:%.*]] = phi <2 x i32> [ [[TMP35]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP23]], %[[PRED_LOAD_IF6]] ] -; CHECK-NEXT: [[TMP25:%.*]] = add <2 x i32> [[TMP24]], splat (i32 10) -; CHECK-NEXT: [[TMP30:%.*]] = extractelement <2 x i1> [[TMP15]], i32 0 -; CHECK-NEXT: br i1 [[TMP30]], label %[[PRED_LOAD_IF8:.*]], label %[[PRED_LOAD_CONTINUE9:.*]] -; CHECK: [[PRED_LOAD_IF8]]: -; CHECK-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP8]], align 2, !alias.scope [[META39]] -; CHECK-NEXT: [[TMP31:%.*]] = insertelement <2 x i32> poison, i32 [[TMP26]], i32 0 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE9]] -; CHECK: [[PRED_LOAD_CONTINUE9]]: -; CHECK-NEXT: [[TMP33:%.*]] = phi <2 x i32> [ poison, %[[PRED_LOAD_CONTINUE7]] ], [ [[TMP31]], %[[PRED_LOAD_IF8]] ] -; CHECK-NEXT: [[TMP32:%.*]] = extractelement <2 x i1> [[TMP15]], i32 1 -; CHECK-NEXT: br i1 [[TMP32]], label %[[PRED_LOAD_IF10:.*]], label %[[PRED_LOAD_CONTINUE11]] -; CHECK: [[PRED_LOAD_IF10]]: -; CHECK-NEXT: [[TMP27:%.*]] = load i32, ptr [[TMP9]], align 2, !alias.scope [[META39]] -; CHECK-NEXT: [[TMP28:%.*]] = insertelement <2 x i32> [[TMP33]], i32 [[TMP27]], i32 1 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE11]] -; CHECK: [[PRED_LOAD_CONTINUE11]]: -; CHECK-NEXT: [[TMP29:%.*]] = phi <2 x i32> [ [[TMP33]], %[[PRED_LOAD_CONTINUE9]] ], [ [[TMP28]], %[[PRED_LOAD_IF10]] ] -; CHECK-NEXT: [[PREDPHI:%.*]] = select 
<2 x i1> [[TMP15]], <2 x i32> [[TMP29]], <2 x i32> [[TMP25]] -; CHECK-NEXT: [[TMP34:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[TMP4]] -; CHECK-NEXT: store <2 x i32> [[PREDPHI]], ptr [[TMP34]], align 4, !alias.scope [[META41:![0-9]+]], !noalias [[META43:![0-9]+]] +; CHECK-NEXT: [[TMP25:%.*]] = insertelement <2 x i32> [[TMP19]], i32 [[TMP7]], i32 1 +; CHECK-NEXT: [[TMP26:%.*]] = add <2 x i32> [[TMP25]], splat (i32 10) +; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP15]], <2 x i32> [[TMP25]], <2 x i32> [[TMP26]] +; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[TMP4]] +; CHECK-NEXT: store <2 x i32> [[PREDPHI]], ptr [[TMP35]], align 4, !alias.scope [[META41:![0-9]+]], !noalias [[META43:![0-9]+]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2 ; CHECK-NEXT: [[TMP36:%.*]] = icmp eq i32 [[INDEX_NEXT]], 100 ; CHECK-NEXT: br i1 [[TMP36]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP44:![0-9]+]] @@ -642,50 +580,19 @@ define void @duplicate_gep(ptr %dst, ptr %src, ptr %cond) { ; CHECK: [[VECTOR_PH]]: ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_LOAD_CONTINUE11:.*]] ] +; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[INDEX]], 0 ; CHECK-NEXT: [[TMP5:%.*]] = add i32 [[INDEX]], 1 ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[COND]], i32 [[TMP4]] ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[TMP6]], align 4, !alias.scope [[META46:![0-9]+]] ; CHECK-NEXT: [[TMP7:%.*]] = icmp ule <2 x i32> [[WIDE_LOAD]], splat (i32 11) -; CHECK-NEXT: [[TMP8:%.*]] = xor <2 x i1> [[TMP7]], splat (i1 true) -; CHECK-NEXT: [[TMP9:%.*]] = extractelement <2 x i1> [[TMP8]], i32 0 -; CHECK-NEXT: br i1 [[TMP9]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] -; CHECK: [[PRED_LOAD_IF]]: ; 
CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[TMP4]] -; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4, !alias.scope [[META49:![0-9]+]] -; CHECK-NEXT: [[TMP12:%.*]] = insertelement <2 x i32> poison, i32 [[TMP11]], i32 0 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE]] -; CHECK: [[PRED_LOAD_CONTINUE]]: -; CHECK-NEXT: [[TMP13:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP12]], %[[PRED_LOAD_IF]] ] -; CHECK-NEXT: [[TMP14:%.*]] = extractelement <2 x i1> [[TMP8]], i32 1 -; CHECK-NEXT: br i1 [[TMP14]], label %[[PRED_LOAD_IF6:.*]], label %[[PRED_LOAD_CONTINUE7:.*]] -; CHECK: [[PRED_LOAD_IF6]]: ; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[TMP5]] -; CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 4, !alias.scope [[META49]] -; CHECK-NEXT: [[TMP17:%.*]] = insertelement <2 x i32> [[TMP13]], i32 [[TMP16]], i32 1 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE7]] -; CHECK: [[PRED_LOAD_CONTINUE7]]: -; CHECK-NEXT: [[TMP18:%.*]] = phi <2 x i32> [ [[TMP13]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP17]], %[[PRED_LOAD_IF6]] ] -; CHECK-NEXT: [[TMP19:%.*]] = add <2 x i32> [[TMP18]], splat (i32 10) -; CHECK-NEXT: [[TMP20:%.*]] = extractelement <2 x i1> [[TMP7]], i32 0 -; CHECK-NEXT: br i1 [[TMP20]], label %[[PRED_LOAD_IF8:.*]], label %[[PRED_LOAD_CONTINUE9:.*]] -; CHECK: [[PRED_LOAD_IF8]]: -; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[TMP4]] -; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4, !alias.scope [[META49]] +; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP10]], align 4, !alias.scope [[META49:![0-9]+]] +; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP15]], align 4, !alias.scope [[META49]] ; CHECK-NEXT: [[TMP23:%.*]] = insertelement <2 x i32> poison, i32 [[TMP22]], i32 0 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE9]] -; CHECK: [[PRED_LOAD_CONTINUE9]]: -; CHECK-NEXT: [[TMP24:%.*]] = phi <2 x i32> [ poison, %[[PRED_LOAD_CONTINUE7]] ], [ [[TMP23]], 
%[[PRED_LOAD_IF8]] ] -; CHECK-NEXT: [[TMP25:%.*]] = extractelement <2 x i1> [[TMP7]], i32 1 -; CHECK-NEXT: br i1 [[TMP25]], label %[[PRED_LOAD_IF10:.*]], label %[[PRED_LOAD_CONTINUE11]] -; CHECK: [[PRED_LOAD_IF10]]: -; CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[TMP5]] -; CHECK-NEXT: [[TMP27:%.*]] = load i32, ptr [[TMP26]], align 4, !alias.scope [[META49]] -; CHECK-NEXT: [[TMP28:%.*]] = insertelement <2 x i32> [[TMP24]], i32 [[TMP27]], i32 1 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE11]] -; CHECK: [[PRED_LOAD_CONTINUE11]]: -; CHECK-NEXT: [[TMP29:%.*]] = phi <2 x i32> [ [[TMP24]], %[[PRED_LOAD_CONTINUE9]] ], [ [[TMP28]], %[[PRED_LOAD_IF10]] ] +; CHECK-NEXT: [[TMP29:%.*]] = insertelement <2 x i32> [[TMP23]], i32 [[TMP8]], i32 1 +; CHECK-NEXT: [[TMP19:%.*]] = add <2 x i32> [[TMP29]], splat (i32 10) ; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP7]], <2 x i32> [[TMP29]], <2 x i32> [[TMP19]] ; CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[TMP4]] ; CHECK-NEXT: store <2 x i32> [[PREDPHI]], ptr [[TMP30]], align 4, !alias.scope [[META51:![0-9]+]], !noalias [[META53:![0-9]+]] @@ -752,50 +659,19 @@ define void @non_unit_stride_i64(ptr %dst, ptr %src, ptr %cond) { ; CHECK: [[VECTOR_PH]]: ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_LOAD_CONTINUE11:.*]] ] +; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP6:%.*]] = add i32 [[INDEX]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = add i32 [[INDEX]], 1 ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[COND]], i32 [[TMP6]] ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[TMP8]], align 4, !alias.scope [[META56:![0-9]+]] ; CHECK-NEXT: [[TMP9:%.*]] = icmp ule <2 x i32> [[WIDE_LOAD]], splat (i32 11) -; CHECK-NEXT: [[TMP10:%.*]] = xor <2 x i1> [[TMP9]], splat (i1 true) -; 
CHECK-NEXT: [[TMP11:%.*]] = extractelement <2 x i1> [[TMP10]], i32 0 -; CHECK-NEXT: br i1 [[TMP11]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] -; CHECK: [[PRED_LOAD_IF]]: ; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i32 [[TMP6]] -; CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4, !alias.scope [[META59:![0-9]+]] -; CHECK-NEXT: [[TMP14:%.*]] = insertelement <2 x i32> poison, i32 [[TMP13]], i32 0 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE]] -; CHECK: [[PRED_LOAD_CONTINUE]]: -; CHECK-NEXT: [[TMP15:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP14]], %[[PRED_LOAD_IF]] ] -; CHECK-NEXT: [[TMP16:%.*]] = extractelement <2 x i1> [[TMP10]], i32 1 -; CHECK-NEXT: br i1 [[TMP16]], label %[[PRED_LOAD_IF6:.*]], label %[[PRED_LOAD_CONTINUE7:.*]] -; CHECK: [[PRED_LOAD_IF6]]: ; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i32 [[TMP7]] -; CHECK-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP17]], align 4, !alias.scope [[META59]] -; CHECK-NEXT: [[TMP19:%.*]] = insertelement <2 x i32> [[TMP15]], i32 [[TMP18]], i32 1 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE7]] -; CHECK: [[PRED_LOAD_CONTINUE7]]: -; CHECK-NEXT: [[TMP20:%.*]] = phi <2 x i32> [ [[TMP15]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP19]], %[[PRED_LOAD_IF6]] ] -; CHECK-NEXT: [[TMP21:%.*]] = add <2 x i32> [[TMP20]], splat (i32 10) -; CHECK-NEXT: [[TMP22:%.*]] = extractelement <2 x i1> [[TMP9]], i32 0 -; CHECK-NEXT: br i1 [[TMP22]], label %[[PRED_LOAD_IF8:.*]], label %[[PRED_LOAD_CONTINUE9:.*]] -; CHECK: [[PRED_LOAD_IF8]]: -; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i32 [[TMP6]] -; CHECK-NEXT: [[TMP24:%.*]] = load i32, ptr [[TMP23]], align 4, !alias.scope [[META59]] +; CHECK-NEXT: [[TMP24:%.*]] = load i32, ptr [[TMP12]], align 4, !alias.scope [[META59:![0-9]+]] +; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP17]], align 4, !alias.scope [[META59]] ; CHECK-NEXT: [[TMP25:%.*]] = insertelement <2 x i32> poison, i32 
[[TMP24]], i32 0 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE9]] -; CHECK: [[PRED_LOAD_CONTINUE9]]: -; CHECK-NEXT: [[TMP26:%.*]] = phi <2 x i32> [ poison, %[[PRED_LOAD_CONTINUE7]] ], [ [[TMP25]], %[[PRED_LOAD_IF8]] ] -; CHECK-NEXT: [[TMP27:%.*]] = extractelement <2 x i1> [[TMP9]], i32 1 -; CHECK-NEXT: br i1 [[TMP27]], label %[[PRED_LOAD_IF10:.*]], label %[[PRED_LOAD_CONTINUE11]] -; CHECK: [[PRED_LOAD_IF10]]: -; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i32 [[TMP7]] -; CHECK-NEXT: [[TMP29:%.*]] = load i32, ptr [[TMP28]], align 4, !alias.scope [[META59]] -; CHECK-NEXT: [[TMP30:%.*]] = insertelement <2 x i32> [[TMP26]], i32 [[TMP29]], i32 1 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE11]] -; CHECK: [[PRED_LOAD_CONTINUE11]]: -; CHECK-NEXT: [[TMP31:%.*]] = phi <2 x i32> [ [[TMP26]], %[[PRED_LOAD_CONTINUE9]] ], [ [[TMP30]], %[[PRED_LOAD_IF10]] ] +; CHECK-NEXT: [[TMP31:%.*]] = insertelement <2 x i32> [[TMP25]], i32 [[TMP10]], i32 1 +; CHECK-NEXT: [[TMP21:%.*]] = add <2 x i32> [[TMP31]], splat (i32 10) ; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP9]], <2 x i32> [[TMP31]], <2 x i32> [[TMP21]] ; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[TMP6]] ; CHECK-NEXT: store <2 x i32> [[PREDPHI]], ptr [[TMP32]], align 4, !alias.scope [[META61:![0-9]+]], !noalias [[META63:![0-9]+]] @@ -1045,55 +921,15 @@ define void @hoist_predicated_load_with_chained_geps1(ptr %dst, ptr %src, i1 %co ; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]] ; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: -; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i1> poison, i1 [[COND]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i1> [[BROADCAST_SPLATINSERT]], <2 x i1> poison, <2 x i32> zeroinitializer -; CHECK-NEXT: [[TMP0:%.*]] = xor <2 x i1> [[BROADCAST_SPLAT]], splat (i1 true) ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: 
[[VECTOR_BODY]]: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_LOAD_CONTINUE8:.*]] ] +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 0 ; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x i1> [[TMP0]], i32 0 -; CHECK-NEXT: br i1 [[TMP3]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] -; CHECK: [[PRED_LOAD_IF]]: -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr [11 x i16], ptr [[SRC]], i64 [[TMP1]] -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP4]], i64 8 -; CHECK-NEXT: [[TMP6:%.*]] = load i16, ptr [[TMP5]], align 2, !alias.scope [[META68:![0-9]+]] -; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x i16> poison, i16 [[TMP6]], i32 0 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE]] -; CHECK: [[PRED_LOAD_CONTINUE]]: -; CHECK-NEXT: [[TMP8:%.*]] = phi <2 x i16> [ poison, %[[VECTOR_BODY]] ], [ [[TMP7]], %[[PRED_LOAD_IF]] ] -; CHECK-NEXT: [[TMP9:%.*]] = extractelement <2 x i1> [[TMP0]], i32 1 -; CHECK-NEXT: br i1 [[TMP9]], label %[[PRED_LOAD_IF3:.*]], label %[[PRED_LOAD_CONTINUE4:.*]] -; CHECK: [[PRED_LOAD_IF3]]: -; CHECK-NEXT: [[TMP10:%.*]] = getelementptr [11 x i16], ptr [[SRC]], i64 [[TMP2]] -; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[TMP10]], i64 8 -; CHECK-NEXT: [[TMP12:%.*]] = load i16, ptr [[TMP11]], align 2, !alias.scope [[META68]] -; CHECK-NEXT: [[TMP13:%.*]] = insertelement <2 x i16> [[TMP8]], i16 [[TMP12]], i32 1 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE4]] -; CHECK: [[PRED_LOAD_CONTINUE4]]: -; CHECK-NEXT: [[TMP14:%.*]] = phi <2 x i16> [ [[TMP8]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP13]], %[[PRED_LOAD_IF3]] ] -; CHECK-NEXT: br i1 [[COND]], label %[[PRED_LOAD_IF5:.*]], label %[[PRED_LOAD_CONTINUE6:.*]] -; CHECK: [[PRED_LOAD_IF5]]: -; CHECK-NEXT: [[TMP15:%.*]] = getelementptr [11 x i16], ptr [[SRC]], i64 [[TMP1]] -; CHECK-NEXT: [[TMP16:%.*]] = 
getelementptr i8, ptr [[TMP15]], i64 8 -; CHECK-NEXT: [[TMP17:%.*]] = load i16, ptr [[TMP16]], align 2, !alias.scope [[META68]] -; CHECK-NEXT: [[TMP18:%.*]] = insertelement <2 x i16> poison, i16 [[TMP17]], i32 0 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE6]] -; CHECK: [[PRED_LOAD_CONTINUE6]]: -; CHECK-NEXT: [[TMP19:%.*]] = phi <2 x i16> [ poison, %[[PRED_LOAD_CONTINUE4]] ], [ [[TMP18]], %[[PRED_LOAD_IF5]] ] -; CHECK-NEXT: br i1 [[COND]], label %[[PRED_LOAD_IF7:.*]], label %[[PRED_LOAD_CONTINUE8]] -; CHECK: [[PRED_LOAD_IF7]]: ; CHECK-NEXT: [[TMP20:%.*]] = getelementptr [11 x i16], ptr [[SRC]], i64 [[TMP2]] ; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[TMP20]], i64 8 -; CHECK-NEXT: [[TMP22:%.*]] = load i16, ptr [[TMP21]], align 2, !alias.scope [[META68]] -; CHECK-NEXT: [[TMP23:%.*]] = insertelement <2 x i16> [[TMP19]], i16 [[TMP22]], i32 1 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE8]] -; CHECK: [[PRED_LOAD_CONTINUE8]]: -; CHECK-NEXT: [[TMP24:%.*]] = phi <2 x i16> [ [[TMP19]], %[[PRED_LOAD_CONTINUE6]] ], [ [[TMP23]], %[[PRED_LOAD_IF7]] ] -; CHECK-NEXT: [[PREDPHI:%.*]] = select i1 [[COND]], <2 x i16> [[TMP24]], <2 x i16> [[TMP14]] -; CHECK-NEXT: [[TMP25:%.*]] = extractelement <2 x i16> [[PREDPHI]], i32 1 -; CHECK-NEXT: store i16 [[TMP25]], ptr [[DST]], align 2, !alias.scope [[META71:![0-9]+]], !noalias [[META68]] +; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr [[TMP21]], align 2, !alias.scope [[META68:![0-9]+]] +; CHECK-NEXT: store i16 [[TMP4]], ptr [[DST]], align 2, !alias.scope [[META71:![0-9]+]], !noalias [[META68]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], 100 ; CHECK-NEXT: br i1 [[TMP26]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP73:![0-9]+]] @@ -1145,55 +981,15 @@ define void @hoist_predicated_load_with_chained_geps2(ptr %dst, ptr %src, i1 %co ; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]] ; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label 
%[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: -; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i1> poison, i1 [[COND]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i1> [[BROADCAST_SPLATINSERT]], <2 x i1> poison, <2 x i32> zeroinitializer -; CHECK-NEXT: [[TMP0:%.*]] = xor <2 x i1> [[BROADCAST_SPLAT]], splat (i1 true) ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_LOAD_CONTINUE8:.*]] ] +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 0 ; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr [11 x i16], ptr [[SRC]], i64 [[TMP1]] ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr [11 x i16], ptr [[SRC]], i64 [[TMP2]] -; CHECK-NEXT: [[TMP5:%.*]] = insertelement <2 x ptr> poison, ptr [[TMP3]], i32 0 -; CHECK-NEXT: [[TMP6:%.*]] = insertelement <2 x ptr> [[TMP5]], ptr [[TMP4]], i32 1 -; CHECK-NEXT: [[TMP7:%.*]] = extractelement <2 x i1> [[TMP0]], i32 0 -; CHECK-NEXT: br i1 [[TMP7]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] -; CHECK: [[PRED_LOAD_IF]]: -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP3]], i64 8 -; CHECK-NEXT: [[TMP9:%.*]] = load i16, ptr [[TMP8]], align 2, !alias.scope [[META75:![0-9]+]] -; CHECK-NEXT: [[TMP10:%.*]] = insertelement <2 x i16> poison, i16 [[TMP9]], i32 0 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE]] -; CHECK: [[PRED_LOAD_CONTINUE]]: -; CHECK-NEXT: [[TMP11:%.*]] = phi <2 x i16> [ poison, %[[VECTOR_BODY]] ], [ [[TMP10]], %[[PRED_LOAD_IF]] ] -; CHECK-NEXT: [[TMP12:%.*]] = extractelement <2 x i1> [[TMP0]], i32 1 -; CHECK-NEXT: br i1 [[TMP12]], label %[[PRED_LOAD_IF3:.*]], label %[[PRED_LOAD_CONTINUE4:.*]] -; CHECK: [[PRED_LOAD_IF3]]: -; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[TMP4]], i64 8 -; CHECK-NEXT: 
[[TMP14:%.*]] = load i16, ptr [[TMP13]], align 2, !alias.scope [[META75]] -; CHECK-NEXT: [[TMP15:%.*]] = insertelement <2 x i16> [[TMP11]], i16 [[TMP14]], i32 1 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE4]] -; CHECK: [[PRED_LOAD_CONTINUE4]]: -; CHECK-NEXT: [[TMP16:%.*]] = phi <2 x i16> [ [[TMP11]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP15]], %[[PRED_LOAD_IF3]] ] -; CHECK-NEXT: br i1 [[COND]], label %[[PRED_LOAD_IF5:.*]], label %[[PRED_LOAD_CONTINUE6:.*]] -; CHECK: [[PRED_LOAD_IF5]]: -; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[TMP3]], i64 8 -; CHECK-NEXT: [[TMP18:%.*]] = load i16, ptr [[TMP17]], align 2, !alias.scope [[META75]] -; CHECK-NEXT: [[TMP19:%.*]] = insertelement <2 x i16> poison, i16 [[TMP18]], i32 0 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE6]] -; CHECK: [[PRED_LOAD_CONTINUE6]]: -; CHECK-NEXT: [[TMP20:%.*]] = phi <2 x i16> [ poison, %[[PRED_LOAD_CONTINUE4]] ], [ [[TMP19]], %[[PRED_LOAD_IF5]] ] -; CHECK-NEXT: br i1 [[COND]], label %[[PRED_LOAD_IF7:.*]], label %[[PRED_LOAD_CONTINUE8]] -; CHECK: [[PRED_LOAD_IF7]]: ; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[TMP4]], i64 8 -; CHECK-NEXT: [[TMP22:%.*]] = load i16, ptr [[TMP21]], align 2, !alias.scope [[META75]] -; CHECK-NEXT: [[TMP23:%.*]] = insertelement <2 x i16> [[TMP20]], i16 [[TMP22]], i32 1 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE8]] -; CHECK: [[PRED_LOAD_CONTINUE8]]: -; CHECK-NEXT: [[TMP24:%.*]] = phi <2 x i16> [ [[TMP20]], %[[PRED_LOAD_CONTINUE6]] ], [ [[TMP23]], %[[PRED_LOAD_IF7]] ] -; CHECK-NEXT: [[PREDPHI:%.*]] = select i1 [[COND]], <2 x i16> [[TMP24]], <2 x i16> [[TMP16]] -; CHECK-NEXT: [[TMP25:%.*]] = extractelement <2 x i16> [[PREDPHI]], i32 1 -; CHECK-NEXT: store i16 [[TMP25]], ptr [[DST]], align 2, !alias.scope [[META78:![0-9]+]], !noalias [[META75]] +; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP21]], align 2, !alias.scope [[META75:![0-9]+]] +; CHECK-NEXT: store i16 [[TMP5]], ptr [[DST]], align 2, !alias.scope [[META78:![0-9]+]], !noalias [[META75]] ; CHECK-NEXT: 
[[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], 100 ; CHECK-NEXT: br i1 [[TMP26]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP80:![0-9]+]] @@ -1262,7 +1058,7 @@ define void @hoist_all_three_loads_at_same_address(ptr %dst, ptr %src, ptr noali ; CHECK-NEXT: [[TMP16:%.*]] = extractelement <2 x i1> [[TMP15]], i32 0 ; CHECK-NEXT: br i1 [[TMP16]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] ; CHECK: [[PRED_LOAD_IF]]: -; CHECK-NEXT: [[TMP17:%.*]] = load i32, ptr [[TMP6]], align 4, !alias.scope [[META62:![0-9]+]] +; CHECK-NEXT: [[TMP17:%.*]] = load i32, ptr [[TMP6]], align 4, !alias.scope [[META82:![0-9]+]] ; CHECK-NEXT: [[TMP18:%.*]] = insertelement <2 x i32> poison, i32 [[TMP17]], i32 0 ; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE]] ; CHECK: [[PRED_LOAD_CONTINUE]]: @@ -1270,7 +1066,7 @@ define void @hoist_all_three_loads_at_same_address(ptr %dst, ptr %src, ptr noali ; CHECK-NEXT: [[TMP20:%.*]] = extractelement <2 x i1> [[TMP15]], i32 1 ; CHECK-NEXT: br i1 [[TMP20]], label %[[PRED_LOAD_IF2:.*]], label %[[PRED_LOAD_CONTINUE3:.*]] ; CHECK: [[PRED_LOAD_IF2]]: -; CHECK-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP7]], align 4, !alias.scope [[META62]] +; CHECK-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP7]], align 4, !alias.scope [[META82]] ; CHECK-NEXT: [[TMP22:%.*]] = insertelement <2 x i32> [[TMP19]], i32 [[TMP21]], i32 1 ; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE3]] ; CHECK: [[PRED_LOAD_CONTINUE3]]: @@ -1280,7 +1076,7 @@ define void @hoist_all_three_loads_at_same_address(ptr %dst, ptr %src, ptr noali ; CHECK-NEXT: [[TMP26:%.*]] = extractelement <2 x i1> [[TMP25]], i32 0 ; CHECK-NEXT: br i1 [[TMP26]], label %[[PRED_LOAD_IF4:.*]], label %[[PRED_LOAD_CONTINUE5:.*]] ; CHECK: [[PRED_LOAD_IF4]]: -; CHECK-NEXT: [[TMP27:%.*]] = load i32, ptr [[TMP6]], align 4, !alias.scope [[META62]] +; CHECK-NEXT: [[TMP27:%.*]] = load i32, ptr [[TMP6]], align 4, !alias.scope [[META82]] ; CHECK-NEXT: 
[[TMP28:%.*]] = insertelement <2 x i32> poison, i32 [[TMP27]], i32 0 ; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE5]] ; CHECK: [[PRED_LOAD_CONTINUE5]]: @@ -1288,7 +1084,7 @@ define void @hoist_all_three_loads_at_same_address(ptr %dst, ptr %src, ptr noali ; CHECK-NEXT: [[TMP30:%.*]] = extractelement <2 x i1> [[TMP25]], i32 1 ; CHECK-NEXT: br i1 [[TMP30]], label %[[PRED_LOAD_IF6:.*]], label %[[PRED_LOAD_CONTINUE7:.*]] ; CHECK: [[PRED_LOAD_IF6]]: -; CHECK-NEXT: [[TMP31:%.*]] = load i32, ptr [[TMP7]], align 4, !alias.scope [[META62]] +; CHECK-NEXT: [[TMP31:%.*]] = load i32, ptr [[TMP7]], align 4, !alias.scope [[META82]] ; CHECK-NEXT: [[TMP32:%.*]] = insertelement <2 x i32> [[TMP29]], i32 [[TMP31]], i32 1 ; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE7]] ; CHECK: [[PRED_LOAD_CONTINUE7]]: @@ -1297,7 +1093,7 @@ define void @hoist_all_three_loads_at_same_address(ptr %dst, ptr %src, ptr noali ; CHECK-NEXT: [[TMP35:%.*]] = extractelement <2 x i1> [[TMP11]], i32 0 ; CHECK-NEXT: br i1 [[TMP35]], label %[[PRED_LOAD_IF8:.*]], label %[[PRED_LOAD_CONTINUE9:.*]] ; CHECK: [[PRED_LOAD_IF8]]: -; CHECK-NEXT: [[TMP36:%.*]] = load i32, ptr [[TMP6]], align 4, !alias.scope [[META62]] +; CHECK-NEXT: [[TMP36:%.*]] = load i32, ptr [[TMP6]], align 4, !alias.scope [[META82]] ; CHECK-NEXT: [[TMP37:%.*]] = insertelement <2 x i32> poison, i32 [[TMP36]], i32 0 ; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE9]] ; CHECK: [[PRED_LOAD_CONTINUE9]]: @@ -1305,7 +1101,7 @@ define void @hoist_all_three_loads_at_same_address(ptr %dst, ptr %src, ptr noali ; CHECK-NEXT: [[TMP39:%.*]] = extractelement <2 x i1> [[TMP11]], i32 1 ; CHECK-NEXT: br i1 [[TMP39]], label %[[PRED_LOAD_IF10:.*]], label %[[PRED_LOAD_CONTINUE11]] ; CHECK: [[PRED_LOAD_IF10]]: -; CHECK-NEXT: [[TMP40:%.*]] = load i32, ptr [[TMP7]], align 4, !alias.scope [[META62]] +; CHECK-NEXT: [[TMP40:%.*]] = load i32, ptr [[TMP7]], align 4, !alias.scope [[META82]] ; CHECK-NEXT: [[TMP41:%.*]] = insertelement <2 x i32> [[TMP38]], i32 [[TMP40]], i32 1 ; CHECK-NEXT: 
br label %[[PRED_LOAD_CONTINUE11]] ; CHECK: [[PRED_LOAD_CONTINUE11]]: @@ -1313,10 +1109,10 @@ define void @hoist_all_three_loads_at_same_address(ptr %dst, ptr %src, ptr noali ; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP25]], <2 x i32> [[TMP34]], <2 x i32> [[TMP24]] ; CHECK-NEXT: [[PREDPHI16:%.*]] = select <2 x i1> [[TMP11]], <2 x i32> [[TMP42]], <2 x i32> [[PREDPHI]] ; CHECK-NEXT: [[TMP43:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[TMP4]] -; CHECK-NEXT: store <2 x i32> [[PREDPHI16]], ptr [[TMP43]], align 4, !alias.scope [[META65:![0-9]+]], !noalias [[META62]] +; CHECK-NEXT: store <2 x i32> [[PREDPHI16]], ptr [[TMP43]], align 4, !alias.scope [[META85:![0-9]+]], !noalias [[META82]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2 ; CHECK-NEXT: [[TMP44:%.*]] = icmp eq i32 [[INDEX_NEXT]], 100 -; CHECK-NEXT: br i1 [[TMP44]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP67:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP44]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP87:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br [[EXIT:label %.*]] ; CHECK: [[SCALAR_PH]]: diff --git a/llvm/test/Transforms/LoopVectorize/if-conversion.ll b/llvm/test/Transforms/LoopVectorize/if-conversion.ll index a88a9b1466149..350c267445e54 100644 --- a/llvm/test/Transforms/LoopVectorize/if-conversion.ll +++ b/llvm/test/Transforms/LoopVectorize/if-conversion.ll @@ -237,22 +237,36 @@ for.end: ; preds = %for.inc, %entry ; Handle PHI with single incoming value having a full mask. ; PR34523 -; NOTE: Changing PHI inputs from undef to poison leads to change in -; behaviour of the test. Left as undef for now. 
-define void @PR34523() { -; CHECK-LABEL: define void @PR34523() { -; CHECK-NEXT: [[BB1:.*:]] -; CHECK-NEXT: br i1 true, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +define void @PR34523(ptr %p, i16 %val) { +; CHECK-LABEL: define void @PR34523( +; CHECK-SAME: ptr [[P:%.*]], i16 [[VAL:%.*]]) { +; CHECK-NEXT: [[BB1:.*]]: +; CHECK-NEXT: [[TMP0:%.*]] = add i16 [[VAL]], 1 +; CHECK-NEXT: [[SMAX:%.*]] = call i16 @llvm.smax.i16(i16 [[TMP0]], i16 2) +; CHECK-NEXT: [[TMP1:%.*]] = xor i16 [[VAL]], -1 +; CHECK-NEXT: [[TMP2:%.*]] = add i16 [[SMAX]], [[TMP1]] +; CHECK-NEXT: [[TMP3:%.*]] = zext i16 [[TMP2]] to i32 +; CHECK-NEXT: [[TMP4:%.*]] = add nuw nsw i32 [[TMP3]], 1 +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i16 [[TMP2]], 3 +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[N_VEC:%.*]] = and i32 [[TMP4]], 131068 +; CHECK-NEXT: [[DOTCAST:%.*]] = trunc i32 [[N_VEC]] to i16 +; CHECK-NEXT: [[TMP5:%.*]] = add i16 [[VAL]], [[DOTCAST]] ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: br i1 poison, label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 +; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br i1 poison, label %[[BB5:.*]], label %[[SCALAR_PH]] +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP4]], [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label %[[BB5:.*]], label %[[SCALAR_PH]] ; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i16 [ [[TMP5]], %[[MIDDLE_BLOCK]] ], [ [[VAL]], %[[BB1]] ] ; CHECK-NEXT: br label %[[BB2:.*]] ; CHECK: [[BB2]]: -; CHECK-NEXT: [[I:%.*]] = phi i16 [ 
undef, %[[SCALAR_PH]] ], [ [[_TMP2:%.*]], %[[BB4:.*]] ] +; CHECK-NEXT: [[I:%.*]] = phi i16 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[_TMP2:%.*]], %[[BB4:.*]] ] ; CHECK-NEXT: br label %[[BB3:.*]] ; CHECK: [[BB3]]: ; CHECK-NEXT: br label %[[BB4]] @@ -267,11 +281,11 @@ bb1: br label %bb2 bb2: ; preds = %bb4, %bb1 - %i = phi i16 [ undef, %bb1 ], [ %_tmp2, %bb4 ] + %i = phi i16 [ %val, %bb1 ], [ %_tmp2, %bb4 ] br label %bb3 bb3: ; preds = %bb2 - %_tmp1 = phi ptr [ undef, %bb2 ] + %_tmp1 = phi ptr [ %p, %bb2 ] br label %bb4 bb4: ; preds = %bb3 diff --git a/llvm/test/Transforms/LoopVectorize/if-reduction.ll b/llvm/test/Transforms/LoopVectorize/if-reduction.ll index 73a2203c3115b..eab9df558f608 100644 --- a/llvm/test/Transforms/LoopVectorize/if-reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/if-reduction.ll @@ -1648,8 +1648,8 @@ define i32 @fcmp_0_sub_select1(ptr noalias %x, i32 %N) nounwind readonly { ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = sub i64 0, [[INDEX]] ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[OFFSET_IDX]] -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 0 -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[TMP3]], i32 -3 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 0 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[TMP3]], i64 -3 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP4]], align 4 ; CHECK-NEXT: [[REVERSE:%.*]] = shufflevector <4 x float> [[WIDE_LOAD]], <4 x float> poison, <4 x i32> ; CHECK-NEXT: [[TMP5:%.*]] = fcmp ogt <4 x float> [[REVERSE]], zeroinitializer diff --git a/llvm/test/Transforms/LoopVectorize/incorrect-dom-info.ll b/llvm/test/Transforms/LoopVectorize/incorrect-dom-info.ll index 34873319176d1..df8ade647d968 100644 --- a/llvm/test/Transforms/LoopVectorize/incorrect-dom-info.ll +++ 
b/llvm/test/Transforms/LoopVectorize/incorrect-dom-info.ll @@ -58,7 +58,7 @@ thread-pre-split.loopexit: ; preds = %11, %.thread-pre-sp br i1 %arg, label %11, label %22 ;